// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
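/* Example: together these macros read one counter out of
 * struct hclge_mac_stats by byte offset:
 *
 *	u64 pkts = HCLGE_STATS_READ(&hdev->mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num));
 */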
#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500
/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_RST_ING};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};
static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};
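/* hclge_dfx_bd_offset_list and hclge_dfx_reg_opcode_list are parallel
 * arrays: entry i of the offset list locates the BD count used by the DFX
 * query opcode at entry i of the opcode list, so both must stay in the
 * same order.
 */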
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6 },
	{ IP_FRAGEMENT, 1 },
	{ ROCE_TYPE, 1 },
	{ NEXT_KEY, 5 },
	{ VLAN_NUMBER, 2 },
	{ SRC_VPORT, 12 },
	{ DST_VPORT, 12 },
	{ TUNNEL_PACKET, 1 },
};
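/* Each tuple_key_info entry below is { tuple, key width in bits, key
 * option, offset of the tuple value in struct hclge_fd_rule, offset of its
 * mask }. An offset of -1 means the tuple cannot be taken from a user
 * rule, as with all the OUTER_* tuples.
 */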
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	int i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);
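	/* The first BD covers three registers; every further BD covers four,
	 * so this computes 1 + DIV_ROUND_UP(reg_num - 3, 4). For example,
	 * reg_num = 21 gives 1 + (18 >> 2) + 1 = 6 BDs.
	 */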
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);
	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has TX & RX two queues */
	return kinfo->num_tqps * 2;
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	/* the case values are the firmware's encoding of the default speed */
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case 8:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	case HCLGE_MAC_SPEED_200G:
		speed_bit = HCLGE_SUPPORT_200G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			mac->supported);
}
static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
	case HCLGE_MAC_SPEED_200G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define SPEED_ABILITY_EXT_SHIFT			8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
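	/* The 48-bit MAC address spans two config words: param[2] carries the
	 * low 32 bits and param[3] the high 16 bits. The double shift is just
	 * "<< 32" split in two, an idiom that avoids shift-count warnings.
	 */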
	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power-of-2
	 * exponent rather than the value itself, which is more flexible for
	 * future changes and expansions. A zero PF field means the PF has no
	 * separate setting; in that case PF and VF share the same max rss
	 * size field: HCLGE_CFG_RSS_SIZE_S.
	 */
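	/* E.g. a pf_rss_size_max field of 7 yields 1U << 7 = 128 queues,
	 * while a field of 0 falls back to vf_rss_size_max below.
	 */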
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;
}
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be divided by 4 when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM 8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently we do not support non-contiguous TCs */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);
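	/* One MSI-X vector is reserved for misc interrupts such as reset and
	 * mailbox events, leaving (num_nic_msi - 1) vectors for the rings;
	 * the cap below keeps a dedicated vector per queue possible.
	 */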
	/* ensure one to one mapping between irq and queue at default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
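	/* E.g. num_tqps = 17 and num_vport = 5 gives tqp_per_vport = 3 and
	 * tqp_main_vport = 3 + 17 % 5 = 5; the PF's own vport absorbs the
	 * remainder.
	 */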
	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		vport->req_vlan_fltr_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
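/* E.g. a 32 KiB TC buffer is programmed as 0x8000 >> 7 = 0x100 units of
 * 128 bytes, with the update-enable bit (BIT(15)) set so that the new
 * value takes effect.
 */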
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);
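	/* shared_std is the larger of the device's minimum shared-buffer need
	 * and the per-TC requirement, rounded up to the buffer unit; e.g.
	 * with aligned_mps = 1536 and tc_num = 4, shared_buf_tc is
	 * (4 + 1) * 1536 = 7680 bytes.
	 */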
	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
2055 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2056 struct hclge_pkt_buf_alloc *buf_alloc)
2060 total_size = hdev->pkt_buf_size;
2062 /* alloc tx buffer for every enabled TC */
2063 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2064 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2066 if (hdev->hw_tc_map & BIT(i)) {
2067 if (total_size < hdev->tx_buf_size)
2070 priv->tx_buf_size = hdev->tx_buf_size;
2072 priv->tx_buf_size = 0;
2075 total_size -= priv->tx_buf_size;
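/* The helper below fills in the rx waterlines for every enabled TC.
 * When @max is true each TC gets the roomier waterlines; when false it
 * gets the minimal ones. hclge_rx_buffer_calc() tries the generous
 * variant first and falls back to the tighter one if the packet buffer
 * cannot accommodate it.
 */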
2081 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2082 struct hclge_pkt_buf_alloc *buf_alloc)
2084 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2085 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2088 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2089 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2096 if (!(hdev->hw_tc_map & BIT(i)))
2101 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2102 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2103 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2104 HCLGE_BUF_SIZE_UNIT);
2107 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2111 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2114 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2117 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2118 struct hclge_pkt_buf_alloc *buf_alloc)
2120 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2121 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2124 /* let the last one be cleared first */
2125 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2126 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2127 unsigned int mask = BIT((unsigned int)i);
2129 if (hdev->hw_tc_map & mask &&
2130 !(hdev->tm_info.hw_pfc_map & mask)) {
2131 /* Clear the private buffer of this non-PFC TC */
2139 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2140 no_pfc_priv_num == 0)
2144 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2147 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2148 struct hclge_pkt_buf_alloc *buf_alloc)
2150 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2151 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2154 /* let the last one be cleared first */
2155 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2156 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2157 unsigned int mask = BIT((unsigned int)i);
2159 if (hdev->hw_tc_map & mask &&
2160 hdev->tm_info.hw_pfc_map & mask) {
2161 /* Reduce the number of PFC TCs with a private buffer */
2169 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2174 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
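/* A worked example of the minimum check below, assuming mps = 1500 and
 * a hypothetical dv_buf_size of 8192 bytes: half_mps = 750, so
 * min_rx_priv = 8192 + COMPENSATE_BUFFER (0x3C00 = 15360) +
 * COMPENSATE_HALF_MPS_NUM * 750 = 27302, rounded up to 27392 bytes.
 * Each enabled TC must get at least that much private buffer or this
 * strategy is abandoned.
 */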
2177 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2178 struct hclge_pkt_buf_alloc *buf_alloc)
2180 #define COMPENSATE_BUFFER 0x3C00
2181 #define COMPENSATE_HALF_MPS_NUM 5
2182 #define PRIV_WL_GAP 0x1800
2184 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2185 u32 tc_num = hclge_get_tc_num(hdev);
2186 u32 half_mps = hdev->mps >> 1;
2191 rx_priv = rx_priv / tc_num;
2193 if (tc_num <= NEED_RESERVE_TC_NUM)
2194 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2196 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2197 COMPENSATE_HALF_MPS_NUM * half_mps;
2198 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2199 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2200 if (rx_priv < min_rx_priv)
2203 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2204 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2211 if (!(hdev->hw_tc_map & BIT(i)))
2215 priv->buf_size = rx_priv;
2216 priv->wl.high = rx_priv - hdev->dv_buf_size;
2217 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2220 buf_alloc->s_buf.buf_size = 0;
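/* The rx buffer calculation below tries allocation strategies in
 * decreasing order of generosity: private buffers only, all TCs with
 * large waterlines, all TCs with small waterlines, then dropping the
 * private buffers of non-PFC TCs and finally of PFC TCs, stopping at
 * the first layout that hclge_is_rx_buf_ok() accepts.
 */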
2225 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2226 * @hdev: pointer to struct hclge_dev
2227 * @buf_alloc: pointer to buffer calculation data
2228 * @return: 0: calculation successful, negative: failure
2230 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2231 struct hclge_pkt_buf_alloc *buf_alloc)
2233 /* When DCB is not supported, rx private buffer is not allocated. */
2234 if (!hnae3_dev_dcb_supported(hdev)) {
2235 u32 rx_all = hdev->pkt_buf_size;
2237 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2238 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2244 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2247 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2250 /* try to decrease the buffer size */
2251 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2254 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2257 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2263 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2264 struct hclge_pkt_buf_alloc *buf_alloc)
2266 struct hclge_rx_priv_buff_cmd *req;
2267 struct hclge_desc desc;
2271 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2272 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2274 /* Allocate private buffers to the TCs */
2275 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2276 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2279 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2281 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2285 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2286 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2288 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2290 dev_err(&hdev->pdev->dev,
2291 "rx private buffer alloc cmd failed %d\n", ret);
2296 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2297 struct hclge_pkt_buf_alloc *buf_alloc)
2299 struct hclge_rx_priv_wl_buf *req;
2300 struct hclge_priv_buf *priv;
2301 struct hclge_desc desc[2];
2305 for (i = 0; i < 2; i++) {
2306 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2308 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2310 /* The first descriptor sets the NEXT bit to 1 */
2312 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2314 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2316 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2317 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2319 priv = &buf_alloc->priv_buf[idx];
2320 req->tc_wl[j].high =
2321 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2322 req->tc_wl[j].high |=
2323 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2325 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2326 req->tc_wl[j].low |=
2327 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2331 /* Send 2 descriptors at one time */
2332 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2334 dev_err(&hdev->pdev->dev,
2335 "rx private waterline config cmd failed %d\n",
2340 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2341 struct hclge_pkt_buf_alloc *buf_alloc)
2343 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2344 struct hclge_rx_com_thrd *req;
2345 struct hclge_desc desc[2];
2346 struct hclge_tc_thrd *tc;
2350 for (i = 0; i < 2; i++) {
2351 hclge_cmd_setup_basic_desc(&desc[i],
2352 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2353 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2355 /* The first descriptor sets the NEXT bit to 1 */
2357 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2359 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2361 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2362 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2364 req->com_thrd[j].high =
2365 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2366 req->com_thrd[j].high |=
2367 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2368 req->com_thrd[j].low =
2369 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2370 req->com_thrd[j].low |=
2371 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2375 /* Send 2 descriptors at one time */
2376 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2378 dev_err(&hdev->pdev->dev,
2379 "common threshold config cmd failed %d\n", ret);
2383 static int hclge_common_wl_config(struct hclge_dev *hdev,
2384 struct hclge_pkt_buf_alloc *buf_alloc)
2386 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2387 struct hclge_rx_com_wl *req;
2388 struct hclge_desc desc;
2391 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2393 req = (struct hclge_rx_com_wl *)desc.data;
2394 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2395 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2397 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2398 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2400 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2402 dev_err(&hdev->pdev->dev,
2403 "common waterline config cmd failed %d\n", ret);
2408 int hclge_buffer_alloc(struct hclge_dev *hdev)
2410 struct hclge_pkt_buf_alloc *pkt_buf;
2413 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2417 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2419 dev_err(&hdev->pdev->dev,
2420 "could not calc tx buffer size for all TCs %d\n", ret);
2424 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2426 dev_err(&hdev->pdev->dev,
2427 "could not alloc tx buffers %d\n", ret);
2431 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2433 dev_err(&hdev->pdev->dev,
2434 "could not calc rx priv buffer size for all TCs %d\n",
2439 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2441 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2446 if (hnae3_dev_dcb_supported(hdev)) {
2447 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2449 dev_err(&hdev->pdev->dev,
2450 "could not configure rx private waterline %d\n",
2455 ret = hclge_common_thrd_config(hdev, pkt_buf);
2457 dev_err(&hdev->pdev->dev,
2458 "could not configure common threshold %d\n",
2464 ret = hclge_common_wl_config(hdev, pkt_buf);
2466 dev_err(&hdev->pdev->dev,
2467 "could not configure common waterline %d\n", ret);
2474 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2476 struct hnae3_handle *roce = &vport->roce;
2477 struct hnae3_handle *nic = &vport->nic;
2478 struct hclge_dev *hdev = vport->back;
2480 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2482 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2485 roce->rinfo.base_vector = hdev->roce_base_vector;
2487 roce->rinfo.netdev = nic->kinfo.netdev;
2488 roce->rinfo.roce_io_base = hdev->hw.io_base;
2489 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2491 roce->pdev = nic->pdev;
2492 roce->ae_algo = nic->ae_algo;
2493 roce->numa_node_mask = nic->numa_node_mask;
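/* Vector bookkeeping used by the MSI setup below: vector_status[i]
 * records the vport owning vector i (HCLGE_INVALID_VPORT while free)
 * and vector_irq[i] caches the Linux IRQ number, so vectors can later
 * be handed out to the NIC/RoCE clients and reclaimed on free.
 */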
2498 static int hclge_init_msi(struct hclge_dev *hdev)
2500 struct pci_dev *pdev = hdev->pdev;
2504 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2506 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2509 "failed(%d) to allocate MSI/MSI-X vectors\n",
2513 if (vectors < hdev->num_msi)
2514 dev_warn(&hdev->pdev->dev,
2515 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2516 hdev->num_msi, vectors);
2518 hdev->num_msi = vectors;
2519 hdev->num_msi_left = vectors;
2521 hdev->base_msi_vector = pdev->irq;
2522 hdev->roce_base_vector = hdev->base_msi_vector +
2525 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2526 sizeof(u16), GFP_KERNEL);
2527 if (!hdev->vector_status) {
2528 pci_free_irq_vectors(pdev);
2532 for (i = 0; i < hdev->num_msi; i++)
2533 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2535 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2536 sizeof(int), GFP_KERNEL);
2537 if (!hdev->vector_irq) {
2538 pci_free_irq_vectors(pdev);
2545 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2547 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2548 duplex = HCLGE_MAC_FULL;
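/* Summary of the firmware speed encoding programmed by the switch
 * below: 1G = 0, 10G = 1, 25G = 2, 40G = 3, 50G = 4, 100G = 5,
 * 10M = 6, 100M = 7, 200G = 8.
 */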
2553 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2556 struct hclge_config_mac_speed_dup_cmd *req;
2557 struct hclge_desc desc;
2560 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2562 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2565 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2568 case HCLGE_MAC_SPEED_10M:
2569 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570 HCLGE_CFG_SPEED_S, 6);
2572 case HCLGE_MAC_SPEED_100M:
2573 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574 HCLGE_CFG_SPEED_S, 7);
2576 case HCLGE_MAC_SPEED_1G:
2577 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2578 HCLGE_CFG_SPEED_S, 0);
2580 case HCLGE_MAC_SPEED_10G:
2581 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2582 HCLGE_CFG_SPEED_S, 1);
2584 case HCLGE_MAC_SPEED_25G:
2585 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586 HCLGE_CFG_SPEED_S, 2);
2588 case HCLGE_MAC_SPEED_40G:
2589 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590 HCLGE_CFG_SPEED_S, 3);
2592 case HCLGE_MAC_SPEED_50G:
2593 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594 HCLGE_CFG_SPEED_S, 4);
2596 case HCLGE_MAC_SPEED_100G:
2597 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2598 HCLGE_CFG_SPEED_S, 5);
2600 case HCLGE_MAC_SPEED_200G:
2601 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2602 HCLGE_CFG_SPEED_S, 8);
2605 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2609 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2612 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2614 dev_err(&hdev->pdev->dev,
2615 "mac speed/duplex config cmd failed %d.\n", ret);
2622 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2624 struct hclge_mac *mac = &hdev->hw.mac;
2627 duplex = hclge_check_speed_dup(duplex, speed);
2628 if (!mac->support_autoneg && mac->speed == speed &&
2629 mac->duplex == duplex)
2632 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2636 hdev->hw.mac.speed = speed;
2637 hdev->hw.mac.duplex = duplex;
2642 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2645 struct hclge_vport *vport = hclge_get_vport(handle);
2646 struct hclge_dev *hdev = vport->back;
2648 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2651 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2653 struct hclge_config_auto_neg_cmd *req;
2654 struct hclge_desc desc;
2658 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2660 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2662 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2663 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2665 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2667 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2673 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2675 struct hclge_vport *vport = hclge_get_vport(handle);
2676 struct hclge_dev *hdev = vport->back;
2678 if (!hdev->hw.mac.support_autoneg) {
2680 dev_err(&hdev->pdev->dev,
2681 "autoneg is not supported by current port\n");
2688 return hclge_set_autoneg_en(hdev, enable);
2691 static int hclge_get_autoneg(struct hnae3_handle *handle)
2693 struct hclge_vport *vport = hclge_get_vport(handle);
2694 struct hclge_dev *hdev = vport->back;
2695 struct phy_device *phydev = hdev->hw.mac.phydev;
2698 return phydev->autoneg;
2700 return hdev->hw.mac.autoneg;
2703 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2705 struct hclge_vport *vport = hclge_get_vport(handle);
2706 struct hclge_dev *hdev = vport->back;
2709 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2711 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2714 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2717 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2719 struct hclge_vport *vport = hclge_get_vport(handle);
2720 struct hclge_dev *hdev = vport->back;
2722 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2723 return hclge_set_autoneg_en(hdev, !halt);
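/* FEC configuration sketch for the helper below: HNAE3_FEC_AUTO maps
 * to the HCLGE_MAC_CFG_FEC_AUTO_EN_B bit, while RS and BaseR select
 * HCLGE_MAC_FEC_RS or HCLGE_MAC_FEC_BASER in the
 * HCLGE_MAC_CFG_FEC_MODE_M field of the same command.
 */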
2728 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2730 struct hclge_config_fec_cmd *req;
2731 struct hclge_desc desc;
2734 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2736 req = (struct hclge_config_fec_cmd *)desc.data;
2737 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2738 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2739 if (fec_mode & BIT(HNAE3_FEC_RS))
2740 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2741 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2742 if (fec_mode & BIT(HNAE3_FEC_BASER))
2743 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2744 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2746 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2748 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2753 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2755 struct hclge_vport *vport = hclge_get_vport(handle);
2756 struct hclge_dev *hdev = vport->back;
2757 struct hclge_mac *mac = &hdev->hw.mac;
2760 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2761 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2765 ret = hclge_set_fec_hw(hdev, fec_mode);
2769 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2773 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2776 struct hclge_vport *vport = hclge_get_vport(handle);
2777 struct hclge_dev *hdev = vport->back;
2778 struct hclge_mac *mac = &hdev->hw.mac;
2781 *fec_ability = mac->fec_ability;
2783 *fec_mode = mac->fec_mode;
2786 static int hclge_mac_init(struct hclge_dev *hdev)
2788 struct hclge_mac *mac = &hdev->hw.mac;
2791 hdev->support_sfp_query = true;
2792 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2793 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2794 hdev->hw.mac.duplex);
2798 if (hdev->hw.mac.support_autoneg) {
2799 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2806 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2807 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2812 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2814 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2818 ret = hclge_set_default_loopback(hdev);
2822 ret = hclge_buffer_alloc(hdev);
2824 dev_err(&hdev->pdev->dev,
2825 "allocate buffer fail, ret=%d\n", ret);
2830 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2832 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2833 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2834 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2835 hclge_wq, &hdev->service_task, 0);
2838 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2840 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2841 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2842 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2843 hclge_wq, &hdev->service_task, 0);
2846 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2848 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2849 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2850 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2851 hclge_wq, &hdev->service_task,
2855 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2857 struct hclge_link_status_cmd *req;
2858 struct hclge_desc desc;
2861 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2862 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2864 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2869 req = (struct hclge_link_status_cmd *)desc.data;
2870 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2871 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2876 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2878 struct phy_device *phydev = hdev->hw.mac.phydev;
2880 *link_status = HCLGE_LINK_STATUS_DOWN;
2882 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2885 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2888 return hclge_get_mac_link_status(hdev, link_status);
2891 static void hclge_push_link_status(struct hclge_dev *hdev)
2893 struct hclge_vport *vport;
2897 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2898 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2900 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2901 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2904 ret = hclge_push_vf_link_status(vport);
2906 dev_err(&hdev->pdev->dev,
2907 "failed to push link status to vf%u, ret = %d\n",
2913 static void hclge_update_link_status(struct hclge_dev *hdev)
2915 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2916 struct hnae3_handle *handle = &hdev->vport[0].nic;
2917 struct hnae3_client *rclient = hdev->roce_client;
2918 struct hnae3_client *client = hdev->nic_client;
2925 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2928 ret = hclge_get_mac_phy_link(hdev, &state);
2930 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2934 if (state != hdev->hw.mac.link) {
2935 client->ops->link_status_change(handle, state);
2936 hclge_config_mac_tnl_int(hdev, state);
2937 if (rclient && rclient->ops->link_status_change)
2938 rclient->ops->link_status_change(rhandle, state);
2940 hdev->hw.mac.link = state;
2941 hclge_push_link_status(hdev);
2944 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2947 static void hclge_update_port_capability(struct hclge_dev *hdev,
2948 struct hclge_mac *mac)
2950 if (hnae3_dev_fec_supported(hdev))
2951 /* update fec ability by speed */
2952 hclge_convert_setting_fec(mac);
2954 /* firmware cannot identify the backplane type; the media type
2955 * read from the configuration can help to handle it
2957 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2958 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2959 mac->module_type = HNAE3_MODULE_TYPE_KR;
2960 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2961 mac->module_type = HNAE3_MODULE_TYPE_TP;
2963 if (mac->support_autoneg) {
2964 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2965 linkmode_copy(mac->advertising, mac->supported);
2967 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2969 linkmode_zero(mac->advertising);
2973 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2975 struct hclge_sfp_info_cmd *resp;
2976 struct hclge_desc desc;
2979 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2980 resp = (struct hclge_sfp_info_cmd *)desc.data;
2981 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2982 if (ret == -EOPNOTSUPP) {
2983 dev_warn(&hdev->pdev->dev,
2984 "IMP do not support get SFP speed %d\n", ret);
2987 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2991 *speed = le32_to_cpu(resp->speed);
2996 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2998 struct hclge_sfp_info_cmd *resp;
2999 struct hclge_desc desc;
3002 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3003 resp = (struct hclge_sfp_info_cmd *)desc.data;
3005 resp->query_type = QUERY_ACTIVE_SPEED;
3007 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3008 if (ret == -EOPNOTSUPP) {
3009 dev_warn(&hdev->pdev->dev,
3010 "IMP does not support get SFP info %d\n", ret);
3013 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3017 /* In some cases, the MAC speed fetched from the IMP may be 0; it
3018 * should not be written to mac->speed.
3020 if (!le32_to_cpu(resp->speed))
3023 mac->speed = le32_to_cpu(resp->speed);
3024 /* if resp->speed_ability is 0, it means the firmware is an old
3025 * version, so do not update these parameters
3027 if (resp->speed_ability) {
3028 mac->module_type = le32_to_cpu(resp->module_type);
3029 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3030 mac->autoneg = resp->autoneg;
3031 mac->support_autoneg = resp->autoneg_ability;
3032 mac->speed_type = QUERY_ACTIVE_SPEED;
3033 if (!resp->active_fec)
3036 mac->fec_mode = BIT(resp->active_fec);
3038 mac->speed_type = QUERY_SFP_SPEED;
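/* The PHY ksettings below are transferred in two chained descriptors
 * (HCLGE_PHY_LINK_SETTING_BD_NUM): desc[0] carries the legacy ethtool
 * fields and the u32 link-mode bitmaps, desc[1] the master/slave
 * configuration and state; the u32 bitmaps are converted with the
 * ethtool_convert_legacy_u32_to_link_mode() helpers.
 */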
3044 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3045 struct ethtool_link_ksettings *cmd)
3047 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3048 struct hclge_vport *vport = hclge_get_vport(handle);
3049 struct hclge_phy_link_ksetting_0_cmd *req0;
3050 struct hclge_phy_link_ksetting_1_cmd *req1;
3051 u32 supported, advertising, lp_advertising;
3052 struct hclge_dev *hdev = vport->back;
3055 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3057 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3058 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3061 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3063 dev_err(&hdev->pdev->dev,
3064 "failed to get phy link ksetting, ret = %d.\n", ret);
3068 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3069 cmd->base.autoneg = req0->autoneg;
3070 cmd->base.speed = le32_to_cpu(req0->speed);
3071 cmd->base.duplex = req0->duplex;
3072 cmd->base.port = req0->port;
3073 cmd->base.transceiver = req0->transceiver;
3074 cmd->base.phy_address = req0->phy_address;
3075 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3076 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3077 supported = le32_to_cpu(req0->supported);
3078 advertising = le32_to_cpu(req0->advertising);
3079 lp_advertising = le32_to_cpu(req0->lp_advertising);
3080 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3082 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3084 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3087 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3088 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3089 cmd->base.master_slave_state = req1->master_slave_state;
3095 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3096 const struct ethtool_link_ksettings *cmd)
3098 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3099 struct hclge_vport *vport = hclge_get_vport(handle);
3100 struct hclge_phy_link_ksetting_0_cmd *req0;
3101 struct hclge_phy_link_ksetting_1_cmd *req1;
3102 struct hclge_dev *hdev = vport->back;
3106 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3107 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3108 (cmd->base.duplex != DUPLEX_HALF &&
3109 cmd->base.duplex != DUPLEX_FULL)))
3112 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3114 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3115 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3118 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3119 req0->autoneg = cmd->base.autoneg;
3120 req0->speed = cpu_to_le32(cmd->base.speed);
3121 req0->duplex = cmd->base.duplex;
3122 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3123 cmd->link_modes.advertising);
3124 req0->advertising = cpu_to_le32(advertising);
3125 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3127 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3128 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3130 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3132 dev_err(&hdev->pdev->dev,
3133 "failed to set phy link ksettings, ret = %d.\n", ret);
3137 hdev->hw.mac.autoneg = cmd->base.autoneg;
3138 hdev->hw.mac.speed = cmd->base.speed;
3139 hdev->hw.mac.duplex = cmd->base.duplex;
3140 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3145 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3147 struct ethtool_link_ksettings cmd;
3150 if (!hnae3_dev_phy_imp_supported(hdev))
3153 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3157 hdev->hw.mac.autoneg = cmd.base.autoneg;
3158 hdev->hw.mac.speed = cmd.base.speed;
3159 hdev->hw.mac.duplex = cmd.base.duplex;
3164 static int hclge_tp_port_init(struct hclge_dev *hdev)
3166 struct ethtool_link_ksettings cmd;
3168 if (!hnae3_dev_phy_imp_supported(hdev))
3171 cmd.base.autoneg = hdev->hw.mac.autoneg;
3172 cmd.base.speed = hdev->hw.mac.speed;
3173 cmd.base.duplex = hdev->hw.mac.duplex;
3174 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3176 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3179 static int hclge_update_port_info(struct hclge_dev *hdev)
3181 struct hclge_mac *mac = &hdev->hw.mac;
3182 int speed = HCLGE_MAC_SPEED_UNKNOWN;
3185 /* get the port info from SFP cmd if not copper port */
3186 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3187 return hclge_update_tp_port_info(hdev);
3189 /* if IMP does not support getting SFP/qSFP info, return directly */
3190 if (!hdev->support_sfp_query)
3193 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3194 ret = hclge_get_sfp_info(hdev, mac);
3196 ret = hclge_get_sfp_speed(hdev, &speed);
3198 if (ret == -EOPNOTSUPP) {
3199 hdev->support_sfp_query = false;
3205 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3206 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3207 hclge_update_port_capability(hdev, mac);
3210 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3213 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3214 return 0; /* do nothing if no SFP */
3216 /* SFP must be configured as full duplex */
3217 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3221 static int hclge_get_status(struct hnae3_handle *handle)
3223 struct hclge_vport *vport = hclge_get_vport(handle);
3224 struct hclge_dev *hdev = vport->back;
3226 hclge_update_link_status(hdev);
3228 return hdev->hw.mac.link;
3231 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3233 if (!pci_num_vf(hdev->pdev)) {
3234 dev_err(&hdev->pdev->dev,
3235 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3239 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3240 dev_err(&hdev->pdev->dev,
3241 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3242 vf, pci_num_vf(hdev->pdev));
3246 /* VFs start from index 1 in the vport array */
3247 vf += HCLGE_VF_VPORT_START_NUM;
3248 return &hdev->vport[vf];
3251 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3252 struct ifla_vf_info *ivf)
3254 struct hclge_vport *vport = hclge_get_vport(handle);
3255 struct hclge_dev *hdev = vport->back;
3257 vport = hclge_get_vf_vport(hdev, vf);
3262 ivf->linkstate = vport->vf_info.link_state;
3263 ivf->spoofchk = vport->vf_info.spoofchk;
3264 ivf->trusted = vport->vf_info.trusted;
3265 ivf->min_tx_rate = 0;
3266 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3267 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3268 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3269 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3270 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3275 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3278 struct hclge_vport *vport = hclge_get_vport(handle);
3279 struct hclge_dev *hdev = vport->back;
3283 vport = hclge_get_vf_vport(hdev, vf);
3287 link_state_old = vport->vf_info.link_state;
3288 vport->vf_info.link_state = link_state;
3290 ret = hclge_push_vf_link_status(vport);
3292 vport->vf_info.link_state = link_state_old;
3293 dev_err(&hdev->pdev->dev,
3294 "failed to push vf%d link status, ret = %d\n", vf, ret);
3300 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3302 u32 cmdq_src_reg, msix_src_reg;
3304 /* fetch the events from their corresponding regs */
3305 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3306 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3308 /* Assumption: if reset and mailbox events are reported together,
3309 * we will only process the reset event in this pass and will defer
3310 * the processing of the mailbox events. Since we will not have
3311 * cleared the RX CMDQ event this time, we will receive another
3312 * interrupt from the hardware just for the mailbox.
3314 * check for vector0 reset event sources
3316 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3317 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3318 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3319 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3320 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3321 hdev->rst_stats.imp_rst_cnt++;
3322 return HCLGE_VECTOR0_EVENT_RST;
3325 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3326 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3327 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3328 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3329 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3330 hdev->rst_stats.global_rst_cnt++;
3331 return HCLGE_VECTOR0_EVENT_RST;
3334 /* check for vector0 msix event source */
3335 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3336 *clearval = msix_src_reg;
3337 return HCLGE_VECTOR0_EVENT_ERR;
3340 /* check for vector0 mailbox(=CMDQ RX) event source */
3341 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3342 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3343 *clearval = cmdq_src_reg;
3344 return HCLGE_VECTOR0_EVENT_MBX;
3347 /* print other vector0 event source */
3348 dev_info(&hdev->pdev->dev,
3349 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3350 cmdq_src_reg, msix_src_reg);
3351 *clearval = msix_src_reg;
3353 return HCLGE_VECTOR0_EVENT_OTHER;
3356 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3359 switch (event_type) {
3360 case HCLGE_VECTOR0_EVENT_RST:
3361 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3363 case HCLGE_VECTOR0_EVENT_MBX:
3364 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3371 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3373 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3374 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3375 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3376 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3377 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3380 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3382 writel(enable ? 1 : 0, vector->addr);
3385 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3387 struct hclge_dev *hdev = data;
3391 hclge_enable_vector(&hdev->misc_vector, false);
3392 event_cause = hclge_check_event_cause(hdev, &clearval);
3394 /* vector 0 interrupt is shared with reset and mailbox source events. */
3395 switch (event_cause) {
3396 case HCLGE_VECTOR0_EVENT_ERR:
3397 /* we do not know what type of reset is required now. This could
3398 * only be decided after we fetch the type of errors which
3399 * caused this event. Therefore, we will do the following for now:
3400 * 1. Assert an HNAE3_UNKNOWN_RESET type of reset. This means we
3401 *    have deferred choosing the type of reset to be used.
3402 * 2. Schedule the reset service task.
3403 * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
3404 *    will fetch the correct type of reset. This is done
3405 *    by first decoding the types of errors.
3407 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3409 case HCLGE_VECTOR0_EVENT_RST:
3410 hclge_reset_task_schedule(hdev);
3412 case HCLGE_VECTOR0_EVENT_MBX:
3413 /* If we are here then,
3414 * 1. Either we are not handling any mbx task and none is
3415 *    scheduled as well,
3416 *                        OR
3417 * 2. We could be handling an mbx task but nothing more is
3418 *    scheduled.
3419 * In both cases, we should schedule the mbx task as there are more
3420 * mbx messages reported by this interrupt.
3422 hclge_mbx_task_schedule(hdev);
3425 dev_warn(&hdev->pdev->dev,
3426 "received unknown or unhandled event of vector0\n");
3430 hclge_clear_event_cause(hdev, event_cause, clearval);
3432 /* Enable the interrupt if it is not caused by reset. When
3433 * clearval equals 0, the interrupt status may have been
3434 * cleared by hardware before the driver read the status register;
3435 * in this case, the vector0 interrupt should also be enabled.
3438 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3439 hclge_enable_vector(&hdev->misc_vector, true);
3445 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3447 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3448 dev_warn(&hdev->pdev->dev,
3449 "vector(vector_id %d) has been freed.\n", vector_id);
3453 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3454 hdev->num_msi_left += 1;
3455 hdev->num_msi_used -= 1;
3458 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3460 struct hclge_misc_vector *vector = &hdev->misc_vector;
3462 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3464 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3465 hdev->vector_status[0] = 0;
3467 hdev->num_msi_left -= 1;
3468 hdev->num_msi_used += 1;
3471 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3472 const cpumask_t *mask)
3474 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3477 cpumask_copy(&hdev->affinity_mask, mask);
3480 static void hclge_irq_affinity_release(struct kref *ref)
3484 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3486 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3487 &hdev->affinity_mask);
3489 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3490 hdev->affinity_notify.release = hclge_irq_affinity_release;
3491 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3492 &hdev->affinity_notify);
3495 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3497 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3498 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3501 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3505 hclge_get_misc_vector(hdev);
3507 /* this would be explicitly freed in the end */
3508 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3509 HCLGE_NAME, pci_name(hdev->pdev));
3510 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3511 0, hdev->misc_vector.name, hdev);
3513 hclge_free_vector(hdev, 0);
3514 dev_err(&hdev->pdev->dev, "request misc irq(%d) failed\n",
3515 hdev->misc_vector.vector_irq);
3521 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3523 free_irq(hdev->misc_vector.vector_irq, hdev);
3524 hclge_free_vector(hdev, 0);
3527 int hclge_notify_client(struct hclge_dev *hdev,
3528 enum hnae3_reset_notify_type type)
3530 struct hnae3_handle *handle = &hdev->vport[0].nic;
3531 struct hnae3_client *client = hdev->nic_client;
3534 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3537 if (!client->ops->reset_notify)
3540 ret = client->ops->reset_notify(handle, type);
3542 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3548 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3549 enum hnae3_reset_notify_type type)
3551 struct hnae3_handle *handle = &hdev->vport[0].roce;
3552 struct hnae3_client *client = hdev->roce_client;
3555 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3558 if (!client->ops->reset_notify)
3561 ret = client->ops->reset_notify(handle, type);
3563 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
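/* hclge_reset_wait() below polls the relevant reset register until
 * the reset bit clears, sleeping HCLGE_RESET_WAIT_MS (100 ms) between
 * reads for at most HCLGE_RESET_WAIT_CNT (350) iterations, i.e. a
 * worst-case wait of roughly 35 seconds before reporting a timeout.
 */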
3569 static int hclge_reset_wait(struct hclge_dev *hdev)
3571 #define HCLGE_RESET_WAIT_MS 100
3572 #define HCLGE_RESET_WAIT_CNT 350
3574 u32 val, reg, reg_bit;
3577 switch (hdev->reset_type) {
3578 case HNAE3_IMP_RESET:
3579 reg = HCLGE_GLOBAL_RESET_REG;
3580 reg_bit = HCLGE_IMP_RESET_BIT;
3582 case HNAE3_GLOBAL_RESET:
3583 reg = HCLGE_GLOBAL_RESET_REG;
3584 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3586 case HNAE3_FUNC_RESET:
3587 reg = HCLGE_FUN_RST_ING;
3588 reg_bit = HCLGE_FUN_RST_ING_B;
3591 dev_err(&hdev->pdev->dev,
3592 "Wait for unsupported reset type: %d\n",
3597 val = hclge_read_dev(&hdev->hw, reg);
3598 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3599 msleep(HCLGE_RESET_WAIT_MS);
3600 val = hclge_read_dev(&hdev->hw, reg);
3604 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3605 dev_warn(&hdev->pdev->dev,
3606 "Wait for reset timeout: %d\n", hdev->reset_type);
3613 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3615 struct hclge_vf_rst_cmd *req;
3616 struct hclge_desc desc;
3618 req = (struct hclge_vf_rst_cmd *)desc.data;
3619 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3620 req->dest_vfid = func_id;
3625 return hclge_cmd_send(&hdev->hw, &desc, 1);
3628 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3632 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3633 struct hclge_vport *vport = &hdev->vport[i];
3636 /* Send cmd to set/clear VF's FUNC_RST_ING */
3637 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3639 dev_err(&hdev->pdev->dev,
3640 "set vf(%u) rst failed %d!\n",
3641 vport->vport_id, ret);
3645 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3648 /* Inform VF to process the reset.
3649 * hclge_inform_reset_assert_to_vf may fail if VF
3650 * driver is not loaded.
3652 ret = hclge_inform_reset_assert_to_vf(vport);
3654 dev_warn(&hdev->pdev->dev,
3655 "inform reset to vf(%u) failed %d!\n",
3656 vport->vport_id, ret);
3662 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3664 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3665 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3666 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3669 hclge_mbx_handler(hdev);
3671 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
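/* The sync loop below polls the all_vf_ready flag up to
 * HCLGE_PF_RESET_SYNC_CNT (1500) times with HCLGE_PF_RESET_SYNC_TIME
 * (20 ms) between tries, i.e. it allows roughly 30 seconds for every
 * VF to stop I/O before warning about a timeout.
 */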
3674 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3676 struct hclge_pf_rst_sync_cmd *req;
3677 struct hclge_desc desc;
3681 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3682 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3685 /* VFs need to bring their netdev down via mbx during PF or FLR reset */
3686 hclge_mailbox_service_task(hdev);
3688 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3689 /* for compatibility with old firmware, wait
3690 * 100 ms for the VF to stop I/O
3692 if (ret == -EOPNOTSUPP) {
3693 msleep(HCLGE_RESET_SYNC_TIME);
3696 dev_warn(&hdev->pdev->dev, "sync with VF failed %d!\n",
3699 } else if (req->all_vf_ready) {
3702 msleep(HCLGE_PF_RESET_SYNC_TIME);
3703 hclge_cmd_reuse_desc(&desc, true);
3704 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3706 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3709 void hclge_report_hw_error(struct hclge_dev *hdev,
3710 enum hnae3_hw_error_type type)
3712 struct hnae3_client *client = hdev->nic_client;
3714 if (!client || !client->ops->process_hw_error ||
3715 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3718 client->ops->process_hw_error(&hdev->vport[0].nic, type);
3721 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3725 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3726 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3727 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3728 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3729 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3732 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3733 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3734 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3735 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3739 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3741 struct hclge_desc desc;
3742 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3745 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3746 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3747 req->fun_reset_vfid = func_id;
3749 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3751 dev_err(&hdev->pdev->dev,
3752 "send function reset cmd fail, status =%d\n", ret);
3757 static void hclge_do_reset(struct hclge_dev *hdev)
3759 struct hnae3_handle *handle = &hdev->vport[0].nic;
3760 struct pci_dev *pdev = hdev->pdev;
3763 if (hclge_get_hw_reset_stat(handle)) {
3764 dev_info(&pdev->dev, "hardware reset not finished\n");
3765 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3766 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3767 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3771 switch (hdev->reset_type) {
3772 case HNAE3_GLOBAL_RESET:
3773 dev_info(&pdev->dev, "global reset requested\n");
3774 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3775 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3776 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3778 case HNAE3_FUNC_RESET:
3779 dev_info(&pdev->dev, "PF reset requested\n");
3780 /* schedule again to check later */
3781 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3782 hclge_reset_task_schedule(hdev);
3785 dev_warn(&pdev->dev,
3786 "unsupported reset type: %d\n", hdev->reset_type);
3791 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3792 unsigned long *addr)
3794 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3795 struct hclge_dev *hdev = ae_dev->priv;
3797 /* first, resolve any unknown reset type to the known type(s) */
3798 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3799 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3800 HCLGE_MISC_VECTOR_INT_STS);
3801 /* we will intentionally ignore any errors from this function
3802 * as we will end up in *some* reset request in any case
3804 if (hclge_handle_hw_msix_error(hdev, addr))
3805 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3808 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3809 /* We deferred the clearing of the error event which caused the
3810 * interrupt since it was not possible to do that in
3811 * interrupt context (and this is the reason we introduced the
3812 * new UNKNOWN reset type). Now that the errors have been
3813 * handled and cleared in hardware, we can safely enable
3814 * interrupts. This is an exception to the norm.
3816 hclge_enable_vector(&hdev->misc_vector, true);
3819 /* return the highest priority reset level amongst all */
3820 if (test_bit(HNAE3_IMP_RESET, addr)) {
3821 rst_level = HNAE3_IMP_RESET;
3822 clear_bit(HNAE3_IMP_RESET, addr);
3823 clear_bit(HNAE3_GLOBAL_RESET, addr);
3824 clear_bit(HNAE3_FUNC_RESET, addr);
3825 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3826 rst_level = HNAE3_GLOBAL_RESET;
3827 clear_bit(HNAE3_GLOBAL_RESET, addr);
3828 clear_bit(HNAE3_FUNC_RESET, addr);
3829 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3830 rst_level = HNAE3_FUNC_RESET;
3831 clear_bit(HNAE3_FUNC_RESET, addr);
3832 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3833 rst_level = HNAE3_FLR_RESET;
3834 clear_bit(HNAE3_FLR_RESET, addr);
3837 if (hdev->reset_type != HNAE3_NONE_RESET &&
3838 rst_level < hdev->reset_type)
3839 return HNAE3_NONE_RESET;
3844 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3848 switch (hdev->reset_type) {
3849 case HNAE3_IMP_RESET:
3850 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3852 case HNAE3_GLOBAL_RESET:
3853 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3862 /* For revision 0x20, the reset interrupt source
3863 * can only be cleared after the hardware reset is done
3865 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3866 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3869 hclge_enable_vector(&hdev->misc_vector, true);
3872 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3876 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3878 reg_val |= HCLGE_NIC_SW_RST_RDY;
3880 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3882 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3885 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3889 ret = hclge_set_all_vf_rst(hdev, true);
3893 hclge_func_reset_sync_vf(hdev);
3898 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3903 switch (hdev->reset_type) {
3904 case HNAE3_FUNC_RESET:
3905 ret = hclge_func_reset_notify_vf(hdev);
3909 ret = hclge_func_reset_cmd(hdev, 0);
3911 dev_err(&hdev->pdev->dev,
3912 "asserting function reset fail %d!\n", ret);
3916 /* After performing a PF reset, it is not necessary to do any
3917 * mailbox handling or send any command to firmware, because
3918 * any mailbox handling or command to firmware is only valid
3919 * after hclge_cmd_init is called.
3921 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3922 hdev->rst_stats.pf_rst_cnt++;
3924 case HNAE3_FLR_RESET:
3925 ret = hclge_func_reset_notify_vf(hdev);
3929 case HNAE3_IMP_RESET:
3930 hclge_handle_imp_error(hdev);
3931 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3932 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3933 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3939 /* inform hardware that preparatory work is done */
3940 msleep(HCLGE_RESET_SYNC_TIME);
3941 hclge_reset_handshake(hdev, true);
3942 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3947 static void hclge_show_rst_info(struct hclge_dev *hdev)
3951 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3955 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3957 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3962 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3964 #define MAX_RESET_FAIL_CNT 5
3966 if (hdev->reset_pending) {
3967 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3968 hdev->reset_pending);
3970 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3971 HCLGE_RESET_INT_M) {
3972 dev_info(&hdev->pdev->dev,
3973 "reset failed because new reset interrupt\n");
3974 hclge_clear_reset_cause(hdev);
3976 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3977 hdev->rst_stats.reset_fail_cnt++;
3978 set_bit(hdev->reset_type, &hdev->reset_pending);
3979 dev_info(&hdev->pdev->dev,
3980 "re-schedule reset task(%u)\n",
3981 hdev->rst_stats.reset_fail_cnt);
3985 hclge_clear_reset_cause(hdev);
3987 /* recover the handshake status when reset fails */
3988 hclge_reset_handshake(hdev, true);
3990 dev_err(&hdev->pdev->dev, "Reset failed!\n");
3992 hclge_show_rst_info(hdev);
3994 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3999 static void hclge_update_reset_level(struct hclge_dev *hdev)
4001 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4002 enum hnae3_reset_type reset_level;
4004 /* the reset request will not be set during reset, so clear the
4005 * pending reset request to avoid an unnecessary reset
4006 * caused by the same reason.
4008 hclge_get_reset_level(ae_dev, &hdev->reset_request);
4010 /* if default_reset_request has a higher-level reset request,
4011 * it should be handled as soon as possible, since some errors
4012 * need this kind of reset to be fixed.
4014 reset_level = hclge_get_reset_level(ae_dev,
4015 &hdev->default_reset_request);
4016 if (reset_level != HNAE3_NONE_RESET)
4017 set_bit(reset_level, &hdev->reset_request);
4020 static int hclge_set_rst_done(struct hclge_dev *hdev)
4022 struct hclge_pf_rst_done_cmd *req;
4023 struct hclge_desc desc;
4026 req = (struct hclge_pf_rst_done_cmd *)desc.data;
4027 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4028 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4030 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4031 /* To be compatible with the old firmware, which does not support
4032 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4033 * return success
4034 */
4035 if (ret == -EOPNOTSUPP) {
4036 dev_warn(&hdev->pdev->dev,
4037 "current firmware does not support command(0x%x)!\n",
4038 HCLGE_OPC_PF_RST_DONE);
4041 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4048 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4052 switch (hdev->reset_type) {
4053 case HNAE3_FUNC_RESET:
4054 case HNAE3_FLR_RESET:
4055 ret = hclge_set_all_vf_rst(hdev, false);
4057 case HNAE3_GLOBAL_RESET:
4058 case HNAE3_IMP_RESET:
4059 ret = hclge_set_rst_done(hdev);
4065 /* clear the handshake status after re-initialization is done */
4066 hclge_reset_handshake(hdev, false);
4071 static int hclge_reset_stack(struct hclge_dev *hdev)
4075 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4079 ret = hclge_reset_ae_dev(hdev->ae_dev);
4083 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4086 static int hclge_reset_prepare(struct hclge_dev *hdev)
4090 hdev->rst_stats.reset_cnt++;
4091 /* perform reset of the stack & ae device for a client */
4092 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4097 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4102 return hclge_reset_prepare_wait(hdev);
4105 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4109 hdev->rst_stats.hw_reset_done_cnt++;
4111 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4116 ret = hclge_reset_stack(hdev);
4121 hclge_clear_reset_cause(hdev);
4123 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4124 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4125 * times
4126 */
4127 if (ret &&
4128     hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4131 ret = hclge_reset_prepare_up(hdev);
4136 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4141 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4145 hdev->last_reset_time = jiffies;
4146 hdev->rst_stats.reset_fail_cnt = 0;
4147 hdev->rst_stats.reset_done_cnt++;
4148 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4150 hclge_update_reset_level(hdev);
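/* The overall reset flow driven below: prepare (notify clients down
 * and assert the reset) -> wait (poll for hardware completion) ->
 * rebuild (re-init the stack and notify clients up). Any failure
 * falls through to hclge_reset_err_handle(), which may reschedule the
 * reset task.
 */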
4155 static void hclge_reset(struct hclge_dev *hdev)
4157 if (hclge_reset_prepare(hdev))
4160 if (hclge_reset_wait(hdev))
4163 if (hclge_reset_rebuild(hdev))
4169 if (hclge_reset_err_handle(hdev))
4170 hclge_reset_task_schedule(hdev);
4173 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4175 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4176 struct hclge_dev *hdev = ae_dev->priv;
4178 /* We might end up getting called broadly because of the 2 cases below:
4179 * 1. A recoverable error was conveyed through APEI and the only way
4180 *    to bring back normalcy is to reset.
4181 * 2. A new reset request from the stack due to timeout
4182 *
4183 * Check if this is a new reset request and we are not here just because
4184 * the last reset attempt did not succeed and the watchdog hit us again.
4185 * We will know this if the last reset request did not occur very recently
4186 * (watchdog timer = 5*HZ; let us check after a sufficiently large time,
4187 * say 4*5*HZ). In case of a new request we reset the "reset level" to PF
4188 * reset. And if it is a repeat of the most recent reset request, we want
4189 * to make sure we throttle it. Therefore, we will not allow another one
4190 * before 3*HZ has elapsed.
4193 if (time_before(jiffies, (hdev->last_reset_time +
4194 HCLGE_RESET_INTERVAL))) {
4195 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4199 if (hdev->default_reset_request) {
4201 hclge_get_reset_level(ae_dev,
4202 &hdev->default_reset_request);
4203 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4204 hdev->reset_level = HNAE3_FUNC_RESET;
4207 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4210 /* request reset & schedule reset task */
4211 set_bit(hdev->reset_level, &hdev->reset_request);
4212 hclge_reset_task_schedule(hdev);
4214 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4215 hdev->reset_level++;
4218 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4219 enum hnae3_reset_type rst_type)
4221 struct hclge_dev *hdev = ae_dev->priv;
4223 set_bit(rst_type, &hdev->default_reset_request);
4226 static void hclge_reset_timer(struct timer_list *t)
4228 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4230 /* if default_reset_request has no value, it means that this reset
4231 * request has already been handled, so just return here
4233 if (!hdev->default_reset_request)
4236 dev_info(&hdev->pdev->dev,
4237 "triggering reset in reset timer\n");
4238 hclge_reset_event(hdev->pdev, NULL);
4241 static void hclge_reset_subtask(struct hclge_dev *hdev)
4243 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4245 /* check if there is any ongoing reset in the hardware. This status can
4246 * be checked from reset_pending. If there is one, we need to wait for
4247 * the hardware to complete the reset.
4248 * a. If we are able to figure out in reasonable time that the hardware
4249 *    has fully reset, then we can proceed with the driver and client
4250 *    reset.
4251 * b. else, we can come back later to check this status, so just
4252 *    reschedule now.
4253 */
4254 hdev->last_reset_time = jiffies;
4255 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4256 if (hdev->reset_type != HNAE3_NONE_RESET)
4259 /* check if we got any *new* reset requests to be honored */
4260 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4261 if (hdev->reset_type != HNAE3_NONE_RESET)
4262 hclge_do_reset(hdev);
4264 hdev->reset_type = HNAE3_NONE_RESET;
4267 static void hclge_reset_service_task(struct hclge_dev *hdev)
4269 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4272 down(&hdev->reset_sem);
4273 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4275 hclge_reset_subtask(hdev);
4277 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4278 up(&hdev->reset_sem);
4281 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4285 /* start from vport 1, since the PF (vport 0) is always alive */
4286 for (i = 1; i < hdev->num_alloc_vport; i++) {
4287 struct hclge_vport *vport = &hdev->vport[i];
4289 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4290 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4292 /* If the VF is not alive, restore its MPS to the default value */
4293 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4294 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4298 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4300 unsigned long delta = round_jiffies_relative(HZ);
4302 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4305 /* Always handle the link update to make sure the link state is
4306 * refreshed when the update is triggered by mbx.
4308 hclge_update_link_status(hdev);
4309 hclge_sync_mac_table(hdev);
4310 hclge_sync_promisc_mode(hdev);
4311 hclge_sync_fd_table(hdev);
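/* throttle the periodic work to roughly once per second: if the last
 * pass ran less than HZ jiffies ago, only schedule the remainder of
 * that interval instead of a full one
 */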
4313 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4314 delta = jiffies - hdev->last_serv_processed;
4316 if (delta < round_jiffies_relative(HZ)) {
4317 delta = round_jiffies_relative(HZ) - delta;
4322 hdev->serv_processed_cnt++;
4323 hclge_update_vport_alive(hdev);
4325 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4326 hdev->last_serv_processed = jiffies;
4330 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4331 hclge_update_stats_for_all(hdev);
4333 hclge_update_port_info(hdev);
4334 hclge_sync_vlan_filter(hdev);
4336 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4337 hclge_rfs_filter_expire(hdev);
4339 hdev->last_serv_processed = jiffies;
4342 hclge_task_schedule(hdev, delta);
4345 static void hclge_service_task(struct work_struct *work)
4347 struct hclge_dev *hdev =
4348 container_of(work, struct hclge_dev, service_task.work);
4350 hclge_reset_service_task(hdev);
4351 hclge_mailbox_service_task(hdev);
4352 hclge_periodic_service_task(hdev);
4354 /* Handle reset and mbx again in case the periodic task delays the
4355 * handling by calling hclge_task_schedule() in
4356 * hclge_periodic_service_task().
4358 hclge_reset_service_task(hdev);
4359 hclge_mailbox_service_task(hdev);
4362 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4364 /* VF handle has no client */
4365 if (!handle->client)
4366 return container_of(handle, struct hclge_vport, nic);
4367 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4368 return container_of(handle, struct hclge_vport, roce);
4370 return container_of(handle, struct hclge_vport, nic);
4373 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4374 struct hnae3_vector_info *vector_info)
4376 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4378 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4380 /* need an extended offset to configure vectors numbered >= 64 */
4381 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4382 vector_info->io_addr = hdev->hw.io_base +
4383 HCLGE_VECTOR_REG_BASE +
4384 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4386 vector_info->io_addr = hdev->hw.io_base +
4387 HCLGE_VECTOR_EXT_REG_BASE +
4388 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4389 HCLGE_VECTOR_REG_OFFSET_H +
4390 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4391 HCLGE_VECTOR_REG_OFFSET;
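/* e.g. for idx 65, (idx - 1) = 64 falls in the extended region, so
 * io_addr = io_base + HCLGE_VECTOR_EXT_REG_BASE +
 * 1 * HCLGE_VECTOR_REG_OFFSET_H + 0 * HCLGE_VECTOR_REG_OFFSET
 */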
4393 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4394 hdev->vector_irq[idx] = vector_info->vector;
4397 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4398 struct hnae3_vector_info *vector_info)
4400 struct hclge_vport *vport = hclge_get_vport(handle);
4401 struct hnae3_vector_info *vector = vector_info;
4402 struct hclge_dev *hdev = vport->back;
4407 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4408 vector_num = min(hdev->num_msi_left, vector_num);
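/* clamp the request to the vectors actually available, then scan
 * vector_status below for unassigned slots and hand them out in order
 */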
4410 for (j = 0; j < vector_num; j++) {
4411 while (++i < hdev->num_nic_msi) {
4412 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4413 hclge_get_vector_info(hdev, i, vector);
4421 hdev->num_msi_left -= alloc;
4422 hdev->num_msi_used += alloc;
4427 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4431 for (i = 0; i < hdev->num_msi; i++)
4432 if (vector == hdev->vector_irq[i])
4438 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4440 struct hclge_vport *vport = hclge_get_vport(handle);
4441 struct hclge_dev *hdev = vport->back;
4444 vector_id = hclge_get_vector_index(hdev, vector);
4445 if (vector_id < 0) {
4446 dev_err(&hdev->pdev->dev,
4447 "Get vector index fail. vector = %d\n", vector);
4451 hclge_free_vector(hdev, vector_id);
4456 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4458 return HCLGE_RSS_KEY_SIZE;
4461 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4462 const u8 hfunc, const u8 *key)
4464 struct hclge_rss_config_cmd *req;
4465 unsigned int key_offset = 0;
4466 struct hclge_desc desc;
4471 key_counts = HCLGE_RSS_KEY_SIZE;
4472 req = (struct hclge_rss_config_cmd *)desc.data;
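/* the hash key is longer than one descriptor can carry, so it is
 * written in chunks of at most HCLGE_RSS_HASH_KEY_NUM bytes, with
 * key_offset telling the firmware which chunk this descriptor holds
 */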
4474 while (key_counts) {
4475 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4478 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4479 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4481 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4482 memcpy(req->hash_key,
4483 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4485 key_counts -= key_size;
4487 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4489 dev_err(&hdev->pdev->dev,
4490 "Configure RSS config fail, status = %d\n",
4498 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4500 struct hclge_rss_indirection_table_cmd *req;
4501 struct hclge_desc desc;
4502 int rss_cfg_tbl_num;
4510 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4511 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4512 HCLGE_RSS_CFG_TBL_SIZE;
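/* the indirection table is programmed HCLGE_RSS_CFG_TBL_SIZE entries
 * per descriptor; each queue id is split into a low byte in rss_qid_l
 * and a single msb bit packed into the rss_qid_h bitmap
 */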
4514 for (i = 0; i < rss_cfg_tbl_num; i++) {
4515 hclge_cmd_setup_basic_desc
4516 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4518 req->start_table_index =
4519 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4520 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4521 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4522 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4523 req->rss_qid_l[j] = qid & 0xff;
4525 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4526 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4527 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4528 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4530 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4532 dev_err(&hdev->pdev->dev,
4533 "Configure rss indir table fail,status = %d\n",
4541 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4542 u16 *tc_size, u16 *tc_offset)
4544 struct hclge_rss_tc_mode_cmd *req;
4545 struct hclge_desc desc;
4549 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4550 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4552 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4555 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4556 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4557 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4558 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4559 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4560 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4561 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4563 req->rss_tc_mode[i] = cpu_to_le16(mode);
4566 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4568 dev_err(&hdev->pdev->dev,
4569 "Configure rss tc mode fail, status = %d\n", ret);
4574 static void hclge_get_rss_type(struct hclge_vport *vport)
4576 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4577 vport->rss_tuple_sets.ipv4_udp_en ||
4578 vport->rss_tuple_sets.ipv4_sctp_en ||
4579 vport->rss_tuple_sets.ipv6_tcp_en ||
4580 vport->rss_tuple_sets.ipv6_udp_en ||
4581 vport->rss_tuple_sets.ipv6_sctp_en)
4582 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4583 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4584 vport->rss_tuple_sets.ipv6_fragment_en)
4585 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4587 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4590 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4592 struct hclge_rss_input_tuple_cmd *req;
4593 struct hclge_desc desc;
4596 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4598 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4600 /* Get the tuple cfg from pf */
4601 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4602 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4603 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4604 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4605 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4606 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4607 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4608 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4609 hclge_get_rss_type(&hdev->vport[0]);
4610 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4612 dev_err(&hdev->pdev->dev,
4613 "Configure rss input fail, status = %d\n", ret);
4617 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4620 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4621 struct hclge_vport *vport = hclge_get_vport(handle);
4624 /* Get hash algorithm */
4626 switch (vport->rss_algo) {
4627 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4628 *hfunc = ETH_RSS_HASH_TOP;
4630 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4631 *hfunc = ETH_RSS_HASH_XOR;
4634 *hfunc = ETH_RSS_HASH_UNKNOWN;
4639 /* Get the RSS Key required by the user */
4641 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4643 /* Get the indirection table */
4645 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4646 indir[i] = vport->rss_indirection_tbl[i];
4651 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4652 const u8 *key, const u8 hfunc)
4654 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4655 struct hclge_vport *vport = hclge_get_vport(handle);
4656 struct hclge_dev *hdev = vport->back;
4660 /* Set the RSS hash key if specified by the user */
4663 case ETH_RSS_HASH_TOP:
4664 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4666 case ETH_RSS_HASH_XOR:
4667 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4669 case ETH_RSS_HASH_NO_CHANGE:
4670 hash_algo = vport->rss_algo;
4676 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4680 /* Update the shadow RSS key with the user specified key */
4681 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4682 vport->rss_algo = hash_algo;
4685 /* Update the shadow RSS table with user specified qids */
4686 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4687 vport->rss_indirection_tbl[i] = indir[i];
4689 /* Update the hardware */
4690 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
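/* map the ethtool RXH_* flags onto the device tuple bits, e.g.
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 becomes
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT;
 * SCTP flows additionally hash the verification tag (HCLGE_V_TAG_BIT)
 */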
4693 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4695 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4697 if (nfc->data & RXH_L4_B_2_3)
4698 hash_sets |= HCLGE_D_PORT_BIT;
4700 hash_sets &= ~HCLGE_D_PORT_BIT;
4702 if (nfc->data & RXH_IP_SRC)
4703 hash_sets |= HCLGE_S_IP_BIT;
4705 hash_sets &= ~HCLGE_S_IP_BIT;
4707 if (nfc->data & RXH_IP_DST)
4708 hash_sets |= HCLGE_D_IP_BIT;
4710 hash_sets &= ~HCLGE_D_IP_BIT;
4712 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4713 hash_sets |= HCLGE_V_TAG_BIT;
4718 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4719 struct ethtool_rxnfc *nfc,
4720 struct hclge_rss_input_tuple_cmd *req)
4722 struct hclge_dev *hdev = vport->back;
4725 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4726 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4727 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4728 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4729 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4730 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4731 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4732 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4734 tuple_sets = hclge_get_rss_hash_bits(nfc);
4735 switch (nfc->flow_type) {
4737 req->ipv4_tcp_en = tuple_sets;
4740 req->ipv6_tcp_en = tuple_sets;
4743 req->ipv4_udp_en = tuple_sets;
4746 req->ipv6_udp_en = tuple_sets;
4749 req->ipv4_sctp_en = tuple_sets;
4752 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4753 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4756 req->ipv6_sctp_en = tuple_sets;
4759 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4762 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4771 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4772 struct ethtool_rxnfc *nfc)
4774 struct hclge_vport *vport = hclge_get_vport(handle);
4775 struct hclge_dev *hdev = vport->back;
4776 struct hclge_rss_input_tuple_cmd *req;
4777 struct hclge_desc desc;
4780 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4781 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4784 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4785 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4787 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4789 dev_err(&hdev->pdev->dev,
4790 "failed to init rss tuple cmd, ret = %d\n", ret);
4794 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4796 dev_err(&hdev->pdev->dev,
4797 "Set rss tuple fail, status = %d\n", ret);
4801 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4802 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4803 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4804 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4805 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4806 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4807 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4808 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4809 hclge_get_rss_type(vport);
4813 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4816 switch (flow_type) {
4818 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4821 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4824 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4827 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4830 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4833 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4837 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4846 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4850 if (tuple_sets & HCLGE_D_PORT_BIT)
4851 tuple_data |= RXH_L4_B_2_3;
4852 if (tuple_sets & HCLGE_S_PORT_BIT)
4853 tuple_data |= RXH_L4_B_0_1;
4854 if (tuple_sets & HCLGE_D_IP_BIT)
4855 tuple_data |= RXH_IP_DST;
4856 if (tuple_sets & HCLGE_S_IP_BIT)
4857 tuple_data |= RXH_IP_SRC;
4862 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4863 struct ethtool_rxnfc *nfc)
4865 struct hclge_vport *vport = hclge_get_vport(handle);
4871 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4872 if (ret || !tuple_sets)
4875 nfc->data = hclge_convert_rss_tuple(tuple_sets);
4880 static int hclge_get_tc_size(struct hnae3_handle *handle)
4882 struct hclge_vport *vport = hclge_get_vport(handle);
4883 struct hclge_dev *hdev = vport->back;
4885 return hdev->pf_rss_size_max;
4888 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4890 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4891 struct hclge_vport *vport = hdev->vport;
4892 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4893 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4894 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4895 struct hnae3_tc_info *tc_info;
4900 tc_info = &vport->nic.kinfo.tc_info;
4901 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4902 rss_size = tc_info->tqp_count[i];
4905 if (!(hdev->hw_tc_map & BIT(i)))
4908 /* tc_size set to hardware is the log2 of the roundup power of two
4909 * of rss_size; the actual queue size is limited by the indirection table.
4912 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4914 dev_err(&hdev->pdev->dev,
4915 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4920 roundup_size = roundup_pow_of_two(rss_size);
4921 roundup_size = ilog2(roundup_size);
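/* e.g. rss_size 6 rounds up to 8, so the tc_size written to hardware
 * is ilog2(8) = 3
 */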
4924 tc_size[i] = roundup_size;
4925 tc_offset[i] = tc_info->tqp_offset[i];
4928 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4931 int hclge_rss_init_hw(struct hclge_dev *hdev)
4933 struct hclge_vport *vport = hdev->vport;
4934 u16 *rss_indir = vport[0].rss_indirection_tbl;
4935 u8 *key = vport[0].rss_hash_key;
4936 u8 hfunc = vport[0].rss_algo;
4939 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4943 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4947 ret = hclge_set_rss_input_tuple(hdev);
4951 return hclge_init_rss_tc_mode(hdev);
4954 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4956 struct hclge_vport *vport = &hdev->vport[0];
4959 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4960 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
4963 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4965 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4966 int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4967 struct hclge_vport *vport = &hdev->vport[0];
4970 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4971 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4973 vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4974 vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4975 vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
4976 vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4977 vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4978 vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4979 vport->rss_tuple_sets.ipv6_sctp_en =
4980 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4981 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4982 HCLGE_RSS_INPUT_TUPLE_SCTP;
4983 vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4985 vport->rss_algo = rss_algo;
4987 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4988 sizeof(*rss_ind_tbl), GFP_KERNEL);
4992 vport->rss_indirection_tbl = rss_ind_tbl;
4993 memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
4995 hclge_rss_indir_init_cfg(hdev);
5000 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5001 int vector_id, bool en,
5002 struct hnae3_ring_chain_node *ring_chain)
5004 struct hclge_dev *hdev = vport->back;
5005 struct hnae3_ring_chain_node *node;
5006 struct hclge_desc desc;
5007 struct hclge_ctrl_vector_chain_cmd *req =
5008 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5009 enum hclge_cmd_status status;
5010 enum hclge_opcode_type op;
5011 u16 tqp_type_and_id;
5014 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5015 hclge_cmd_setup_basic_desc(&desc, op, false);
5016 req->int_vector_id_l = hnae3_get_field(vector_id,
5017 HCLGE_VECTOR_ID_L_M,
5018 HCLGE_VECTOR_ID_L_S);
5019 req->int_vector_id_h = hnae3_get_field(vector_id,
5020 HCLGE_VECTOR_ID_H_M,
5021 HCLGE_VECTOR_ID_H_S);
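/* walk the ring chain, packing ring type, tqp id and gl index into one
 * entry per ring; whenever HCLGE_VECTOR_ELEMENTS_PER_CMD entries are
 * filled, flush the descriptor and start a new one for the rest of the
 * chain
 */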
5024 for (node = ring_chain; node; node = node->next) {
5025 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5026 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
5028 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5029 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5030 HCLGE_TQP_ID_S, node->tqp_index);
5031 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5033 hnae3_get_field(node->int_gl_idx,
5034 HNAE3_RING_GL_IDX_M,
5035 HNAE3_RING_GL_IDX_S));
5036 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5037 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5038 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5039 req->vfid = vport->vport_id;
5041 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5043 dev_err(&hdev->pdev->dev,
5044 "Map TQP fail, status is %d.\n",
5050 hclge_cmd_setup_basic_desc(&desc,
5053 req->int_vector_id_l =
5054 hnae3_get_field(vector_id,
5055 HCLGE_VECTOR_ID_L_M,
5056 HCLGE_VECTOR_ID_L_S);
5057 req->int_vector_id_h =
5058 hnae3_get_field(vector_id,
5059 HCLGE_VECTOR_ID_H_M,
5060 HCLGE_VECTOR_ID_H_S);
5065 req->int_cause_num = i;
5066 req->vfid = vport->vport_id;
5067 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5069 dev_err(&hdev->pdev->dev,
5070 "Map TQP fail, status is %d.\n", status);
5078 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5079 struct hnae3_ring_chain_node *ring_chain)
5081 struct hclge_vport *vport = hclge_get_vport(handle);
5082 struct hclge_dev *hdev = vport->back;
5085 vector_id = hclge_get_vector_index(hdev, vector);
5086 if (vector_id < 0) {
5087 dev_err(&hdev->pdev->dev,
5088 "failed to get vector index. vector=%d\n", vector);
5092 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5095 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5096 struct hnae3_ring_chain_node *ring_chain)
5098 struct hclge_vport *vport = hclge_get_vport(handle);
5099 struct hclge_dev *hdev = vport->back;
5102 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5105 vector_id = hclge_get_vector_index(hdev, vector);
5106 if (vector_id < 0) {
5107 dev_err(&handle->pdev->dev,
5108 "Get vector index fail. ret =%d\n", vector_id);
5112 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5114 dev_err(&handle->pdev->dev,
5115 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5121 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5122 bool en_uc, bool en_mc, bool en_bc)
5124 struct hclge_vport *vport = &hdev->vport[vf_id];
5125 struct hnae3_handle *handle = &vport->nic;
5126 struct hclge_promisc_cfg_cmd *req;
5127 struct hclge_desc desc;
5128 bool uc_tx_en = en_uc;
5132 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5134 req = (struct hclge_promisc_cfg_cmd *)desc.data;
5137 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5140 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5141 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5142 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5143 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5144 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5145 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5146 req->extend_promisc = promisc_cfg;
5148 /* to be compatible with DEVICE_VERSION_V1/2 */
5150 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5151 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5152 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5153 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5154 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5155 req->promisc = promisc_cfg;
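/* both the extended field (V3) and the legacy field (V1/V2) are
 * filled in, so the same command works across device versions
 */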
5157 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5159 dev_err(&hdev->pdev->dev,
5160 "failed to set vport %u promisc mode, ret = %d.\n",
5166 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5167 bool en_mc_pmc, bool en_bc_pmc)
5169 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5170 en_uc_pmc, en_mc_pmc, en_bc_pmc);
5173 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5176 struct hclge_vport *vport = hclge_get_vport(handle);
5177 struct hclge_dev *hdev = vport->back;
5178 bool en_bc_pmc = true;
5180 /* For devices whose version is below V2, if broadcast promisc is
5181 * enabled, the vlan filter is always bypassed. So broadcast promisc
5182 * should be disabled until the user enables promisc mode
5184 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5185 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5187 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5191 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5193 struct hclge_vport *vport = hclge_get_vport(handle);
5195 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5198 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5200 if (hlist_empty(&hdev->fd_rule_list))
5201 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5204 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5206 if (!test_bit(location, hdev->fd_bmap)) {
5207 set_bit(location, hdev->fd_bmap);
5208 hdev->hclge_fd_rule_num++;
5212 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5214 if (test_bit(location, hdev->fd_bmap)) {
5215 clear_bit(location, hdev->fd_bmap);
5216 hdev->hclge_fd_rule_num--;
5220 static void hclge_fd_free_node(struct hclge_dev *hdev,
5221 struct hclge_fd_rule *rule)
5223 hlist_del(&rule->rule_node);
5225 hclge_sync_fd_state(hdev);
5228 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5229 struct hclge_fd_rule *old_rule,
5230 struct hclge_fd_rule *new_rule,
5231 enum HCLGE_FD_NODE_STATE state)
5234 case HCLGE_FD_TO_ADD:
5235 case HCLGE_FD_ACTIVE:
5236 /* 1) if the new state is TO_ADD, just replace the old rule
5237 * with the same location, no matter its state, because the
5238 * new rule will be configured to the hardware.
5239 * 2) if the new state is ACTIVE, it means the new rule
5240 * has been configured to the hardware, so just replace
5241 * the old rule node with the same location.
5242 * 3) neither case adds a new node to the list, so there is no need
5243 * to update the rule number and fd_bmap.
5245 new_rule->rule_node.next = old_rule->rule_node.next;
5246 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5247 memcpy(old_rule, new_rule, sizeof(*old_rule));
5250 case HCLGE_FD_DELETED:
5251 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5252 hclge_fd_free_node(hdev, old_rule);
5254 case HCLGE_FD_TO_DEL:
5255 /* if the new request is TO_DEL, and the old rule exists:
5256 * 1) if the state of the old rule is TO_DEL, we need to do nothing,
5257 * because we delete rules by location; the other rule content is irrelevant.
5259 * 2) if the state of the old rule is ACTIVE, we need to change its
5260 * state to TO_DEL, so the rule will be deleted when the periodic
5261 * task is scheduled.
5262 * 3) the state of old rule is TO_ADD, it means the rule hasn't
5263 * been added to hardware, so we just delete the rule node from
5264 * fd_rule_list directly.
5266 if (old_rule->state == HCLGE_FD_TO_ADD) {
5267 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5268 hclge_fd_free_node(hdev, old_rule);
5271 old_rule->state = HCLGE_FD_TO_DEL;
5276 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5278 struct hclge_fd_rule **parent)
5280 struct hclge_fd_rule *rule;
5281 struct hlist_node *node;
5283 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5284 if (rule->location == location)
5286 else if (rule->location > location)
5288 /* record the parent node, used to keep the nodes in fd_rule_list
5289 * in ascending order
5290 */
5297 /* insert the fd rule node in ascending order according to rule->location */
5298 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5299 struct hclge_fd_rule *rule,
5300 struct hclge_fd_rule *parent)
5302 INIT_HLIST_NODE(&rule->rule_node);
5305 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5307 hlist_add_head(&rule->rule_node, hlist);
5310 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5311 struct hclge_fd_user_def_cfg *cfg)
5313 struct hclge_fd_user_def_cfg_cmd *req;
5314 struct hclge_desc desc;
5318 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5320 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5322 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5323 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5324 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5325 req->ol2_cfg = cpu_to_le16(data);
5328 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5329 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5330 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5331 req->ol3_cfg = cpu_to_le16(data);
5334 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5335 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5336 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5337 req->ol4_cfg = cpu_to_le16(data);
5339 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5341 dev_err(&hdev->pdev->dev,
5342 "failed to set fd user def data, ret= %d\n", ret);
5346 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5350 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5354 spin_lock_bh(&hdev->fd_rule_lock);
5356 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5358 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5361 spin_unlock_bh(&hdev->fd_rule_lock);
5364 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5365 struct hclge_fd_rule *rule)
5367 struct hlist_head *hlist = &hdev->fd_rule_list;
5368 struct hclge_fd_rule *fd_rule, *parent = NULL;
5369 struct hclge_fd_user_def_info *info, *old_info;
5370 struct hclge_fd_user_def_cfg *cfg;
5372 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5373 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5376 /* valid layers start from 1, so subtract 1 to index into the cfg array */
5377 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5378 info = &rule->ep.user_def;
5380 if (!cfg->ref_cnt || cfg->offset == info->offset)
5383 if (cfg->ref_cnt > 1)
5386 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5388 old_info = &fd_rule->ep.user_def;
5389 if (info->layer == old_info->layer)
5394 dev_err(&hdev->pdev->dev,
5395 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5400 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5401 struct hclge_fd_rule *rule)
5403 struct hclge_fd_user_def_cfg *cfg;
5405 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5406 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5409 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5410 if (!cfg->ref_cnt) {
5411 cfg->offset = rule->ep.user_def.offset;
5412 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5417 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5418 struct hclge_fd_rule *rule)
5420 struct hclge_fd_user_def_cfg *cfg;
5422 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5423 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5426 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5431 if (!cfg->ref_cnt) {
5433 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5437 static void hclge_update_fd_list(struct hclge_dev *hdev,
5438 enum HCLGE_FD_NODE_STATE state, u16 location,
5439 struct hclge_fd_rule *new_rule)
5441 struct hlist_head *hlist = &hdev->fd_rule_list;
5442 struct hclge_fd_rule *fd_rule, *parent = NULL;
5444 fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5446 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5447 if (state == HCLGE_FD_ACTIVE)
5448 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5449 hclge_sync_fd_user_def_cfg(hdev, true);
5451 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5455 /* it's unlikely to fail here, because we have checked that the
5456 * rule exists before
5457 */
5458 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5459 dev_warn(&hdev->pdev->dev,
5460 "failed to delete fd rule %u, it's inexistent\n",
5465 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5466 hclge_sync_fd_user_def_cfg(hdev, true);
5468 hclge_fd_insert_rule_node(hlist, new_rule, parent);
5469 hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5471 if (state == HCLGE_FD_TO_ADD) {
5472 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5473 hclge_task_schedule(hdev, 0);
5477 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5479 struct hclge_get_fd_mode_cmd *req;
5480 struct hclge_desc desc;
5483 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5485 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5487 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5489 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5493 *fd_mode = req->mode;
5498 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5499 u32 *stage1_entry_num,
5500 u32 *stage2_entry_num,
5501 u16 *stage1_counter_num,
5502 u16 *stage2_counter_num)
5504 struct hclge_get_fd_allocation_cmd *req;
5505 struct hclge_desc desc;
5508 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5510 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5512 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5514 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5519 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5520 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5521 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5522 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5527 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5528 enum HCLGE_FD_STAGE stage_num)
5530 struct hclge_set_fd_key_config_cmd *req;
5531 struct hclge_fd_key_cfg *stage;
5532 struct hclge_desc desc;
5535 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5537 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5538 stage = &hdev->fd_cfg.key_cfg[stage_num];
5539 req->stage = stage_num;
5540 req->key_select = stage->key_sel;
5541 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5542 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5543 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5544 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5545 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5546 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5548 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5550 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5555 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5557 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5559 spin_lock_bh(&hdev->fd_rule_lock);
5560 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5561 spin_unlock_bh(&hdev->fd_rule_lock);
5563 hclge_fd_set_user_def_cmd(hdev, cfg);
5566 static int hclge_init_fd_config(struct hclge_dev *hdev)
5568 #define LOW_2_WORDS 0x03
5569 struct hclge_fd_key_cfg *key_cfg;
5572 if (!hnae3_dev_fd_supported(hdev))
5575 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5579 switch (hdev->fd_cfg.fd_mode) {
5580 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5581 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5583 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5584 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5587 dev_err(&hdev->pdev->dev,
5588 "Unsupported flow director mode %u\n",
5589 hdev->fd_cfg.fd_mode);
5593 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5594 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5595 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5596 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5597 key_cfg->outer_sipv6_word_en = 0;
5598 key_cfg->outer_dipv6_word_en = 0;
5600 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5601 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5602 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5603 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5605 /* If using the max 400-bit key, we can also support tuples for ether type */
5606 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5607 key_cfg->tuple_active |=
5608 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5609 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5610 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5613 /* roce_type is used to filter roce frames
5614 * dst_vport is used to specify the rule
5616 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5618 ret = hclge_get_fd_allocation(hdev,
5619 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5620 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5621 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5622 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5626 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5629 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5630 int loc, u8 *key, bool is_add)
5632 struct hclge_fd_tcam_config_1_cmd *req1;
5633 struct hclge_fd_tcam_config_2_cmd *req2;
5634 struct hclge_fd_tcam_config_3_cmd *req3;
5635 struct hclge_desc desc[3];
5638 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5639 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5640 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5641 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5642 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5644 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5645 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5646 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5648 req1->stage = stage;
5649 req1->xy_sel = sel_x ? 1 : 0;
5650 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5651 req1->index = cpu_to_le32(loc);
5652 req1->entry_vld = sel_x ? is_add : 0;
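/* the key (up to 400 bits in the widest mode) does not fit in one
 * descriptor, so it is split across the tcam_data areas of the three
 * chained descriptors
 */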
5655 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5656 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5657 sizeof(req2->tcam_data));
5658 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5659 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5662 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5664 dev_err(&hdev->pdev->dev,
5665 "config tcam key fail, ret=%d\n",
5671 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5672 struct hclge_fd_ad_data *action)
5674 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5675 struct hclge_fd_ad_config_cmd *req;
5676 struct hclge_desc desc;
5680 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5682 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5683 req->index = cpu_to_le32(loc);
5686 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5687 action->write_rule_id_to_bd);
5688 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5690 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5691 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5692 action->override_tc);
5693 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5694 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5697 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5698 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5699 action->forward_to_direct_queue);
5700 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5702 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5703 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5704 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5705 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5706 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5707 action->counter_id);
5709 req->ad_data = cpu_to_le64(ad_data);
5710 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5712 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5717 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5718 struct hclge_fd_rule *rule)
5720 int offset, moffset, ip_offset;
5721 enum HCLGE_FD_KEY_OPT key_opt;
5722 u16 tmp_x_s, tmp_y_s;
5723 u32 tmp_x_l, tmp_y_l;
5727 if (rule->unused_tuple & BIT(tuple_bit))
5730 key_opt = tuple_key_info[tuple_bit].key_opt;
5731 offset = tuple_key_info[tuple_bit].offset;
5732 moffset = tuple_key_info[tuple_bit].moffset;
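/* convert the tuple value/mask pair into the TCAM (x, y) encoding; the
 * width handled below depends on key_opt: a single byte, an le16, an
 * le32, a byte-reversed MAC address or one 32-bit word of an IP address
 */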
5736 calc_x(*key_x, p[offset], p[moffset]);
5737 calc_y(*key_y, p[offset], p[moffset]);
5741 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5742 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5743 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5744 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5748 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5749 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5750 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5751 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5755 for (i = 0; i < ETH_ALEN; i++) {
5756 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5758 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5764 ip_offset = IPV4_INDEX * sizeof(u32);
5765 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5766 *(u32 *)(&p[moffset + ip_offset]));
5767 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5768 *(u32 *)(&p[moffset + ip_offset]));
5769 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5770 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5778 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5779 u8 vf_id, u8 network_port_id)
5781 u32 port_number = 0;
5783 if (port_type == HOST_PORT) {
5784 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5786 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5788 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5790 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5791 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5792 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5798 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5799 __le32 *key_x, __le32 *key_y,
5800 struct hclge_fd_rule *rule)
5802 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5803 u8 cur_pos = 0, tuple_size, shift_bits;
5806 for (i = 0; i < MAX_META_DATA; i++) {
5807 tuple_size = meta_data_key_info[i].key_length;
5808 tuple_bit = key_cfg->meta_data_active & BIT(i);
5810 switch (tuple_bit) {
5811 case BIT(ROCE_TYPE):
5812 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5813 cur_pos += tuple_size;
5815 case BIT(DST_VPORT):
5816 port_number = hclge_get_port_number(HOST_PORT, 0,
5818 hnae3_set_field(meta_data,
5819 GENMASK(cur_pos + tuple_size, cur_pos),
5820 cur_pos, port_number);
5821 cur_pos += tuple_size;
5828 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5829 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5830 shift_bits = sizeof(meta_data) * 8 - cur_pos;
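/* e.g. with cur_pos = 12 meaningful bits, shift_bits = 32 - 12 = 20,
 * left-aligning the meta data into the MSB region of the key word
 */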
5832 *key_x = cpu_to_le32(tmp_x << shift_bits);
5833 *key_y = cpu_to_le32(tmp_y << shift_bits);
5836 /* A complete key consists of a meta data key and a tuple key.
5837 * The meta data key is stored at the MSB region, the tuple key at the
5838 * LSB region, and unused bits are filled with 0.
5840 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5841 struct hclge_fd_rule *rule)
5843 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5844 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5845 u8 *cur_key_x, *cur_key_y;
5846 u8 meta_data_region;
5851 memset(key_x, 0, sizeof(key_x));
5852 memset(key_y, 0, sizeof(key_y));
5856 for (i = 0 ; i < MAX_TUPLE; i++) {
5859 tuple_size = tuple_key_info[i].key_length / 8;
5860 if (!(key_cfg->tuple_active & BIT(i)))
5863 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5866 cur_key_x += tuple_size;
5867 cur_key_y += tuple_size;
5871 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5872 MAX_META_DATA_LENGTH / 8;
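/* a sketch of the layout, assuming MAX_META_DATA_LENGTH is 32 bits:
 * with a 400-bit key the meta data region starts at byte
 * 400 / 8 - 32 / 8 = 46, i.e. the last 4 bytes of the key
 */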
5874 hclge_fd_convert_meta_data(key_cfg,
5875 (__le32 *)(key_x + meta_data_region),
5876 (__le32 *)(key_y + meta_data_region),
5879 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5882 dev_err(&hdev->pdev->dev,
5883 "fd key_y config fail, loc=%u, ret=%d\n",
5884 rule->location, ret);
5888 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5891 dev_err(&hdev->pdev->dev,
5892 "fd key_x config fail, loc=%u, ret=%d\n",
5893 rule->location, ret);
5897 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5898 struct hclge_fd_rule *rule)
5900 struct hclge_vport *vport = hdev->vport;
5901 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5902 struct hclge_fd_ad_data ad_data;
5904 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5905 ad_data.ad_id = rule->location;
5907 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5908 ad_data.drop_packet = true;
5909 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5910 ad_data.override_tc = true;
5912 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5914 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5916 ad_data.forward_to_direct_queue = true;
5917 ad_data.queue_id = rule->queue_id;
5920 ad_data.use_counter = false;
5921 ad_data.counter_id = 0;
5923 ad_data.use_next_stage = false;
5924 ad_data.next_input_key = 0;
5926 ad_data.write_rule_id_to_bd = true;
5927 ad_data.rule_id = rule->location;
5929 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5932 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5935 if (!spec || !unused_tuple)
5938 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5941 *unused_tuple |= BIT(INNER_SRC_IP);
5944 *unused_tuple |= BIT(INNER_DST_IP);
5947 *unused_tuple |= BIT(INNER_SRC_PORT);
5950 *unused_tuple |= BIT(INNER_DST_PORT);
5953 *unused_tuple |= BIT(INNER_IP_TOS);
5958 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5961 if (!spec || !unused_tuple)
5964 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5965 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5968 *unused_tuple |= BIT(INNER_SRC_IP);
5971 *unused_tuple |= BIT(INNER_DST_IP);
5974 *unused_tuple |= BIT(INNER_IP_TOS);
5977 *unused_tuple |= BIT(INNER_IP_PROTO);
5979 if (spec->l4_4_bytes)
5982 if (spec->ip_ver != ETH_RX_NFC_IP4)
5988 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5991 if (!spec || !unused_tuple)
5994 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5996 /* check whether the src/dst ip addresses are used */
5997 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5998 *unused_tuple |= BIT(INNER_SRC_IP);
6000 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6001 *unused_tuple |= BIT(INNER_DST_IP);
6004 *unused_tuple |= BIT(INNER_SRC_PORT);
6007 *unused_tuple |= BIT(INNER_DST_PORT);
6010 *unused_tuple |= BIT(INNER_IP_TOS);
6015 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6018 if (!spec || !unused_tuple)
6021 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6022 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6024 /* check whether the src/dst ip addresses are used */
6025 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6026 *unused_tuple |= BIT(INNER_SRC_IP);
6028 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6029 *unused_tuple |= BIT(INNER_DST_IP);
6031 if (!spec->l4_proto)
6032 *unused_tuple |= BIT(INNER_IP_PROTO);
6035 *unused_tuple |= BIT(INNER_IP_TOS);
6037 if (spec->l4_4_bytes)
6043 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6045 if (!spec || !unused_tuple)
6048 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6049 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6050 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6052 if (is_zero_ether_addr(spec->h_source))
6053 *unused_tuple |= BIT(INNER_SRC_MAC);
6055 if (is_zero_ether_addr(spec->h_dest))
6056 *unused_tuple |= BIT(INNER_DST_MAC);
6059 *unused_tuple |= BIT(INNER_ETH_TYPE);
6064 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6065 struct ethtool_rx_flow_spec *fs,
6068 if (fs->flow_type & FLOW_EXT) {
6069 if (fs->h_ext.vlan_etype) {
6070 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6074 if (!fs->h_ext.vlan_tci)
6075 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6077 if (fs->m_ext.vlan_tci &&
6078 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6079 dev_err(&hdev->pdev->dev,
6080 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6081 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6085 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6088 if (fs->flow_type & FLOW_MAC_EXT) {
6089 if (hdev->fd_cfg.fd_mode !=
6090 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6091 dev_err(&hdev->pdev->dev,
6092 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6096 if (is_zero_ether_addr(fs->h_ext.h_dest))
6097 *unused_tuple |= BIT(INNER_DST_MAC);
6099 *unused_tuple &= ~BIT(INNER_DST_MAC);
6105 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6106 struct hclge_fd_user_def_info *info)
6108 switch (flow_type) {
6110 info->layer = HCLGE_FD_USER_DEF_L2;
6111 *unused_tuple &= ~BIT(INNER_L2_RSV);
6114 case IPV6_USER_FLOW:
6115 info->layer = HCLGE_FD_USER_DEF_L3;
6116 *unused_tuple &= ~BIT(INNER_L3_RSV);
6122 info->layer = HCLGE_FD_USER_DEF_L4;
6123 *unused_tuple &= ~BIT(INNER_L4_RSV);
6132 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6134 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6137 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6138 struct ethtool_rx_flow_spec *fs,
6140 struct hclge_fd_user_def_info *info)
6142 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6143 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6144 u16 data, offset, data_mask, offset_mask;
6147 info->layer = HCLGE_FD_USER_DEF_NONE;
6148 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6150 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6153 /* user-def data from ethtool is a 64 bit value; bits 0~15 are used
6154 * for the data, and bits 32~47 are used for the offset.
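/* for example, assuming ethtool splits the 64 bit user-def value into
 * data[0] = upper word and data[1] = lower word, a rule with
 * "user-def 0x0000000400000050" requests offset 4 and match data 0x0050
 */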
6156 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6157 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6158 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6159 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6161 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6162 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6166 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6167 dev_err(&hdev->pdev->dev,
6168 "user-def offset[%u] should be no more than %u\n",
6169 offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6173 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6174 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6178 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6180 dev_err(&hdev->pdev->dev,
6181 "unsupported flow type for user-def bytes, ret = %d\n",
6187 info->data_mask = data_mask;
6188 info->offset = offset;
6193 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6194 struct ethtool_rx_flow_spec *fs,
6196 struct hclge_fd_user_def_info *info)
6201 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6202 dev_err(&hdev->pdev->dev,
6203 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6205 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6209 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6213 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6214 switch (flow_type) {
6218 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6222 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6228 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6231 case IPV6_USER_FLOW:
6232 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6236 if (hdev->fd_cfg.fd_mode !=
6237 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6238 dev_err(&hdev->pdev->dev,
6239 "ETHER_FLOW is not supported in current fd mode!\n");
6243 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6247 dev_err(&hdev->pdev->dev,
6248 "unsupported protocol type, protocol type = %#x\n",
6254 dev_err(&hdev->pdev->dev,
6255 "failed to check flow union tuple, ret = %d\n",
6260 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6263 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6264 struct ethtool_rx_flow_spec *fs,
6265 struct hclge_fd_rule *rule, u8 ip_proto)
6267 rule->tuples.src_ip[IPV4_INDEX] =
6268 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6269 rule->tuples_mask.src_ip[IPV4_INDEX] =
6270 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6272 rule->tuples.dst_ip[IPV4_INDEX] =
6273 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6274 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6275 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6277 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6278 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6280 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6281 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6283 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6284 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6286 rule->tuples.ether_proto = ETH_P_IP;
6287 rule->tuples_mask.ether_proto = 0xFFFF;
6289 rule->tuples.ip_proto = ip_proto;
6290 rule->tuples_mask.ip_proto = 0xFF;
6293 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6294 struct ethtool_rx_flow_spec *fs,
6295 struct hclge_fd_rule *rule)
6297 rule->tuples.src_ip[IPV4_INDEX] =
6298 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6299 rule->tuples_mask.src_ip[IPV4_INDEX] =
6300 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6302 rule->tuples.dst_ip[IPV4_INDEX] =
6303 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6304 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6305 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6307 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6308 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6310 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6311 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6313 rule->tuples.ether_proto = ETH_P_IP;
6314 rule->tuples_mask.ether_proto = 0xFFFF;
6317 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6318 struct ethtool_rx_flow_spec *fs,
6319 struct hclge_fd_rule *rule, u8 ip_proto)
6321 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6323 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6326 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6328 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6331 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6332 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6334 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6335 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6337 rule->tuples.ether_proto = ETH_P_IPV6;
6338 rule->tuples_mask.ether_proto = 0xFFFF;
6340 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6341 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6343 rule->tuples.ip_proto = ip_proto;
6344 rule->tuples_mask.ip_proto = 0xFF;
6347 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6348 struct ethtool_rx_flow_spec *fs,
6349 struct hclge_fd_rule *rule)
6351 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6353 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6356 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6358 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6361 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6362 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6364 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6365 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6367 rule->tuples.ether_proto = ETH_P_IPV6;
6368 rule->tuples_mask.ether_proto = 0xFFFF;
6371 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6372 struct ethtool_rx_flow_spec *fs,
6373 struct hclge_fd_rule *rule)
6375 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6376 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6378 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6379 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6381 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6382 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6385 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6386 struct hclge_fd_rule *rule)
6388 switch (info->layer) {
6389 case HCLGE_FD_USER_DEF_L2:
6390 rule->tuples.l2_user_def = info->data;
6391 rule->tuples_mask.l2_user_def = info->data_mask;
6393 case HCLGE_FD_USER_DEF_L3:
6394 rule->tuples.l3_user_def = info->data;
6395 rule->tuples_mask.l3_user_def = info->data_mask;
6397 case HCLGE_FD_USER_DEF_L4:
6398 rule->tuples.l4_user_def = (u32)info->data << 16;
6399 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
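/* the L4 user-def tuple is a 32-bit field whose upper 16 bits carry
 * the user data, hence the shift by 16 above
 */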
6405 rule->ep.user_def = *info;
6408 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6409 struct ethtool_rx_flow_spec *fs,
6410 struct hclge_fd_rule *rule,
6411 struct hclge_fd_user_def_info *info)
6413 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6415 switch (flow_type) {
6417 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6420 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6423 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6426 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6429 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6432 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6435 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6437 case IPV6_USER_FLOW:
6438 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6441 hclge_fd_get_ether_tuple(hdev, fs, rule);
6447 if (fs->flow_type & FLOW_EXT) {
6448 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6449 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6450 hclge_fd_get_user_def_tuple(info, rule);
6453 if (fs->flow_type & FLOW_MAC_EXT) {
6454 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6455 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		return ret;

	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
}

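/* Program a prepared rule into stage 1 of the flow table under
 * fd_rule_lock. Rules created by ethtool, tc flower and aRFS are
 * mutually exclusive, so a new rule is rejected while rules of a
 * conflicting type are still active.
 */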
static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
				     struct hclge_fd_rule *rule)
{
	int ret;

	spin_lock_bh(&hdev->fd_rule_lock);

	if (hdev->fd_active_type != rule->rule_type &&
	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
		dev_err(&hdev->pdev->dev,
			"mode conflict (new type %d, active type %d), please delete existing rules first\n",
			rule->rule_type, hdev->fd_active_type);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EINVAL;
	}

	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
	if (ret)
		goto out;

	ret = hclge_clear_arfs_rules(hdev);
	if (ret)
		goto out;

	ret = hclge_fd_config_rule(hdev, rule);
	if (ret)
		goto out;

	rule->state = HCLGE_FD_ACTIVE;
	hdev->fd_active_type = rule->rule_type;
	hclge_update_fd_list(hdev, rule->state, rule->location, rule);

out:
	spin_unlock_bh(&hdev->fd_rule_lock);
	return ret;
}

static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
}

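/* Decode an ethtool ring_cookie into either a drop action or a
 * vport/queue destination, validating the VF id and queue id against
 * the resources allocated to that function.
 */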
static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
				      u16 *vport_id, u8 *action, u16 *queue_id)
{
	struct hclge_vport *vport = hdev->vport;

	if (ring_cookie == RX_CLS_FLOW_DISC) {
		*action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		u16 tqps;

		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%u) > max vf num (%u)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = hdev->vport[vf].nic.kinfo.num_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%u) > max tqp num (%u)\n",
				ring, tqps - 1U);
			return -EINVAL;
		}

		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
		*queue_id = ring;
	}

	return 0;
}

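/* ethtool -N add entry point: validate the flow spec, resolve the
 * destination from the ring cookie, then build and program the rule.
 */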
static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_user_def_info info;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev)) {
		dev_err(&hdev->pdev->dev,
			"flow director is not supported\n");
		return -EOPNOTSUPP;
	}

	if (!hdev->fd_en) {
		dev_err(&hdev->pdev->dev,
			"please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
	if (ret)
		return ret;

	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
					 &action, &q_index);
	if (ret)
		return ret;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;
	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	ret = hclge_add_fd_entry_common(hdev, rule);
	if (ret)
		kfree(rule);

	return ret;
}

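/* ethtool -N delete entry point: validate the location, remove the
 * TCAM entry and drop the rule from the software list.
 */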
static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
	    !test_bit(fs->location, hdev->fd_bmap)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %u does not exist\n", fs->location);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
				   NULL, false);
	if (ret)
		goto out;

	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);

out:
	spin_unlock_bh(&hdev->fd_rule_lock);
	return ret;
}

static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
					 bool clear_list)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	if (!hnae3_dev_fd_supported(hdev))
		return;

	spin_lock_bh(&hdev->fd_rule_lock);

	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}

	spin_unlock_bh(&hdev->fd_rule_lock);
}

static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
{
	hclge_clear_fd_rules_in_list(hdev, true);
	hclge_fd_disable_user_def(hdev);
}

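/* Mark every active rule as TO_ADD so the service task reprograms the
 * flow table after a reset.
 */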
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	/* Return ok here, because reset error handling will check this
	 * return value. If error is returned here, the reset process will
	 * fail.
	 */
	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	/* if fd is disabled, it should not be restored during reset */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->state == HCLGE_FD_ACTIVE)
			rule->state = HCLGE_FD_TO_ADD;
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);

	return 0;
}

static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}

static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip4_spec *spec,
				     struct ethtool_tcpip4_spec *spec_mask)
{
	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.src_port);

	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.dst_port);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;
}

static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
				  struct ethtool_usrip4_spec *spec,
				  struct ethtool_usrip4_spec *spec_mask)
{
	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;

	spec->proto = rule->tuples.ip_proto;
	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
			0 : rule->tuples_mask.ip_proto;

	spec->ip_ver = ETH_RX_NFC_IP4;
}

static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip6_spec *spec,
				     struct ethtool_tcpip6_spec *spec_mask)
{
	cpu_to_be32_array(spec->ip6src,
			  rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst,
			  rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
	else
		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
				  IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
	else
		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
				  IPV6_SIZE);

	spec->tclass = rule->tuples.ip_tos;
	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.src_port);

	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.dst_port);
}

static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
				  struct ethtool_usrip6_spec *spec,
				  struct ethtool_usrip6_spec *spec_mask)
{
	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
	else
		cpu_to_be32_array(spec_mask->ip6src,
				  rule->tuples_mask.src_ip, IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
	else
		cpu_to_be32_array(spec_mask->ip6dst,
				  rule->tuples_mask.dst_ip, IPV6_SIZE);

	spec->tclass = rule->tuples.ip_tos;
	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;

	spec->l4_proto = rule->tuples.ip_proto;
	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
			0 : rule->tuples_mask.ip_proto;
}

static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
				    struct ethhdr *spec,
				    struct ethhdr *spec_mask)
{
	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);

	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
		eth_zero_addr(spec_mask->h_source);
	else
		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);

	if (rule->unused_tuple & BIT(INNER_DST_MAC))
		eth_zero_addr(spec_mask->h_dest);
	else
		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);

	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
}

static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
				       struct hclge_fd_rule *rule)
{
	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
		fs->h_ext.data[0] = 0;
		fs->h_ext.data[1] = 0;
		fs->m_ext.data[0] = 0;
		fs->m_ext.data[1] = 0;
	} else {
		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
		fs->m_ext.data[0] =
				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
	}
}

static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
				  struct hclge_fd_rule *rule)
{
	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
			rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
			0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);

		hclge_fd_get_user_def_info(fs, rule);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);
	}
}

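/* Fill an ethtool flow spec from the stored rule at cmd->fs.location;
 * this is the inverse of hclge_fd_get_tuple().
 */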
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
					 &fs->m_u.tcp_ip4_spec);
		break;
	case IP_USER_FLOW:
		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
				      &fs->m_u.usr_ip4_spec);
		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
					 &fs->m_u.tcp_ip6_spec);
		break;
	case IPV6_USER_FLOW:
		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
				      &fs->m_u.usr_ip6_spec);
		break;
	/* The flow type of the fd rule was checked before it was added to
	 * the rule list. As all other flow types have been handled above,
	 * the default case must be ETHER_FLOW.
	 */
	default:
		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
					&fs->m_u.ether_spec);
		break;
	}

	hclge_fd_get_ext_info(fs, rule);

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		if (rule->state == HCLGE_FD_TO_DEL)
			continue;

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}

static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32

	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		int i;

		for (i = 0; i < IPV6_SIZE; i++) {
			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
		}
	}
}

/* traverse all rules, check whether an existing rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}

static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	rule->state = HCLGE_FD_TO_ADD;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}

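/* aRFS entry point: steer a received flow to the queue of the CPU
 * consuming it. User-configured rules take precedence, so aRFS backs
 * off whenever ethtool or tc flower rules are active.
 */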
static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples = {};
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 bit_id;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	/* when a rule added by the user already exists, arfs should not
	 * work
	 */
	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	/* check whether a flow director filter already exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOSPC;
		}

		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOMEM;
		}

		rule->location = bit_id;
		rule->arfs.flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
	} else if (rule->queue_id != queue_id) {
		rule->queue_id = queue_id;
		rule->state = HCLGE_FD_TO_ADD;
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		hclge_task_schedule(hdev, 0);
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
	return rule->location;
}

static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->state != HCLGE_FD_ACTIVE)
			continue;
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->arfs.flow_id, rule->location)) {
			rule->state = HCLGE_FD_TO_DEL;
			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
#endif
}

/* must be called with fd_rule_lock held */
static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
		return 0;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		switch (rule->state) {
		case HCLGE_FD_TO_DEL:
		case HCLGE_FD_ACTIVE:
			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
						   rule->location, NULL, false);
			if (ret)
				return ret;
			fallthrough;
		case HCLGE_FD_TO_ADD:
			hclge_fd_dec_rule_cnt(hdev, rule->location);
			hlist_del(&rule->rule_node);
			kfree(rule);
			break;
		default:
			break;
		}
	}
	hclge_sync_fd_state(hdev);

#endif
	return 0;
}

static void hclge_get_cls_key_basic(const struct flow_rule *flow,
				    struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(flow, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}
		rule->tuples.ether_proto = ethtype_key;
		rule->tuples_mask.ether_proto = ethtype_mask;
		rule->tuples.ip_proto = match.key->ip_proto;
		rule->tuples_mask.ip_proto = match.mask->ip_proto;
	} else {
		rule->unused_tuple |= BIT(INNER_IP_PROTO);
		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
	}
}

static void hclge_get_cls_key_mac(const struct flow_rule *flow,
				  struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(flow, &match);
		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
		ether_addr_copy(rule->tuples.src_mac, match.key->src);
		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
	} else {
		rule->unused_tuple |= BIT(INNER_DST_MAC);
		rule->unused_tuple |= BIT(INNER_SRC_MAC);
	}
}

static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(flow, &match);
		rule->tuples.vlan_tag1 = match.key->vlan_id |
				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
	} else {
		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}
}

static void hclge_get_cls_key_ip(const struct flow_rule *flow,
				 struct hclge_fd_rule *rule)
{
	u16 addr_type = 0;

	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(flow, &match);
		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(flow, &match);
		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(match.mask->src);
		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(match.mask->dst);
	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(flow, &match);
		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  match.mask->src.s6_addr32, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  match.mask->dst.s6_addr32, IPV6_SIZE);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_IP);
		rule->unused_tuple |= BIT(INNER_DST_IP);
	}
}

static void hclge_get_cls_key_port(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(flow, &match);

		rule->tuples.src_port = be16_to_cpu(match.key->src);
		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_PORT);
		rule->unused_tuple |= BIT(INNER_DST_PORT);
	}
}

static int hclge_parse_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower,
				  struct hclge_fd_rule *rule)
{
	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
	struct flow_dissector *dissector = flow->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
			dissector->used_keys);
		return -EOPNOTSUPP;
	}

	hclge_get_cls_key_basic(flow, rule);
	hclge_get_cls_key_mac(flow, rule);
	hclge_get_cls_key_vlan(flow, rule);
	hclge_get_cls_key_ip(flow, rule);
	hclge_get_cls_key_port(flow, rule);

	return 0;
}

static int hclge_check_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower, int tc)
{
	u32 prio = cls_flower->common.prio;

	if (tc < 0 || tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
		return -EINVAL;
	}

	if (prio == 0 ||
	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
		dev_err(&hdev->pdev->dev,
			"prio %u should be in range[1, %u]\n",
			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
		return -EINVAL;
	}

	if (test_bit(prio - 1, hdev->fd_bmap)) {
		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
		return -EINVAL;
	}
	return 0;
}

static int hclge_add_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower,
				int tc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check cls flower params, ret = %d\n", ret);
		return ret;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->action = HCLGE_FD_ACTION_SELECT_TC;
	rule->cls_flower.tc = tc;
	rule->location = cls_flower->common.prio - 1;
	rule->vf_id = 0;
	rule->cls_flower.cookie = cls_flower->cookie;
	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;

	ret = hclge_add_fd_entry_common(hdev, rule);
	if (ret)
		kfree(rule);

	return ret;
}

static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
						   unsigned long cookie)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->cls_flower.cookie == cookie)
			return rule;
	}

	return NULL;
}

static int hclge_del_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	spin_lock_bh(&hdev->fd_rule_lock);

	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
	if (!rule) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EINVAL;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
				   NULL, false);
	if (ret) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return ret;
	}

	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

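/* Push pending TO_ADD/TO_DEL rule states to hardware. On failure,
 * FD_TBL_CHANGED is set again so the remaining work is retried on the
 * next service task pass.
 */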
static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret = 0;

	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
		return;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
		switch (rule->state) {
		case HCLGE_FD_TO_ADD:
			ret = hclge_fd_config_rule(hdev, rule);
			if (ret)
				goto out;
			rule->state = HCLGE_FD_ACTIVE;
			break;
		case HCLGE_FD_TO_DEL:
			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
						   rule->location, NULL, false);
			if (ret)
				goto out;
			hclge_fd_dec_rule_cnt(hdev, rule->location);
			hclge_fd_free_node(hdev, rule);
			break;
		default:
			break;
		}
	}

out:
	if (ret)
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);

	spin_unlock_bh(&hdev->fd_rule_lock);
}

static void hclge_sync_fd_table(struct hclge_dev *hdev)
{
	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;

		hclge_clear_fd_rules_in_list(hdev, clear_list);
	}

	hclge_sync_fd_user_def_cfg(hdev, false);

	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
}

static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}

static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hdev->fd_en = enable;

	if (!enable)
		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
	else
		hclge_restore_fd_entries(handle);

	hclge_task_schedule(hdev, 0);
}

static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);

	if (enable) {
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
	}

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret = %d.\n", ret);
}

static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
				     u8 switch_param, u8 param_mask)
{
	struct hclge_mac_vlan_switch_cmd *req;
	struct hclge_desc desc;
	u32 func_id;
	int ret;

	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;

	/* read current config parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
				   true);
	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
	req->func_id = cpu_to_le32(func_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"read mac vlan switch parameter fail, ret = %d\n", ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_cmd_reuse_desc(&desc, false);
	req->switch_param = (req->switch_param & param_mask) | switch_param;
	req->param_mask = param_mask;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"set mac vlan switch parameter fail, ret = %d\n", ret);
	return ret;
}

static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
				       int link_ret)
{
#define HCLGE_PHY_LINK_STATUS_NUM  200

	struct phy_device *phydev = hdev->hw.mac.phydev;
	int i = 0;
	int ret;

	do {
		ret = phy_read_status(phydev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"phy update link status fail, ret = %d\n", ret);
			return;
		}

		if (phydev->link == link_ret)
			break;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
}

static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
{
#define HCLGE_MAC_LINK_STATUS_NUM  100

	int link_status;
	int i = 0;
	int ret;

	do {
		ret = hclge_get_mac_link_status(hdev, &link_status);
		if (ret)
			return ret;
		if (link_status == link_ret)
			return 0;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
	return -EBUSY;
}

static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
					  bool is_phy)
{
	int link_ret;

	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

	if (is_phy)
		hclge_phy_link_status_wait(hdev, link_ret);

	return hclge_mac_link_status_wait(hdev, link_ret);
}

static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret = %d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with the loopback flag
	 * and its original configuration parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret = %d.\n", ret);
	return ret;
}

static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_COMMON_LB_RETRY_MS	10
#define HCLGE_COMMON_LB_RETRY_NUM	100

	struct hclge_common_lb_cmd *req;
	struct hclge_desc desc;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_common_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PHY:
		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported common loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
	} else {
		req->mask = loop_mode_b;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_COMMON_LB_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"common loopback get fail, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));

	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
		return -EIO;
	}
	return ret;
}

static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
	int ret;

	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
	if (ret)
		return ret;

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"serdes loopback config mac mode timeout\n");

	return ret;
}

static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
				     struct phy_device *phydev)
{
	int ret;

	if (!phydev->suspended) {
		ret = phy_suspend(phydev);
		if (ret)
			return ret;
	}

	ret = phy_resume(phydev);
	if (ret)
		return ret;

	return phy_loopback(phydev, true);
}

static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
				      struct phy_device *phydev)
{
	int ret;

	ret = phy_loopback(phydev, false);
	if (ret)
		return ret;

	return phy_suspend(phydev);
}

static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int ret;

	if (!phydev) {
		if (hnae3_dev_phy_imp_supported(hdev))
			return hclge_set_common_loopback(hdev, en,
							 HNAE3_LOOP_PHY);
		return -ENOTSUPP;
	}

	if (en)
		ret = hclge_enable_phy_loopback(hdev, phydev);
	else
		ret = hclge_disable_phy_loopback(hdev, phydev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set phy loopback fail, ret = %d\n", ret);
		return ret;
	}

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"phy loopback config mac mode timeout\n");

	return ret;
}

static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
				     u16 stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
		if (ret)
			return ret;
	}
	return 0;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
	 * the same, the packets are looped back in the SSU. If SSU loopback
	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
	 */
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);

		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
						HCLGE_SWITCH_ALW_LPBK_MASK);
		if (ret)
			return ret;
	}

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_common_loopback(hdev, en, loop_mode);
		break;
	case HNAE3_LOOP_PHY:
		ret = hclge_set_phy_loopback(hdev, en);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	ret = hclge_tqp_enable(handle, en);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
			en ? "enable" : "disable", ret);

	return ret;
}

static int hclge_set_default_loopback(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_app_loopback(hdev, false);
	if (ret)
		return ret;

	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
	if (ret)
		return ret;

	return hclge_cfg_common_loopback(hdev, false,
					 HNAE3_LOOP_PARALLEL_SERDES);
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}

static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev, 0);
	} else {
		/* Set the DOWN flag here to disable link updating */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclge_flush_link_update(hdev);
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(hdev);
	spin_unlock_bh(&hdev->fd_rule_lock);

	/* If it is not PF reset, the firmware will disable the MAC,
	 * so it only needs to stop the PHY here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		hclge_update_link_status(hdev);
		return;
	}

	hclge_reset_tqp(handle);

	hclge_config_mac_tnl_int(hdev, false);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}

int hclge_vport_start(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
	vport->last_active_jiffies = jiffies;

	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
		if (vport->vport_id) {
			hclge_restore_mac_table_common(vport);
			hclge_restore_vport_vlan_table(vport);
		} else {
			hclge_restore_hw_table(hdev);
		}
	}

	clear_bit(vport->vport_id, hdev->vport_config_block);

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}

static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status, status = %u.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if (!resp_code || resp_code == 1)
			return 0;
		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
			 resp_code == HCLGE_ADD_MC_OVERFLOW)
			return -ENOSPC;

		dev_err(&hdev->pdev->dev,
			"add mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"remove mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	}

	dev_err(&hdev->pdev->dev,
		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);

	return -EINVAL;
}

static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6

	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret = %d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret = %d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret = %d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}

static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
			ret);
		return ret;
	}

	*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}

static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "failed to alloc umv space, want %u, get %u\n",
			 hdev->wanted_umv_size, allocated_size);

	hdev->max_umv_size = allocated_size;
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);

	return 0;
}

static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->vport_lock);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
	mutex_unlock(&hdev->vport_lock);
}

static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	if (need_lock)
		mutex_lock(&hdev->vport_lock);

	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);

	if (need_lock)
		mutex_unlock(&hdev->vport_lock);

	return is_full;
}

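/* Charge or refund one unicast MAC entry against the vport's private
 * UMV quota, spilling into the shared pool once the private quota is
 * exhausted. Callers hold vport_lock.
 */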
static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
}

static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
						  const u8 *mac_addr)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
				  enum HCLGE_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGE_MAC_TO_ADD:
		if (mac_node->state == HCLGE_MAC_TO_DEL)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGE_MAC_TO_DEL:
		if (mac_node->state == HCLGE_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGE_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * ACTIVE
	 */
	case HCLGE_MAC_ACTIVE:
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	}
}

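/* Record a requested MAC address change on the vport's unicast or
 * multicast list under mac_list_lock; the hardware table is updated
 * later by the service task.
 */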
int hclge_update_mac_list(struct hclge_vport *vport,
			  enum HCLGE_MAC_NODE_STATE state,
			  enum HCLGE_MAC_ADDR_TYPE mac_type,
			  const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	/* if the mac addr is already in the mac list, there is no need to
	 * add a new node; just update the existing node's state: convert it
	 * to a new state, remove it, or do nothing.
	 */
	mac_node = hclge_find_mac_node(list, addr);
	if (mac_node) {
		hclge_update_mac_node(mac_node, state);
		spin_unlock_bh(&vport->mac_list_lock);
		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
		return 0;
	}

	/* if this address was never added, there is nothing to delete */
	if (state == HCLGE_MAC_TO_DEL) {
		spin_unlock_bh(&vport->mac_list_lock);
		dev_err(&hdev->pdev->dev,
			"failed to delete address %pM from mac list\n",
			addr);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&vport->mac_list_lock);
		return -ENOMEM;
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&vport->mac_list_lock);

	return 0;
}

static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			addr, is_zero_ether_addr(addr),
			is_broadcast_ether_addr(addr),
			is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Repeated unicast entries
	 * are not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		mutex_lock(&hdev->vport_lock);
		if (!hclge_is_umv_space_full(vport, false)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			mutex_unlock(&hdev->vport_lock);
			return ret;
		}
		mutex_unlock(&hdev->vport_lock);

		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
				hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}

static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret) {
		mutex_lock(&hdev->vport_lock);
		hclge_update_umv_space(vport, true);
		mutex_unlock(&hdev->vport_lock);
	} else if (ret == -ENOENT) {
		ret = 0;
	}

	return ret;
}

static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	/* if already overflowed, do not print each time */
	if (status == -ENOSPC &&
	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}

static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (status)
			return status;

		if (hclge_is_all_function_id_zero(desc))
			/* All of the VFIDs are zero, so delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all of the VFIDs are zero, update the VFIDs */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else if (status == -ENOENT) {
		status = 0;
	}

	return status;
}

static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
				      struct list_head *list,
				      int (*sync)(struct hclge_vport *,
						  const unsigned char *))
{
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = sync(vport, mac_node->mac_addr);
		if (!ret) {
			mac_node->state = HCLGE_MAC_ACTIVE;
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}
static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
					struct list_head *list,
					int (*unsync)(struct hclge_vport *,
						      const unsigned char *))
{
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = unsync(vport, mac_node->mac_addr);
		if (!ret || ret == -ENOENT) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}
static bool hclge_sync_from_add_list(struct list_head *add_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	bool all_added = true;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			all_added = false;

		/* if the mac address from tmp_add_list is not in the
		 * uc/mc mac_list, a TO_DEL request was received during the
		 * window of adding the address to the mac table. If the
		 * node state is ACTIVE, change it to TO_DEL so it is removed
		 * on the next pass; otherwise it must be TO_ADD and was
		 * never written to the table, so just free the node.
		 */
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclge_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_DEL;
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}

	return all_added;
}
static void hclge_sync_from_del_list(struct list_head *del_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, a new
			 * TO_ADD request was received during the window of
			 * configuring the address. The node state is TO_ADD
			 * and the address is still in hardware (the delete
			 * failed), so just mark the node ACTIVE again.
			 */
			new_node->state = HCLGE_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}
static void hclge_update_overflow_flags(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type,
					bool is_all_added)
{
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
	} else {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
	}
}
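/* Sync one vport's MAC list to hardware: nodes marked TO_DEL are removed
 * first to free table space, then nodes marked TO_ADD are programmed.
 * Failures are folded back into the list so the next service-task pass
 * can retry them.
 */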
static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
				       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;
	bool all_added;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;
			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&vport->mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_uc_addr_common);
		hclge_sync_vport_mac_list(vport, &tmp_add_list,
					  hclge_add_uc_addr_common);
	} else {
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_mc_addr_common);
		hclge_sync_vport_mac_list(vport, &tmp_add_list,
					  hclge_add_mc_addr_common);
	}

	/* if some mac addresses failed to be added/deleted, move them back
	 * to the mac_list and retry on the next pass.
	 */
	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);
	all_added = hclge_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_update_overflow_flags(vport, mac_type, all_added);
}
static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(vport->vport_id, hdev->vport_config_block))
		return false;

	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
		return true;

	return false;
}

static void hclge_sync_mac_table(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (!hclge_need_sync_mac_table(vport))
			continue;

		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
	}
}
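/* Helpers used around VF reset and removal: collect the addresses that
 * must be cleared from hardware into a temporary delete list, then unsync
 * them. With is_del_list the nodes are dropped entirely; otherwise ACTIVE
 * entries are kept as TO_ADD so they can be restored after the reset.
 */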
static void hclge_build_del_list(struct list_head *list,
				 bool is_del_list,
				 struct list_head *tmp_del_list)
{
	struct hclge_mac_node *mac_cfg, *tmp;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		switch (mac_cfg->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_del(&mac_cfg->node);
			list_add_tail(&mac_cfg->node, tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			if (is_del_list) {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
			break;
		}
	}
}
static void hclge_unsync_del_list(struct hclge_vport *vport,
				  int (*unsync)(struct hclge_vport *vport,
						const unsigned char *addr),
				  bool is_del_list,
				  struct list_head *tmp_del_list)
{
	struct hclge_mac_node *mac_cfg, *tmp;
	int ret;

	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
		ret = unsync(vport, mac_cfg->mac_addr);
		if (!ret || ret == -ENOENT) {
			/* clear all mac addrs from hardware, but keep these
			 * mac addrs in the mac list, and restore them after
			 * vf reset finished.
			 */
			if (!is_del_list &&
			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
				mac_cfg->state = HCLGE_MAC_TO_ADD;
			} else {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
		} else if (is_del_list) {
			mac_cfg->state = HCLGE_MAC_TO_DEL;
		}
	}
}
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	if (mac_type == HCLGE_MAC_ADDR_UC) {
		list = &vport->uc_mac_list;
		unsync = hclge_rm_uc_addr_common;
	} else {
		list = &vport->mc_mac_list;
		unsync = hclge_rm_mc_addr_common;
	}

	INIT_LIST_HEAD(&tmp_del_list);

	if (!is_del_list)
		set_bit(vport->vport_id, hdev->vport_config_block);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_build_del_list(list, is_del_list, &tmp_del_list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);

	spin_unlock_bh(&vport->mac_list_lock);
}
/* remove all mac addresses when uninitializing */
static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	INIT_LIST_HEAD(&tmp_del_list);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			list_del(&mac_node->node);
			kfree(mac_node);
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	if (mac_type == HCLGE_MAC_ADDR_UC)
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_uc_addr_common);
	else
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_mc_addr_common);

	if (!list_empty(&tmp_del_list))
		dev_warn(&hdev->pdev->dev,
			 "failed to completely uninit %s mac list for vport %u\n",
			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
			 vport->vport_id);

	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}
static void hclge_uninit_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
	}
}
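/* Decode the firmware response for a MAC ethertype (manager table) add.
 * "already added" is not treated as an error; overflow, key conflict and
 * unknown codes all map to -EIO after logging.
 */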
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%u.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}
static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
				     u8 *mac_addr)
{
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int i;

	if (is_zero_ether_addr(mac_addr))
		return false;

	memset(&req, 0, sizeof(req));
	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
	req.egress_port = cpu_to_le16(egress_port);
	hclge_prepare_mac_addr(&req, mac_addr, false);

	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
		return true;

	vf_idx += HCLGE_VF_VPORT_START_NUM;
	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
		if (i != vf_idx &&
		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
			return true;

	return false;
}
static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
			    u8 *mac_addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
		dev_info(&hdev->pdev->dev,
			 "Specified MAC(=%pM) is same as before, no change committed!\n",
			 mac_addr);
		return 0;
	}

	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
			mac_addr);
		return -EEXIST;
	}

	ether_addr_copy(vport->vf_info.mac, mac_addr);

	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		dev_info(&hdev->pdev->dev,
			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
			 vf, mac_addr);
		return hclge_inform_reset_assert_to_vf(vport);
	}

	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
		 vf, mac_addr);
	return 0;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}
static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
				       const u8 *old_addr, const u8 *new_addr)
{
	struct list_head *list = &vport->uc_mac_list;
	struct hclge_mac_node *old_node, *new_node;

	new_node = hclge_find_mac_node(list, new_addr);
	if (!new_node) {
		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
		if (!new_node)
			return -ENOMEM;

		new_node->state = HCLGE_MAC_TO_ADD;
		ether_addr_copy(new_node->mac_addr, new_addr);
		list_add(&new_node->node, list);
	} else {
		if (new_node->state == HCLGE_MAC_TO_DEL)
			new_node->state = HCLGE_MAC_ACTIVE;

		/* make sure the new addr is at the list head: after a
		 * global/IMP reset clears the hardware mac table, the dev
		 * addr might otherwise not be re-added because of the umv
		 * space limitation.
		 */
		list_move(&new_node->node, list);
	}

	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
		old_node = hclge_find_mac_node(list, old_addr);
		if (old_node) {
			if (old_node->state == HCLGE_MAC_TO_ADD) {
				list_del(&old_node->node);
				kfree(old_node);
			} else {
				old_node->state = HCLGE_MAC_TO_DEL;
			}
		}
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	return 0;
}
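/* Change the device (netdev) MAC address. The pause address is updated
 * first; the node update and the copy into hdev->hw.mac.mac_addr then
 * happen under mac_list_lock so a concurrent set_rx_mode cannot remove
 * the new address in between.
 */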
static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned char *old_addr = NULL;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"change uc mac err! invalid mac: %pM.\n",
			new_addr);
		return -EINVAL;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to configure mac pause address, ret = %d\n",
			ret);
		return ret;
	}

	if (!is_first)
		old_addr = hdev->hw.mac.mac_addr;

	spin_lock_bh(&vport->mac_list_lock);
	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to change the mac addr:%pM, ret = %d\n",
			new_addr, ret);
		spin_unlock_bh(&vport->mac_list_lock);

		if (!is_first)
			hclge_pause_addr_cfg(hdev, old_addr);

		return ret;
	}
	/* we must update dev addr with spin lock protect, preventing dev addr
	 * being removed by set_rx_mode path.
	 */
	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
	spin_unlock_bh(&vport->mac_list_lock);

	hclge_task_schedule(hdev, 0);

	return 0;
}
static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);

	if (!hnae3_dev_phy_imp_supported(hdev))
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hdev->hw.mac.phy_addr;
		/* this command reads phy id and register at the same time */
		fallthrough;
	case SIOCGMIIREG:
		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
		return 0;

	case SIOCSMIIREG:
		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
	default:
		return -EOPNOTSUPP;
	}
}
static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return hclge_mii_ioctl(hdev, ifr, cmd);

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
					     bool bypass_en)
{
	struct hclge_port_vlan_filter_bypass_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
	req->vf_id = vf_id;
	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
		      bypass_en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
			vf_id, ret);

	return ret;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	/* read current vlan filter parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vlan filter config, ret = %d.\n", ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_cmd_reuse_desc(&desc, false);
	req->vlan_fe = filter_en ?
		(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
			ret);

	return ret;
}
#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
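/* Egress filtering is configured per function (VF); ingress filtering is a
 * port-level setting and is only touched via the PF (vport 0), or via the
 * per-port bypass command on hardware that supports it.
 */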
static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						  HCLGE_FILTER_FE_EGRESS_V1_B,
						  enable, vport->vport_id);

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					 HCLGE_FILTER_FE_EGRESS, enable,
					 vport->vport_id);
	if (ret)
		return ret;

	if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps))
		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
							!enable);
	else if (!vport->vport_id)
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS,
						 enable, 0);

	return ret;
}
static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	if (vport->vport_id) {
		if (vport->port_base_vlan_cfg.state !=
		    HNAE3_PORT_BASE_VLAN_DISABLE)
			return true;

		if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
			return false;
	} else if (handle->netdev_flags & HNAE3_USER_UPE) {
		return false;
	}

	if (!vport->req_vlan_fltr_en)
		return false;

	/* compatible with former device, always enable vlan filter */
	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
		return true;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
		if (vlan->vlan_id != 0)
			return true;

	return false;
}
int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
{
	struct hclge_dev *hdev = vport->back;
	bool need_en;
	int ret;

	mutex_lock(&hdev->vport_lock);

	vport->req_vlan_fltr_en = request_en;

	need_en = hclge_need_enable_vport_vlan_filter(vport);
	if (need_en == vport->cur_vlan_fltr_en) {
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	ret = hclge_set_vport_vlan_filter(vport, need_en);
	if (ret) {
		mutex_unlock(&hdev->vport_lock);
		return ret;
	}

	vport->cur_vlan_fltr_en = need_en;

	mutex_unlock(&hdev->vport_lock);

	return 0;
}
static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_enable_vport_vlan_filter(vport, enable);
}
static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
					bool is_kill, u16 vlan,
					struct hclge_desc *desc)
{
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
					  bool is_kill, struct hclge_desc *desc)
{
	struct hclge_vlan_filter_vf_cfg_cmd *req;

	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req->resp_code || req->resp_code == 1)
			return 0;

		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			set_bit(vfid, hdev->vf_vlan_full);
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%u.\n",
			req->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req->resp_code)
			return 0;

		/* vf vlan filter is disabled when vf vlan table is full,
		 * then new vlan id will not be added into vf vlan table.
		 * Just return 0 without warning, avoid massive verbose
		 * print logs when unload.
		 */
		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%u.\n",
			req->resp_code);
	}

	return -EIO;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
				    bool is_kill, u16 vlan)
{
	struct hclge_vport *vport = &hdev->vport[vfid];
	struct hclge_desc desc[2];
	int ret;

	/* if the vf vlan table is full, firmware will close the vf vlan
	 * filter; it is then pointless to add a new vlan id to the vf vlan
	 * filter. If spoof check is enabled and the vf vlan table is full,
	 * new vlans must not be added, because tx packets with these vlan
	 * ids would be dropped.
	 */
	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
		if (vport->vf_info.spoofchk && vlan) {
			dev_err(&hdev->pdev->dev,
				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
			return -EPERM;
		}
		return 0;
	}

	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
	if (ret)
		return ret;

	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
}
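/* The PF VLAN filter table is programmed as a bitmap: vlan_offset selects
 * a block of HCLGE_VLAN_ID_OFFSET_STEP VLAN IDs, and a single bit inside
 * the block's byte bitmap selects the VLAN itself.
 */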
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
			   HCLGE_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %u vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %u is already in vlan %u\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %u is not in vlan %u\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
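/* Tag1/tag2 below are the two VLAN tags the hardware can accept, insert or
 * strip; judging by the port-based VLAN handling in this file, tag1 appears
 * to be the outer tag and tag2 the inner one. The bitmap at the end of the
 * descriptor selects which VF the vtag configuration applies to.
 */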
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
		      vcfg->tag_shift_mode_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
		     HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
		      vcfg->strip_tag1_discard_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
		      vcfg->strip_tag2_discard_en ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
		     HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag, u8 qos)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);

		vport->txvlan_cfg.accept_tag1 =
			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
						 vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20); newer revisions support them, and
	 * these two fields cannot be configured by users.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;
	vport->txvlan_cfg.tag_shift_mode_en = true;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.strip_tag2_discard_en = true;
	}

	vport->rxvlan_cfg.strip_tag1_discard_en = false;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
			vport->cur_vlan_fltr_en = true;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;
		u8 qos;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
		qos = vport->port_base_vlan_cfg.vlan_info.qos;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag, qos);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
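/* Each vport keeps a vlan_list shadowing what the stack has requested.
 * hd_tbl_status records whether an entry has actually been written to the
 * hardware VLAN filter table; entries are flushed or restored from this
 * list around VF reset and port-based VLAN changes.
 */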
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}
static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	clear_bit(vport->vport_id, hdev->vf_vlan_full);
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}
void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	u16 vlan_proto;
	u16 vlan_id;
	u16 state;
	int ret;

	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
	state = vport->port_base_vlan_cfg.state;

	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
					 vport->vport_id, vlan_id,
					 false);
		return;
	}

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
					       vport->vport_id,
					       vlan->vlan_id, false);
		if (ret)
			break;
		vlan->hd_tbl_status = true;
	}
}
/* For global reset and imp reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, then they
 * can be restored in the service task after reset completes. Furthermore,
 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
 * restore after reset, so just remove these mac nodes from mac_list.
 */
static void hclge_mac_node_convert_for_reset(struct list_head *list)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_ADD;
		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}
void hclge_restore_mac_table_common(struct hclge_vport *vport)
{
	spin_lock_bh(&vport->mac_list_lock);

	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	spin_unlock_bh(&vport->mac_list_lock);
}
static void hclge_restore_hw_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;

	hclge_restore_mac_table_common(vport);
	hclge_restore_vport_vlan_table(vport);
	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	hclge_restore_fd_entries(handle);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
		vport->rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.strip_tag2_discard_en = true;
	}

	vport->rxvlan_cfg.strip_tag1_discard_en = false;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
}
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		/* force clear VLAN 0 */
		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
		if (ret)
			return ret;
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						false);
	}

	/* force add VLAN 0 */
	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}
static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
					  const struct hclge_vlan_info *old_cfg)
{
	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
		return true;

	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
		return true;

	return false;
}
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
				     vlan_info->qos);
	if (ret)
		return ret;

	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
		goto out;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		if (old_vlan_info->vlan_tag == 0)
			ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
						       true, 0);
		else
			ret = hclge_set_vlan_filter_hw(hdev,
						       htons(ETH_P_8021Q),
						       vport->vport_id,
						       old_vlan_info->vlan_tag,
						       true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to clear vport%u port base vlan %u, ret = %d.\n",
				vport->vport_id, old_vlan_info->vlan_tag, ret);
			return ret;
		}

		goto out;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

out:
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
	hclge_set_vport_vlan_fltr_change(vport);

	return 0;
}
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan, u8 qos)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan && !qos)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;

		return HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	if (!vlan && !qos)
		return HNAE3_PORT_BASE_VLAN_DISABLE;

	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
		return HNAE3_PORT_BASE_VLAN_NOCHANGE;

	return HNAE3_PORT_BASE_VLAN_MODIFY;
}
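/* .ndo_set_vf_vlan handler: validate the request, work out whether the
 * port-based VLAN is being enabled, disabled or modified, reprogram the
 * filter entries accordingly, and (before DEVICE_VERSION_V3) notify the
 * VF through the mailbox so it can track the change.
 */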
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vfid);
	if (!vport)
		return -EINVAL;

	/* qos is a 3 bits value, so can not be bigger than 7 */
	if (vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan, qos);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to update port base vlan for vf %d, ret = %d\n",
			vfid, ret);
		return ret;
	}

	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
	 * VLAN state.
	 */
	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
						  vport->vport_id, state,
						  &vlan_info);

	return 0;
}
static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
{
	struct hclge_vlan_info *vlan_info;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* clear port base vlan for all vf */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		vport = &hdev->vport[vf];
		vlan_info = &vport->port_base_vlan_cfg.vlan_info;

		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
					       vport->vport_id,
					       vlan_info->vlan_tag, true);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"failed to clear vf vlan for vf%d, ret = %d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When the device is resetting or reset failed, firmware is unable
	 * to handle mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
		return -EBUSY;
	}

	/* when port base vlan is enabled, we use the port base vlan as the
	 * vlan filter entry. In this case, we don't update the vlan filter
	 * table when the user adds a new vlan or removes an existing vlan;
	 * we just update the vport vlan list. The vlan ids in the vlan list
	 * will be written to the vlan filter table once port base vlan is
	 * disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, is_kill);
		writen_to_tbl = true;
	}

	if (!ret) {
		if (is_kill)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
		else
			hclge_add_vport_vlan_table(vport, vlan_id,
						   writen_to_tbl);
	} else if (is_kill) {
		/* when removing the hw vlan filter failed, record the vlan
		 * id, and try to remove it from hw later, to be consistent
		 * with the stack.
		 */
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
	}

	hclge_set_vport_vlan_fltr_change(vport);

	return ret;
}
static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	u16 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
					&vport->state))
			continue;

		ret = hclge_enable_vport_vlan_filter(vport,
						     vport->req_vlan_fltr_en);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to sync vlan filter state for vport%u, ret = %d\n",
				vport->vport_id, ret);
			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
				&vport->state);
			return;
		}
	}
}
static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT	60

	int i, ret, sync_cnt = 0;
	u16 vlan_id;

	/* start from vport 1; the PF is always alive */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
					 VLAN_N_VID);
		while (vlan_id != VLAN_N_VID) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id, vlan_id,
						       true);
			if (ret && ret != -EINVAL)
				return;

			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
			hclge_set_vport_vlan_fltr_change(vport);

			sync_cnt++;
			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
				return;

			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
						 VLAN_N_VID);
		}
	}

	hclge_sync_vlan_fltr_state(hdev);
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret;

	/* HW supports 2 layers of vlan */
	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be no less than any VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
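/* TQP reset is a three-step handshake per queue: assert the reset request,
 * poll the ready_to_reset status until hardware confirms, then deassert.
 * On firmware that supports it, the single RCB reset command further below
 * replaces this per-queue loop.
 */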
static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);
	if (enable)
		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}
static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		queue_gid = hclge_covert_handle_qid_global(handle, i);
		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to send reset tqp cmd, ret = %d\n",
				ret);
			return ret;
		}

		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
			reset_status = hclge_get_reset_status(hdev, queue_gid);
			if (reset_status)
				break;

			/* Wait for tqp hw reset */
			usleep_range(1000, 1200);
		}

		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
			dev_err(&hdev->pdev->dev,
				"wait for tqp hw reset timeout\n");
			return -ETIME;
		}

		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to deassert soft reset, ret = %d\n",
				ret);
			return ret;
		}
		reset_try_times = 0;
	}
	return 0;
}
static int hclge_reset_rcb(struct hnae3_handle *handle)
{
#define HCLGE_RESET_RCB_NOT_SUPPORT	0U
#define HCLGE_RESET_RCB_SUCCESS		1U

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_reset_cmd *req;
	struct hclge_desc desc;
	u8 return_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(handle, 0);

	req = (struct hclge_reset_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to send rcb reset cmd, ret = %d\n", ret);
		return ret;
	}

	return_status = req->fun_reset_rcb_return_status;
	if (return_status == HCLGE_RESET_RCB_SUCCESS)
		return 0;

	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
			return_status);
		return -EIO;
	}

	/* if the rcb reset cmd is unsupported, we need to send the reset tqp
	 * cmd again to reset all tqps
	 */
	return hclge_reset_tqp_cmd(handle);
}
int hclge_reset_tqp(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* only need to disable PF's tqp */
	if (!vport->vport_id) {
		ret = hclge_tqp_enable(handle, false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to disable tqp, ret = %d\n", ret);
			return ret;
		}
	}

	return hclge_reset_rcb(handle);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}
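/* Flow control: the advertised pause capabilities are pushed to the PHY,
 * and the resolved rx/tx pause state is applied to the MAC. With a PHY
 * (or an IMP-managed PHY) and autoneg on, the result follows 802.3 pause
 * resolution of the local and link-partner advertisements.
 */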
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n", ret);

	return ret;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 media_type = hdev->hw.mac.media_type;

	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
		    hclge_get_autoneg(handle) : 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}
static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
					 u32 rx_en, u32 tx_en)
{
	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
}
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
		fc_autoneg = hclge_get_autoneg(handle);
		if (auto_neg != fc_autoneg) {
			dev_info(&hdev->pdev->dev,
				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
			return -EOPNOTSUPP;
		}
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	hclge_record_user_pauseparam(hdev, rx_en, tx_en);

	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	return -EOPNOTSUPP;
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* When the nic is down, the service task is not running and does not
	 * update the port information every second. Query the port information
	 * before returning the media type to ensure it is correct.
	 */
	hclge_update_port_info(hdev);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, is_resolved;
	unsigned int retval;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
10980 static void hclge_info_show(struct hclge_dev *hdev)
10982 struct device *dev = &hdev->pdev->dev;
10984 dev_info(dev, "PF info begin:\n");
10986 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10987 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10988 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10989 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10990 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10991 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10992 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10993 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10994 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10995 dev_info(dev, "This is %s PF\n",
10996 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10997 dev_info(dev, "DCB %s\n",
10998 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10999 dev_info(dev, "MQPRIO %s\n",
11000 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11002 dev_info(dev, "PF info end.\n");
11005 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11006 struct hclge_vport *vport)
11008 struct hnae3_client *client = vport->nic.client;
11009 struct hclge_dev *hdev = ae_dev->priv;
11010 int rst_cnt = hdev->rst_stats.reset_cnt;
11013 ret = client->ops->init_instance(&vport->nic);
11017 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11018 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11019 rst_cnt != hdev->rst_stats.reset_cnt) {
11024 /* Enable nic hw error interrupts */
11025 ret = hclge_config_nic_hw_error(hdev, true);
11027 dev_err(&ae_dev->pdev->dev,
11028 "fail(%d) to enable hw error interrupts\n", ret);
11032 hnae3_set_client_init_flag(client, ae_dev, 1);
11034 if (netif_msg_drv(&hdev->vport->nic))
11035 hclge_info_show(hdev);
11040 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11041 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11042 msleep(HCLGE_WAIT_RESET_DONE);
11044 client->ops->uninit_instance(&vport->nic, 0);
11049 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11050 struct hclge_vport *vport)
11052 struct hclge_dev *hdev = ae_dev->priv;
11053 struct hnae3_client *client;
11057 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11061 client = hdev->roce_client;
11062 ret = hclge_init_roce_base_info(vport);
11066 rst_cnt = hdev->rst_stats.reset_cnt;
11067 ret = client->ops->init_instance(&vport->roce);
11071 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11072 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11073 rst_cnt != hdev->rst_stats.reset_cnt) {
11075 goto init_roce_err;
11078 /* Enable roce ras interrupts */
11079 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11081 dev_err(&ae_dev->pdev->dev,
11082 "fail(%d) to enable roce ras interrupts\n", ret);
11083 goto init_roce_err;
11086 hnae3_set_client_init_flag(client, ae_dev, 1);
11091 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11092 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11093 msleep(HCLGE_WAIT_RESET_DONE);
11095 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11100 static int hclge_init_client_instance(struct hnae3_client *client,
11101 struct hnae3_ae_dev *ae_dev)
11103 struct hclge_dev *hdev = ae_dev->priv;
11104 struct hclge_vport *vport = &hdev->vport[0];
11107 switch (client->type) {
11108 case HNAE3_CLIENT_KNIC:
11109 hdev->nic_client = client;
11110 vport->nic.client = client;
11111 ret = hclge_init_nic_client_instance(ae_dev, vport);
11115 ret = hclge_init_roce_client_instance(ae_dev, vport);
11120 case HNAE3_CLIENT_ROCE:
11121 if (hnae3_dev_roce_supported(hdev)) {
11122 hdev->roce_client = client;
11123 vport->roce.client = client;
11126 ret = hclge_init_roce_client_instance(ae_dev, vport);
11138 hdev->nic_client = NULL;
11139 vport->nic.client = NULL;
11142 hdev->roce_client = NULL;
11143 vport->roce.client = NULL;
11147 static void hclge_uninit_client_instance(struct hnae3_client *client,
11148 struct hnae3_ae_dev *ae_dev)
11150 struct hclge_dev *hdev = ae_dev->priv;
11151 struct hclge_vport *vport = &hdev->vport[0];
11153 if (hdev->roce_client) {
11154 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11155 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11156 msleep(HCLGE_WAIT_RESET_DONE);
11158 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11159 hdev->roce_client = NULL;
11160 vport->roce.client = NULL;
11162 if (client->type == HNAE3_CLIENT_ROCE)
11164 if (hdev->nic_client && client->ops->uninit_instance) {
11165 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11166 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11167 msleep(HCLGE_WAIT_RESET_DONE);
11169 client->ops->uninit_instance(&vport->nic, 0);
11170 hdev->nic_client = NULL;
11171 vport->nic.client = NULL;
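/* Map the optional device memory BAR (BAR 4) write-combined; devices
 * that do not expose this BAR simply skip the mapping.
 */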
11175 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11177 #define HCLGE_MEM_BAR 4
11179 struct pci_dev *pdev = hdev->pdev;
11180 struct hclge_hw *hw = &hdev->hw;
11182 /* for devices that do not have device memory, return directly */
11183 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11186 hw->mem_base = devm_ioremap_wc(&pdev->dev,
11187 pci_resource_start(pdev, HCLGE_MEM_BAR),
11188 pci_resource_len(pdev, HCLGE_MEM_BAR));
11189 if (!hw->mem_base) {
11190 dev_err(&pdev->dev, "failed to map device memory\n");
11197 static int hclge_pci_init(struct hclge_dev *hdev)
11199 struct pci_dev *pdev = hdev->pdev;
11200 struct hclge_hw *hw;
11203 ret = pci_enable_device(pdev);
11205 dev_err(&pdev->dev, "failed to enable PCI device\n");
11209 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11211 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11213 dev_err(&pdev->dev,
11214 "can't set consistent PCI DMA");
11215 goto err_disable_device;
11217 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11220 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11222 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11223 goto err_disable_device;
11226 pci_set_master(pdev);
11227 hw = &hdev->hw;
11228 hw->io_base = pcim_iomap(pdev, 2, 0);
11229 if (!hw->io_base) {
11230 dev_err(&pdev->dev, "Can't map configuration register space\n");
11232 goto err_clr_master;
11235 ret = hclge_dev_mem_map(hdev);
11237 goto err_unmap_io_base;
11239 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11244 pcim_iounmap(pdev, hdev->hw.io_base);
11246 pci_clear_master(pdev);
11247 pci_release_regions(pdev);
11248 err_disable_device:
11249 pci_disable_device(pdev);
11254 static void hclge_pci_uninit(struct hclge_dev *hdev)
11256 struct pci_dev *pdev = hdev->pdev;
11258 if (hdev->hw.mem_base)
11259 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11261 pcim_iounmap(pdev, hdev->hw.io_base);
11262 pci_free_irq_vectors(pdev);
11263 pci_clear_master(pdev);
11264 pci_release_mem_regions(pdev);
11265 pci_disable_device(pdev);
11268 static void hclge_state_init(struct hclge_dev *hdev)
11270 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11271 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11272 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11273 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11274 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11275 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11276 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11279 static void hclge_state_uninit(struct hclge_dev *hdev)
11281 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11282 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11284 if (hdev->reset_timer.function)
11285 del_timer_sync(&hdev->reset_timer);
11286 if (hdev->service_task.work.func)
11287 cancel_delayed_work_sync(&hdev->service_task);
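/* Quiesce the device before a reset driven through the ae_dev interface:
 * take the reset semaphore, run hclge_reset_prepare(), and retry
 * (releasing the semaphore and sleeping HCLGE_RESET_RETRY_WAIT_MS)
 * while a reset is still pending or until HCLGE_RESET_RETRY_CNT
 * attempts have failed. The misc vector and command queue stay
 * disabled until hclge_reset_done() runs.
 */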
11290 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11291 enum hnae3_reset_type rst_type)
11293 #define HCLGE_RESET_RETRY_WAIT_MS 500
11294 #define HCLGE_RESET_RETRY_CNT 5
11296 struct hclge_dev *hdev = ae_dev->priv;
11301 down(&hdev->reset_sem);
11302 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11303 hdev->reset_type = rst_type;
11304 ret = hclge_reset_prepare(hdev);
11305 if (ret || hdev->reset_pending) {
11306 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11308 if (hdev->reset_pending ||
11309 retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11310 dev_err(&hdev->pdev->dev,
11311 "reset_pending:0x%lx, retry_cnt:%d\n",
11312 hdev->reset_pending, retry_cnt);
11313 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11314 up(&hdev->reset_sem);
11315 msleep(HCLGE_RESET_RETRY_WAIT_MS);
11320 /* disable misc vector before reset done */
11321 hclge_enable_vector(&hdev->misc_vector, false);
11322 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11324 if (hdev->reset_type == HNAE3_FLR_RESET)
11325 hdev->rst_stats.flr_rst_cnt++;
11328 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11330 struct hclge_dev *hdev = ae_dev->priv;
11333 hclge_enable_vector(&hdev->misc_vector, true);
11335 ret = hclge_reset_rebuild(hdev);
11337 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11339 hdev->reset_type = HNAE3_NONE_RESET;
11340 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11341 up(&hdev->reset_sem);
11344 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11348 for (i = 0; i < hdev->num_alloc_vport; i++) {
11349 struct hclge_vport *vport = &hdev->vport[i];
11352 /* Send cmd to clear VF's FUNC_RST_ING */
11353 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11355 dev_warn(&hdev->pdev->dev,
11356 "clear vf(%u) rst failed %d!\n",
11357 vport->vport_id, ret);
11361 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11363 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11364 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11367 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11369 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11370 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
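/* Main PF initialization path. The ordering below matters: PCI and the
 * firmware command queue come up first, then capabilities and device
 * specs are queried, MSI/MSI-X and the misc IRQ are set up, TQPs and
 * vports are allocated and mapped, and finally MAC, VLAN, TM, RSS and
 * flow-director configuration is applied before the service task starts.
 */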
11373 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11375 struct pci_dev *pdev = ae_dev->pdev;
11376 struct hclge_dev *hdev;
11379 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11384 hdev->ae_dev = ae_dev;
11385 hdev->reset_type = HNAE3_NONE_RESET;
11386 hdev->reset_level = HNAE3_FUNC_RESET;
11387 ae_dev->priv = hdev;
11389 /* HW supports 2 VLAN layers */
11390 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11392 mutex_init(&hdev->vport_lock);
11393 spin_lock_init(&hdev->fd_rule_lock);
11394 sema_init(&hdev->reset_sem, 1);
11396 ret = hclge_pci_init(hdev);
11400 /* Initialize the firmware command queue */
11401 ret = hclge_cmd_queue_init(hdev);
11403 goto err_pci_uninit;
11405 /* Initialize the firmware command interface */
11406 ret = hclge_cmd_init(hdev);
11408 goto err_cmd_uninit;
11410 ret = hclge_get_cap(hdev);
11412 goto err_cmd_uninit;
11414 ret = hclge_query_dev_specs(hdev);
11416 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11418 goto err_cmd_uninit;
11421 ret = hclge_configure(hdev);
11423 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11424 goto err_cmd_uninit;
11427 ret = hclge_init_msi(hdev);
11429 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11430 goto err_cmd_uninit;
11433 ret = hclge_misc_irq_init(hdev);
11435 goto err_msi_uninit;
11437 ret = hclge_alloc_tqps(hdev);
11439 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11440 goto err_msi_irq_uninit;
11443 ret = hclge_alloc_vport(hdev);
11445 goto err_msi_irq_uninit;
11447 ret = hclge_map_tqp(hdev);
11449 goto err_msi_irq_uninit;
11451 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11452 !hnae3_dev_phy_imp_supported(hdev)) {
11453 ret = hclge_mac_mdio_config(hdev);
11455 goto err_msi_irq_uninit;
11458 ret = hclge_init_umv_space(hdev);
11460 goto err_mdiobus_unreg;
11462 ret = hclge_mac_init(hdev);
11464 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11465 goto err_mdiobus_unreg;
11468 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11470 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11471 goto err_mdiobus_unreg;
11474 ret = hclge_config_gro(hdev, true);
11476 goto err_mdiobus_unreg;
11478 ret = hclge_init_vlan_config(hdev);
11480 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11481 goto err_mdiobus_unreg;
11484 ret = hclge_tm_schd_init(hdev);
11486 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11487 goto err_mdiobus_unreg;
11490 ret = hclge_rss_init_cfg(hdev);
11492 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11493 goto err_mdiobus_unreg;
11496 ret = hclge_rss_init_hw(hdev);
11498 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11499 goto err_mdiobus_unreg;
11502 ret = init_mgr_tbl(hdev);
11504 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11505 goto err_mdiobus_unreg;
11508 ret = hclge_init_fd_config(hdev);
11510 dev_err(&pdev->dev,
11511 "fd table init fail, ret=%d\n", ret);
11512 goto err_mdiobus_unreg;
11515 INIT_KFIFO(hdev->mac_tnl_log);
11517 hclge_dcb_ops_set(hdev);
11519 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11520 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11522 /* Set up affinity after the service timer is set up, because
11523 * add_timer_on is called in the affinity notifier.
11525 hclge_misc_affinity_setup(hdev);
11527 hclge_clear_all_event_cause(hdev);
11528 hclge_clear_resetting_state(hdev);
11530 /* Log and clear the hw errors that have already occurred */
11531 hclge_handle_all_hns_hw_errors(ae_dev);
11533 /* Request a delayed reset for the error recovery, because an immediate
11534 * global reset on a PF would affect the pending initialization of other PFs.
11536 if (ae_dev->hw_err_reset_req) {
11537 enum hnae3_reset_type reset_level;
11539 reset_level = hclge_get_reset_level(ae_dev,
11540 &ae_dev->hw_err_reset_req);
11541 hclge_set_def_reset_request(ae_dev, reset_level);
11542 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11545 hclge_init_rxd_adv_layout(hdev);
11547 /* Enable MISC vector(vector0) */
11548 hclge_enable_vector(&hdev->misc_vector, true);
11550 hclge_state_init(hdev);
11551 hdev->last_reset_time = jiffies;
11553 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11554 HCLGE_DRIVER_NAME);
11556 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11561 if (hdev->hw.mac.phydev)
11562 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11563 err_msi_irq_uninit:
11564 hclge_misc_irq_uninit(hdev);
11566 pci_free_irq_vectors(pdev);
11568 hclge_cmd_uninit(hdev);
11570 pcim_iounmap(pdev, hdev->hw.io_base);
11571 pci_clear_master(pdev);
11572 pci_release_regions(pdev);
11573 pci_disable_device(pdev);
11575 mutex_destroy(&hdev->vport_lock);
11579 static void hclge_stats_clear(struct hclge_dev *hdev)
11581 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11584 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11586 return hclge_config_switch_param(hdev, vf, enable,
11587 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11590 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11592 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11593 HCLGE_FILTER_FE_NIC_INGRESS_B,
11597 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11601 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11603 dev_err(&hdev->pdev->dev,
11604 "Set vf %d mac spoof check %s failed, ret=%d\n",
11605 vf, enable ? "on" : "off", ret);
11609 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11611 dev_err(&hdev->pdev->dev,
11612 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11613 vf, enable ? "on" : "off", ret);
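/* Spoof check is enforced by two knobs that are always toggled together:
 * the MAC anti-spoof switch parameter and the VF VLAN ingress filter.
 */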
11618 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11621 struct hclge_vport *vport = hclge_get_vport(handle);
11622 struct hclge_dev *hdev = vport->back;
11623 u32 new_spoofchk = enable ? 1 : 0;
11626 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11627 return -EOPNOTSUPP;
11629 vport = hclge_get_vf_vport(hdev, vf);
11633 if (vport->vf_info.spoofchk == new_spoofchk)
11636 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11637 dev_warn(&hdev->pdev->dev,
11638 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11640 else if (enable && hclge_is_umv_space_full(vport, true))
11641 dev_warn(&hdev->pdev->dev,
11642 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11645 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11649 vport->vf_info.spoofchk = new_spoofchk;
11653 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11655 struct hclge_vport *vport = hdev->vport;
11659 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11662 /* resume the vf spoof check state after reset */
11663 for (i = 0; i < hdev->num_alloc_vport; i++) {
11664 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11665 vport->vf_info.spoofchk);
11675 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11677 struct hclge_vport *vport = hclge_get_vport(handle);
11678 struct hclge_dev *hdev = vport->back;
11679 u32 new_trusted = enable ? 1 : 0;
11681 vport = hclge_get_vf_vport(hdev, vf);
11685 if (vport->vf_info.trusted == new_trusted)
11688 vport->vf_info.trusted = new_trusted;
11689 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11690 hclge_task_schedule(hdev, 0);
11695 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11700 /* reset vf rate to default value */
11701 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11702 struct hclge_vport *vport = &hdev->vport[vf];
11704 vport->vf_info.max_tx_rate = 0;
11705 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11707 dev_err(&hdev->pdev->dev,
11708 "vf%d failed to reset to default, ret=%d\n",
11709 vf - HCLGE_VF_VPORT_START_NUM, ret);
11713 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11714 int min_tx_rate, int max_tx_rate)
11716 if (min_tx_rate != 0 ||
11717 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11718 dev_err(&hdev->pdev->dev,
11719 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11720 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11727 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11728 int min_tx_rate, int max_tx_rate, bool force)
11730 struct hclge_vport *vport = hclge_get_vport(handle);
11731 struct hclge_dev *hdev = vport->back;
11734 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11738 vport = hclge_get_vf_vport(hdev, vf);
11742 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11745 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11749 vport->vf_info.max_tx_rate = max_tx_rate;
11754 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11756 struct hnae3_handle *handle = &hdev->vport->nic;
11757 struct hclge_vport *vport;
11761 /* resume the vf max_tx_rate after reset */
11762 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11763 vport = hclge_get_vf_vport(hdev, vf);
11767 /* Zero means max rate; after reset, the firmware has already set it to
11768 * max rate, so just continue.
11770 if (!vport->vf_info.max_tx_rate)
11773 ret = hclge_set_vf_rate(handle, vf, 0,
11774 vport->vf_info.max_tx_rate, true);
11776 dev_err(&hdev->pdev->dev,
11777 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11778 vf, vport->vf_info.max_tx_rate, ret);
11786 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11788 struct hclge_vport *vport = hdev->vport;
11791 for (i = 0; i < hdev->num_alloc_vport; i++) {
11792 hclge_vport_stop(vport);
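/* Rebuild the device state after a reset. Only IMP and global resets wipe
 * the hardware VLAN and UMV tables, so only those reset types clear the
 * corresponding software state before the re-init sequence runs.
 */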
11797 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11799 struct hclge_dev *hdev = ae_dev->priv;
11800 struct pci_dev *pdev = ae_dev->pdev;
11803 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11805 hclge_stats_clear(hdev);
11806 /* NOTE: a pf reset does not need to clear or restore the pf and vf
11807 * table entries, so the tables in memory should not be cleaned here.
11809 if (hdev->reset_type == HNAE3_IMP_RESET ||
11810 hdev->reset_type == HNAE3_GLOBAL_RESET) {
11811 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11812 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11813 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11814 hclge_reset_umv_space(hdev);
11817 ret = hclge_cmd_init(hdev);
11819 dev_err(&pdev->dev, "Cmd queue init failed\n");
11823 ret = hclge_map_tqp(hdev);
11825 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11829 ret = hclge_mac_init(hdev);
11831 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11835 ret = hclge_tp_port_init(hdev);
11837 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11842 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11844 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11848 ret = hclge_config_gro(hdev, true);
11852 ret = hclge_init_vlan_config(hdev);
11854 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11858 ret = hclge_tm_init_hw(hdev, true);
11860 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11864 ret = hclge_rss_init_hw(hdev);
11866 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11870 ret = init_mgr_tbl(hdev);
11872 dev_err(&pdev->dev,
11873 "failed to reinit manager table, ret = %d\n", ret);
11877 ret = hclge_init_fd_config(hdev);
11879 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11883 /* Log and clear the hw errors that have already occurred */
11884 hclge_handle_all_hns_hw_errors(ae_dev);
11886 /* Re-enable the hw error interrupts because
11887 * the interrupts get disabled on global reset.
11889 ret = hclge_config_nic_hw_error(hdev, true);
11891 dev_err(&pdev->dev,
11892 "fail(%d) to re-enable NIC hw error interrupts\n",
11897 if (hdev->roce_client) {
11898 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11900 dev_err(&pdev->dev,
11901 "fail(%d) to re-enable roce ras interrupts\n",
11907 hclge_reset_vport_state(hdev);
11908 ret = hclge_reset_vport_spoofchk(hdev);
11912 ret = hclge_resume_vf_rate(hdev);
11916 hclge_init_rxd_adv_layout(hdev);
11918 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11919 HCLGE_DRIVER_NAME);
11924 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11926 struct hclge_dev *hdev = ae_dev->priv;
11927 struct hclge_mac *mac = &hdev->hw.mac;
11929 hclge_reset_vf_rate(hdev);
11930 hclge_clear_vf_vlan(hdev);
11931 hclge_misc_affinity_teardown(hdev);
11932 hclge_state_uninit(hdev);
11933 hclge_uninit_rxd_adv_layout(hdev);
11934 hclge_uninit_mac_table(hdev);
11935 hclge_del_all_fd_entries(hdev);
11938 mdiobus_unregister(mac->mdio_bus);
11940 /* Disable MISC vector(vector0) */
11941 hclge_enable_vector(&hdev->misc_vector, false);
11942 synchronize_irq(hdev->misc_vector.vector_irq);
11944 /* Disable all hw interrupts */
11945 hclge_config_mac_tnl_int(hdev, false);
11946 hclge_config_nic_hw_error(hdev, false);
11947 hclge_config_rocee_ras_interrupt(hdev, false);
11949 hclge_cmd_uninit(hdev);
11950 hclge_misc_irq_uninit(hdev);
11951 hclge_pci_uninit(hdev);
11952 mutex_destroy(&hdev->vport_lock);
11953 hclge_uninit_vport_vlan_table(hdev);
11954 ae_dev->priv = NULL;
11957 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11959 struct hclge_vport *vport = hclge_get_vport(handle);
11960 struct hclge_dev *hdev = vport->back;
11962 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11965 static void hclge_get_channels(struct hnae3_handle *handle,
11966 struct ethtool_channels *ch)
11968 ch->max_combined = hclge_get_max_channels(handle);
11969 ch->other_count = 1;
11971 ch->combined_count = handle->kinfo.rss_size;
11974 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11975 u16 *alloc_tqps, u16 *max_rss_size)
11977 struct hclge_vport *vport = hclge_get_vport(handle);
11978 struct hclge_dev *hdev = vport->back;
11980 *alloc_tqps = vport->alloc_tqps;
11981 *max_rss_size = hdev->pf_rss_size_max;
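/* Changing the channel count re-runs the TM vport mapping to update
 * rss_size, reprograms the per-TC RSS mode with the size rounded up to
 * a power of two, and rebuilds the RSS indirection table unless the
 * user has explicitly configured one (rxfh_configured).
 */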
11984 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11985 bool rxfh_configured)
11987 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11988 struct hclge_vport *vport = hclge_get_vport(handle);
11989 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11990 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11991 struct hclge_dev *hdev = vport->back;
11992 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11993 u16 cur_rss_size = kinfo->rss_size;
11994 u16 cur_tqps = kinfo->num_tqps;
11995 u16 tc_valid[HCLGE_MAX_TC_NUM];
12001 kinfo->req_rss_size = new_tqps_num;
12003 ret = hclge_tm_vport_map_update(hdev);
12005 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12009 roundup_size = roundup_pow_of_two(kinfo->rss_size);
12010 roundup_size = ilog2(roundup_size);
12011 /* Set the RSS TC mode according to the new RSS size */
12012 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12015 if (!(hdev->hw_tc_map & BIT(i)))
12019 tc_size[i] = roundup_size;
12020 tc_offset[i] = kinfo->rss_size * i;
12022 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12026 /* RSS indirection table has been configured by user */
12027 if (rxfh_configured)
12030 /* Reinitialize the RSS indirection table according to the new RSS size */
12031 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12036 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12037 rss_indir[i] = i % kinfo->rss_size;
12039 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12041 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12048 dev_info(&hdev->pdev->dev,
12049 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12050 cur_rss_size, kinfo->rss_size,
12051 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12056 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12057 u32 *regs_num_64_bit)
12059 struct hclge_desc desc;
12063 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12064 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12066 dev_err(&hdev->pdev->dev,
12067 "Query register number cmd failed, ret = %d.\n", ret);
12071 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12072 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12074 total_num = *regs_num_32_bit + *regs_num_64_bit;
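/* Register dump responses arrive as a chain of buffer descriptors that
 * is consumed as a flat stream of values. Only the first descriptor
 * keeps its header words (the *_DESC_NODATA_LEN count), so it yields
 * fewer values; the remaining descriptors are read in full.
 */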
12081 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12084 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12085 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12087 struct hclge_desc *desc;
12088 u32 *reg_val = data;
12098 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12099 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12100 HCLGE_32_BIT_REG_RTN_DATANUM);
12101 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12105 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12106 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12108 dev_err(&hdev->pdev->dev,
12109 "Query 32 bit register cmd failed, ret = %d.\n", ret);
12114 for (i = 0; i < cmd_num; i++) {
12116 desc_data = (__le32 *)(&desc[i].data[0]);
12117 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12119 desc_data = (__le32 *)(&desc[i]);
12120 n = HCLGE_32_BIT_REG_RTN_DATANUM;
12122 for (k = 0; k < n; k++) {
12123 *reg_val++ = le32_to_cpu(*desc_data++);
12135 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12138 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12139 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12141 struct hclge_desc *desc;
12142 u64 *reg_val = data;
12152 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12153 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12154 HCLGE_64_BIT_REG_RTN_DATANUM);
12155 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12159 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12160 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12162 dev_err(&hdev->pdev->dev,
12163 "Query 64 bit register cmd failed, ret = %d.\n", ret);
12168 for (i = 0; i < cmd_num; i++) {
12170 desc_data = (__le64 *)(&desc[i].data[0]);
12171 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12173 desc_data = (__le64 *)(&desc[i]);
12174 n = HCLGE_64_BIT_REG_RTN_DATANUM;
12176 for (k = 0; k < n; k++) {
12177 *reg_val++ = le64_to_cpu(*desc_data++);
12189 #define MAX_SEPARATE_NUM 4
12190 #define SEPARATOR_VALUE 0xFDFCFBFA
12191 #define REG_NUM_PER_LINE 4
12192 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
12193 #define REG_SEPARATOR_LINE 1
12194 #define REG_NUM_REMAIN_MASK 3
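/* Dumped registers are grouped in lines of REG_NUM_PER_LINE u32 values;
 * each block is padded up to a whole line with SEPARATOR_VALUE words so
 * the blocks stay aligned in the output buffer.
 */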
12196 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12200 /* initialize all command BDs except the last one */
12201 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12202 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12204 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12207 /* initialize the last command BD */
12208 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12210 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12213 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12217 u32 entries_per_desc, desc_index, index, offset, i;
12218 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12221 ret = hclge_query_bd_num_cmd_send(hdev, desc);
12223 dev_err(&hdev->pdev->dev,
12224 "Get dfx bd num fail, status is %d.\n", ret);
12228 entries_per_desc = ARRAY_SIZE(desc[0].data);
12229 for (i = 0; i < type_num; i++) {
12230 offset = hclge_dfx_bd_offset_list[i];
12231 index = offset % entries_per_desc;
12232 desc_index = offset / entries_per_desc;
12233 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12239 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12240 struct hclge_desc *desc_src, int bd_num,
12241 enum hclge_opcode_type cmd)
12243 struct hclge_desc *desc = desc_src;
12246 hclge_cmd_setup_basic_desc(desc, cmd, true);
12247 for (i = 0; i < bd_num - 1; i++) {
12248 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12250 hclge_cmd_setup_basic_desc(desc, cmd, true);
12254 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12256 dev_err(&hdev->pdev->dev,
12257 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12263 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12266 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12267 struct hclge_desc *desc = desc_src;
12270 entries_per_desc = ARRAY_SIZE(desc->data);
12271 reg_num = entries_per_desc * bd_num;
12272 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12273 for (i = 0; i < reg_num; i++) {
12274 index = i % entries_per_desc;
12275 desc_index = i / entries_per_desc;
12276 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12278 for (i = 0; i < separator_num; i++)
12279 *reg++ = SEPARATOR_VALUE;
12281 return reg_num + separator_num;
12284 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12286 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12287 int data_len_per_desc, bd_num, i;
12292 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12296 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12298 dev_err(&hdev->pdev->dev,
12299 "Get dfx reg bd num fail, status is %d.\n", ret);
12303 data_len_per_desc = sizeof_field(struct hclge_desc, data);
12305 for (i = 0; i < dfx_reg_type_num; i++) {
12306 bd_num = bd_num_list[i];
12307 data_len = data_len_per_desc * bd_num;
12308 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12312 kfree(bd_num_list);
12316 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12318 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12319 int bd_num, bd_num_max, buf_len, i;
12320 struct hclge_desc *desc_src;
12325 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12329 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12331 dev_err(&hdev->pdev->dev,
12332 "Get dfx reg bd num fail, status is %d.\n", ret);
12336 bd_num_max = bd_num_list[0];
12337 for (i = 1; i < dfx_reg_type_num; i++)
12338 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12340 buf_len = sizeof(*desc_src) * bd_num_max;
12341 desc_src = kzalloc(buf_len, GFP_KERNEL);
12347 for (i = 0; i < dfx_reg_type_num; i++) {
12348 bd_num = bd_num_list[i];
12349 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12350 hclge_dfx_reg_opcode_list[i]);
12352 dev_err(&hdev->pdev->dev,
12353 "Get dfx reg fail, status is %d.\n", ret);
12357 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12362 kfree(bd_num_list);
12366 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12367 struct hnae3_knic_private_info *kinfo)
12369 #define HCLGE_RING_REG_OFFSET 0x200
12370 #define HCLGE_RING_INT_REG_OFFSET 0x4
12372 int i, j, reg_num, separator_num;
12376 /* fetch per-PF register values from the PF PCIe register space */
12377 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12378 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12379 for (i = 0; i < reg_num; i++)
12380 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12381 for (i = 0; i < separator_num; i++)
12382 *reg++ = SEPARATOR_VALUE;
12383 data_num_sum = reg_num + separator_num;
12385 reg_num = ARRAY_SIZE(common_reg_addr_list);
12386 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12387 for (i = 0; i < reg_num; i++)
12388 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12389 for (i = 0; i < separator_num; i++)
12390 *reg++ = SEPARATOR_VALUE;
12391 data_num_sum += reg_num + separator_num;
12393 reg_num = ARRAY_SIZE(ring_reg_addr_list);
12394 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12395 for (j = 0; j < kinfo->num_tqps; j++) {
12396 for (i = 0; i < reg_num; i++)
12397 *reg++ = hclge_read_dev(&hdev->hw,
12398 ring_reg_addr_list[i] +
12399 HCLGE_RING_REG_OFFSET * j);
12400 for (i = 0; i < separator_num; i++)
12401 *reg++ = SEPARATOR_VALUE;
12403 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12405 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12406 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12407 for (j = 0; j < hdev->num_msi_used - 1; j++) {
12408 for (i = 0; i < reg_num; i++)
12409 *reg++ = hclge_read_dev(&hdev->hw,
12410 tqp_intr_reg_addr_list[i] +
12411 HCLGE_RING_INT_REG_OFFSET * j);
12412 for (i = 0; i < separator_num; i++)
12413 *reg++ = SEPARATOR_VALUE;
12415 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12417 return data_num_sum;
12420 static int hclge_get_regs_len(struct hnae3_handle *handle)
12422 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12423 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12424 struct hclge_vport *vport = hclge_get_vport(handle);
12425 struct hclge_dev *hdev = vport->back;
12426 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12427 int regs_lines_32_bit, regs_lines_64_bit;
12430 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12432 dev_err(&hdev->pdev->dev,
12433 "Get register number failed, ret = %d.\n", ret);
12437 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12439 dev_err(&hdev->pdev->dev,
12440 "Get dfx reg len failed, ret = %d.\n", ret);
12444 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12445 REG_SEPARATOR_LINE;
12446 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12447 REG_SEPARATOR_LINE;
12448 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12449 REG_SEPARATOR_LINE;
12450 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12451 REG_SEPARATOR_LINE;
12452 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12453 REG_SEPARATOR_LINE;
12454 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12455 REG_SEPARATOR_LINE;
12457 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12458 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12459 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12462 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12465 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12466 struct hclge_vport *vport = hclge_get_vport(handle);
12467 struct hclge_dev *hdev = vport->back;
12468 u32 regs_num_32_bit, regs_num_64_bit;
12469 int i, reg_num, separator_num, ret;
12472 *version = hdev->fw_version;
12474 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12476 dev_err(&hdev->pdev->dev,
12477 "Get register number failed, ret = %d.\n", ret);
12481 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12483 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12485 dev_err(&hdev->pdev->dev,
12486 "Get 32 bit register failed, ret = %d.\n", ret);
12489 reg_num = regs_num_32_bit;
12491 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12492 for (i = 0; i < separator_num; i++)
12493 *reg++ = SEPARATOR_VALUE;
12495 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12497 dev_err(&hdev->pdev->dev,
12498 "Get 64 bit register failed, ret = %d.\n", ret);
12501 reg_num = regs_num_64_bit * 2;
12503 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12504 for (i = 0; i < separator_num; i++)
12505 *reg++ = SEPARATOR_VALUE;
12507 ret = hclge_get_dfx_reg(hdev, reg);
12509 dev_err(&hdev->pdev->dev,
12510 "Get dfx register failed, ret = %d.\n", ret);
12513 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12515 struct hclge_set_led_state_cmd *req;
12516 struct hclge_desc desc;
12519 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12521 req = (struct hclge_set_led_state_cmd *)desc.data;
12522 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12523 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12525 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12527 dev_err(&hdev->pdev->dev,
12528 "Send set led state cmd error, ret =%d\n", ret);
12533 enum hclge_led_status {
12536 HCLGE_LED_NO_CHANGE = 0xFF,
12539 static int hclge_set_led_id(struct hnae3_handle *handle,
12540 enum ethtool_phys_id_state status)
12542 struct hclge_vport *vport = hclge_get_vport(handle);
12543 struct hclge_dev *hdev = vport->back;
12546 case ETHTOOL_ID_ACTIVE:
12547 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12548 case ETHTOOL_ID_INACTIVE:
12549 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12555 static void hclge_get_link_mode(struct hnae3_handle *handle,
12556 unsigned long *supported,
12557 unsigned long *advertising)
12559 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12560 struct hclge_vport *vport = hclge_get_vport(handle);
12561 struct hclge_dev *hdev = vport->back;
12562 unsigned int idx = 0;
12564 for (; idx < size; idx++) {
12565 supported[idx] = hdev->hw.mac.supported[idx];
12566 advertising[idx] = hdev->hw.mac.advertising[idx];
12570 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12572 struct hclge_vport *vport = hclge_get_vport(handle);
12573 struct hclge_dev *hdev = vport->back;
12575 return hclge_config_gro(hdev, enable);
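/* Synchronize promiscuous mode. For vport 0 (the PF) the mode is derived
 * from the netdev flags plus any overflow-promisc requests; for VFs the
 * requested unicast/multicast promisc is applied only when the VF is
 * trusted, while the broadcast request is always honoured.
 */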
12578 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12580 struct hclge_vport *vport = &hdev->vport[0];
12581 struct hnae3_handle *handle = &vport->nic;
12586 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12587 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12588 vport->last_promisc_flags = vport->overflow_promisc_flags;
12591 if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12592 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12593 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12594 tmp_flags & HNAE3_MPE);
12596 clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12598 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12603 for (i = 1; i < hdev->num_alloc_vport; i++) {
12604 bool uc_en = false;
12605 bool mc_en = false;
12608 vport = &hdev->vport[i];
12610 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12614 if (vport->vf_info.trusted) {
12615 uc_en = vport->vf_info.request_uc_en > 0;
12616 mc_en = vport->vf_info.request_mc_en > 0;
12618 bc_en = vport->vf_info.request_bc_en > 0;
12620 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12623 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12627 hclge_set_vport_vlan_fltr_change(vport);
12631 static bool hclge_module_existed(struct hclge_dev *hdev)
12633 struct hclge_desc desc;
12637 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12638 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12640 dev_err(&hdev->pdev->dev,
12641 "failed to get SFP exist state, ret = %d\n", ret);
12645 existed = le32_to_cpu(desc.data[0]);
12647 return existed != 0;
12650 /* One read needs 6 BDs (140 bytes in total).
12651 * Return the number of bytes actually read; 0 means the read failed.
12653 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12656 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12657 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12663 /* setup all 6 bds to read module eeprom info. */
12664 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12665 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12668 /* bd0~bd4 need the next flag set */
12669 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12670 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12673 /* setup bd0, this bd contains offset and read length. */
12674 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12675 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12676 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12677 sfp_info_bd0->read_len = cpu_to_le16(read_len);
12679 ret = hclge_cmd_send(&hdev->hw, desc, i);
12681 dev_err(&hdev->pdev->dev,
12682 "failed to get SFP eeprom info, ret = %d\n", ret);
12686 /* copy sfp info from bd0 to out buffer. */
12687 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12688 memcpy(data, sfp_info_bd0->data, copy_len);
12689 read_len = copy_len;
12691 /* copy sfp info from bd1~bd5 to out buffer if needed. */
12692 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12693 if (read_len >= len)
12696 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12697 memcpy(data + read_len, desc[i].data, copy_len);
12698 read_len += copy_len;
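/* Read the module eeprom in chunks: each firmware query returns at most
 * HCLGE_SFP_INFO_MAX_LEN bytes, so keep reading until the requested
 * length is reached or a read fails.
 */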
12704 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12707 struct hclge_vport *vport = hclge_get_vport(handle);
12708 struct hclge_dev *hdev = vport->back;
12712 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12713 return -EOPNOTSUPP;
12715 if (!hclge_module_existed(hdev))
12718 while (read_len < len) {
12719 data_len = hclge_get_sfp_eeprom_info(hdev,
12726 read_len += data_len;
12732 static const struct hnae3_ae_ops hclge_ops = {
12733 .init_ae_dev = hclge_init_ae_dev,
12734 .uninit_ae_dev = hclge_uninit_ae_dev,
12735 .reset_prepare = hclge_reset_prepare_general,
12736 .reset_done = hclge_reset_done,
12737 .init_client_instance = hclge_init_client_instance,
12738 .uninit_client_instance = hclge_uninit_client_instance,
12739 .map_ring_to_vector = hclge_map_ring_to_vector,
12740 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12741 .get_vector = hclge_get_vector,
12742 .put_vector = hclge_put_vector,
12743 .set_promisc_mode = hclge_set_promisc_mode,
12744 .request_update_promisc_mode = hclge_request_update_promisc_mode,
12745 .set_loopback = hclge_set_loopback,
12746 .start = hclge_ae_start,
12747 .stop = hclge_ae_stop,
12748 .client_start = hclge_client_start,
12749 .client_stop = hclge_client_stop,
12750 .get_status = hclge_get_status,
12751 .get_ksettings_an_result = hclge_get_ksettings_an_result,
12752 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12753 .get_media_type = hclge_get_media_type,
12754 .check_port_speed = hclge_check_port_speed,
12755 .get_fec = hclge_get_fec,
12756 .set_fec = hclge_set_fec,
12757 .get_rss_key_size = hclge_get_rss_key_size,
12758 .get_rss = hclge_get_rss,
12759 .set_rss = hclge_set_rss,
12760 .set_rss_tuple = hclge_set_rss_tuple,
12761 .get_rss_tuple = hclge_get_rss_tuple,
12762 .get_tc_size = hclge_get_tc_size,
12763 .get_mac_addr = hclge_get_mac_addr,
12764 .set_mac_addr = hclge_set_mac_addr,
12765 .do_ioctl = hclge_do_ioctl,
12766 .add_uc_addr = hclge_add_uc_addr,
12767 .rm_uc_addr = hclge_rm_uc_addr,
12768 .add_mc_addr = hclge_add_mc_addr,
12769 .rm_mc_addr = hclge_rm_mc_addr,
12770 .set_autoneg = hclge_set_autoneg,
12771 .get_autoneg = hclge_get_autoneg,
12772 .restart_autoneg = hclge_restart_autoneg,
12773 .halt_autoneg = hclge_halt_autoneg,
12774 .get_pauseparam = hclge_get_pauseparam,
12775 .set_pauseparam = hclge_set_pauseparam,
12776 .set_mtu = hclge_set_mtu,
12777 .reset_queue = hclge_reset_tqp,
12778 .get_stats = hclge_get_stats,
12779 .get_mac_stats = hclge_get_mac_stat,
12780 .update_stats = hclge_update_stats,
12781 .get_strings = hclge_get_strings,
12782 .get_sset_count = hclge_get_sset_count,
12783 .get_fw_version = hclge_get_fw_version,
12784 .get_mdix_mode = hclge_get_mdix_mode,
12785 .enable_vlan_filter = hclge_enable_vlan_filter,
12786 .set_vlan_filter = hclge_set_vlan_filter,
12787 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12788 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12789 .reset_event = hclge_reset_event,
12790 .get_reset_level = hclge_get_reset_level,
12791 .set_default_reset_request = hclge_set_def_reset_request,
12792 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12793 .set_channels = hclge_set_channels,
12794 .get_channels = hclge_get_channels,
12795 .get_regs_len = hclge_get_regs_len,
12796 .get_regs = hclge_get_regs,
12797 .set_led_id = hclge_set_led_id,
12798 .get_link_mode = hclge_get_link_mode,
12799 .add_fd_entry = hclge_add_fd_entry,
12800 .del_fd_entry = hclge_del_fd_entry,
12801 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12802 .get_fd_rule_info = hclge_get_fd_rule_info,
12803 .get_fd_all_rules = hclge_get_all_rules,
12804 .enable_fd = hclge_enable_fd,
12805 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12806 .dbg_read_cmd = hclge_dbg_read_cmd,
12807 .handle_hw_ras_error = hclge_handle_hw_ras_error,
12808 .get_hw_reset_stat = hclge_get_hw_reset_stat,
12809 .ae_dev_resetting = hclge_ae_dev_resetting,
12810 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12811 .set_gro_en = hclge_gro_en,
12812 .get_global_queue_id = hclge_covert_handle_qid_global,
12813 .set_timer_task = hclge_set_timer_task,
12814 .mac_connect_phy = hclge_mac_connect_phy,
12815 .mac_disconnect_phy = hclge_mac_disconnect_phy,
12816 .get_vf_config = hclge_get_vf_config,
12817 .set_vf_link_state = hclge_set_vf_link_state,
12818 .set_vf_spoofchk = hclge_set_vf_spoofchk,
12819 .set_vf_trust = hclge_set_vf_trust,
12820 .set_vf_rate = hclge_set_vf_rate,
12821 .set_vf_mac = hclge_set_vf_mac,
12822 .get_module_eeprom = hclge_get_module_eeprom,
12823 .get_cmdq_stat = hclge_get_cmdq_stat,
12824 .add_cls_flower = hclge_add_cls_flower,
12825 .del_cls_flower = hclge_del_cls_flower,
12826 .cls_flower_active = hclge_is_cls_flower_active,
12827 .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12828 .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12831 static struct hnae3_ae_algo ae_algo = {
12833 .pdev_id_table = ae_algo_pci_tbl,
12836 static int hclge_init(void)
12838 pr_info("%s is initializing\n", HCLGE_NAME);
12840 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12842 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12846 hnae3_register_ae_algo(&ae_algo);
12851 static void hclge_exit(void)
12853 hnae3_unregister_ae_algo(&ae_algo);
12854 destroy_workqueue(hclge_wq);
12856 module_init(hclge_init);
12857 module_exit(hclge_exit);
12859 MODULE_LICENSE("GPL");
12860 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12861 MODULE_DESCRIPTION("HCLGE Driver");
12862 MODULE_VERSION(HCLGE_MOD_VERSION);