// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"

#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

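/* Editor's note: these two macros drive the table-based stats walker
 * below. HCLGE_MAC_STATS_FIELD_OFF() records the byte offset of a field
 * inside struct hclge_mac_stats, and HCLGE_STATS_READ() reinterprets the
 * stats block at that offset as a u64. A hypothetical standalone use:
 *
 *	u64 v = HCLGE_STATS_READ(&hdev->mac_stats,
 *				 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * is equivalent to reading hdev->mac_stats.mac_tx_mac_pause_num directly;
 * the indirection lets g_mac_stats_string[] be iterated as data.
 */
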
#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

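/* Editor's note: MODULE_DEVICE_TABLE() exports the PCI ID table above into
 * the module's alias metadata so udev/modprobe can autoload this driver
 * when a matching HNS3 physical function appears on the bus.
 */
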
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App      Loopback test",
	"Serdes   serial Loopback test",
	"Serdes   parallel Loopback test",
	"Phy      Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

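/* Editor's note: this 40-byte value is the well-known default Toeplitz RSS
 * key (the sample key from Microsoft's RSS specification); many NIC
 * drivers ship the same constant as their initial hash key.
 */
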
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

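/* Editor's note: the expression above computes
 * desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4): the first descriptor
 * carries three u64 statistics in its data area and each following
 * descriptor is reused whole for four more, matching
 * HCLGE_RD_FIRST_STATS_NUM and HCLGE_RD_OTHER_STATS_NUM in the readers
 * above and below. For example, reg_num = 51 yields
 * 1 + (48 >> 2) + 0 = 13 descriptors.
 */
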
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);
	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

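/* Editor's note: the register-number query doubles as a capability probe:
 * firmware that does not implement HCLGE_OPC_QUERY_MAC_REG_NUM reports
 * -EOPNOTSUPP, in which case the driver falls back to the legacy
 * fixed-size read (HCLGE_MAC_CMD_NUM descriptors) in
 * hclge_mac_update_stats_defective().
 */
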
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has TX & RX two queues */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

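/* Editor's note: the string order emitted here (all txq names first, then
 * all rxq names) must stay in lockstep with the value order produced by
 * hclge_tqps_get_stats() above, because ethtool pairs names with values
 * purely by index.
 */
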
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all MAC modes (GE/XGE/LGE/CGE) support it
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}

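/* Editor's note: the numeric case labels in hclge_parse_speed() below are
 * the firmware speed codes used by the mainline hns3 driver: 0 = 1G,
 * 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G, 5 = 100G, 6 = 10M, 7 = 100M,
 * 8 = 200G.
 */
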
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case 8:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	case HCLGE_MAC_SPEED_200G:
		speed_bit = HCLGE_SUPPORT_200G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 mac->supported);
}

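/* Editor's note: the hclge_convert_setting_{sr,lr,cr,kr} helpers each map
 * the same hardware speed-ability bitmap onto ethtool link modes for one
 * media flavor: SR (short-reach optics), LR (long-reach optics), CR
 * (copper direct-attach) and KR (backplane). Fiber ports advertise the
 * SR/LR/CR sets together, while backplane ports advertise only KR; see
 * hclge_parse_fiber_link_mode() and hclge_parse_backplane_link_mode()
 * below.
 */
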
static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
	case HCLGE_MAC_SPEED_200G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define SPEED_ABILITY_EXT_SHIFT	8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

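	/* Editor's note: the 48-bit MAC address is split across two config
	 * words: param[2] carries the low 32 bits and param[3] the high 16
	 * bits. The "<< 31 << 1" below is simply a 32-bit left shift written
	 * in two steps, composing the halves into one u64.
	 */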
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M stores the PF's max rss size as a power-of-2
	 * exponent rather than the size itself, which is more flexible for
	 * future changes and expansions. When the VF max rss size field
	 * (HCLGE_CFG_RSS_SIZE_S) is set but the PF's field is 0, the PF
	 * falls back to the shared field, so PF and VF then have the same
	 * max rss size.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length field is in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}

static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}

static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently non-contiguous TC maps are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on PCI function number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure one to one mapping between irq and queue at default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}

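/* Editor's note: hclge_assign_tqp() clamps rss_size twice: first to the
 * PF's maximum RSS table width (pf_rss_size_max), then so that every queue
 * of a TC can own a dedicated completion vector. The "num_nic_msi - 1"
 * term presumably keeps one vector aside for non-I/O (misc/mailbox) use,
 * per the "one to one mapping between irq and queue" comment above.
 */
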
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is expressed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

1918 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1919 struct hclge_pkt_buf_alloc *buf_alloc)
1921 struct hclge_priv_buf *priv;
1925 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1926 priv = &buf_alloc->priv_buf[i];
1927 if (hdev->hw_tc_map & BIT(i) &&
1928 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

1952 u32 i, total_tx_size = 0;
1954 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1955 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1957 return total_tx_size;
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

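/* Editor's note on the watermark math above: aligned_mps is the maximum
 * packet size rounded up to HCLGE_BUF_SIZE_UNIT. The shared pool must be
 * at least max(shared_buf_min, (tc_num + 1) * aligned_mps) rounded up to
 * the same unit; whatever rx budget remains after the private buffers
 * becomes the shared buffer, and the per-TC high/low thresholds are then
 * derived from it, with BUF_RESERVE_PERCENT applied when at most
 * NEED_RESERVE_TC_NUM TCs are active.
 */
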
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

2088 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2089 struct hclge_pkt_buf_alloc *buf_alloc)
2091 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2092 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2095 /* let the last to be cleared first */
2096 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2097 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2098 unsigned int mask = BIT((unsigned int)i);
2100 if (hdev->hw_tc_map & mask &&
2101 !(hdev->tm_info.hw_pfc_map & mask)) {
2102 /* Clear the no pfc TC private buffer */
2110 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2111 no_pfc_priv_num == 0)
2115 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
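/* hclge_drop_pfc_buf_till_fit: like the no-PFC variant above, but
 * reclaims the private buffers of PFC-enabled TCs, again clearing from
 * the last TC backwards until the layout fits.
 */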
2118 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2119 struct hclge_pkt_buf_alloc *buf_alloc)
2121 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2122 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2125 /* clear the private buffers starting from the last TC */
2126 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2127 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2128 unsigned int mask = BIT((unsigned int)i);
2130 if (hdev->hw_tc_map & mask &&
2131 hdev->tm_info.hw_pfc_map & mask) {
2132 /* Reduce the number of pfc TC with private buffer */
2140 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2145 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
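/* hclge_only_alloc_priv_buff: divide the whole RX buffer evenly among
 * the enabled TCs as private buffers, leaving no shared buffer. This
 * only succeeds when each TC's share is at least min_rx_priv, i.e. the
 * DV buffer plus a fixed compensation margin.
 */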
2148 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2149 struct hclge_pkt_buf_alloc *buf_alloc)
2151 #define COMPENSATE_BUFFER 0x3C00
2152 #define COMPENSATE_HALF_MPS_NUM 5
2153 #define PRIV_WL_GAP 0x1800
2155 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2156 u32 tc_num = hclge_get_tc_num(hdev);
2157 u32 half_mps = hdev->mps >> 1;
2162 rx_priv = rx_priv / tc_num;
2164 if (tc_num <= NEED_RESERVE_TC_NUM)
2165 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2167 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2168 COMPENSATE_HALF_MPS_NUM * half_mps;
2169 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2170 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2172 if (rx_priv < min_rx_priv)
2175 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2176 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2183 if (!(hdev->hw_tc_map & BIT(i)))
2187 priv->buf_size = rx_priv;
2188 priv->wl.high = rx_priv - hdev->dv_buf_size;
2189 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2192 buf_alloc->s_buf.buf_size = 0;
2197 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2198 * @hdev: pointer to struct hclge_dev
2199 * @buf_alloc: pointer to buffer calculation data
2200 * @return: 0: calculation successful, negative: fail
2202 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2203 struct hclge_pkt_buf_alloc *buf_alloc)
2205 /* When DCB is not supported, rx private buffer is not allocated. */
2206 if (!hnae3_dev_dcb_supported(hdev)) {
2207 u32 rx_all = hdev->pkt_buf_size;
2209 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2210 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2216 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2219 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2222 /* try to decrease the buffer size */
2223 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2226 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2229 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
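/* hclge_rx_priv_buf_alloc: program the calculated per-TC private
 * buffer sizes and the shared buffer size into the hardware via the
 * HCLGE_OPC_RX_PRIV_BUFF_ALLOC command.
 */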
2235 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2236 struct hclge_pkt_buf_alloc *buf_alloc)
2238 struct hclge_rx_priv_buff_cmd *req;
2239 struct hclge_desc desc;
2243 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2244 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2246 /* Alloc private buffer TCs */
2247 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2248 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2251 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2253 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2257 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2258 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2260 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2262 dev_err(&hdev->pdev->dev,
2263 "rx private buffer alloc cmd failed %d\n", ret);
2268 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2269 struct hclge_pkt_buf_alloc *buf_alloc)
2271 struct hclge_rx_priv_wl_buf *req;
2272 struct hclge_priv_buf *priv;
2273 struct hclge_desc desc[2];
2277 for (i = 0; i < 2; i++) {
2278 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2280 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2282 /* The first descriptor sets the NEXT bit to 1 */
2284 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2286 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2288 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2289 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2291 priv = &buf_alloc->priv_buf[idx];
2292 req->tc_wl[j].high =
2293 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2294 req->tc_wl[j].high |=
2295 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2297 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2298 req->tc_wl[j].low |=
2299 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2303 /* Send 2 descriptors at one time */
2304 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2306 dev_err(&hdev->pdev->dev,
2307 "rx private waterline config cmd failed %d\n",
2312 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2313 struct hclge_pkt_buf_alloc *buf_alloc)
2315 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2316 struct hclge_rx_com_thrd *req;
2317 struct hclge_desc desc[2];
2318 struct hclge_tc_thrd *tc;
2322 for (i = 0; i < 2; i++) {
2323 hclge_cmd_setup_basic_desc(&desc[i],
2324 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2325 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2327 /* The first descriptor sets the NEXT bit to 1 */
2329 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2331 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2333 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2334 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2336 req->com_thrd[j].high =
2337 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2338 req->com_thrd[j].high |=
2339 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2340 req->com_thrd[j].low =
2341 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2342 req->com_thrd[j].low |=
2343 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2347 /* Send 2 descriptors at one time */
2348 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2350 dev_err(&hdev->pdev->dev,
2351 "common threshold config cmd failed %d\n", ret);
2355 static int hclge_common_wl_config(struct hclge_dev *hdev,
2356 struct hclge_pkt_buf_alloc *buf_alloc)
2358 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2359 struct hclge_rx_com_wl *req;
2360 struct hclge_desc desc;
2363 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2365 req = (struct hclge_rx_com_wl *)desc.data;
2366 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2367 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2369 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2370 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2372 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2374 dev_err(&hdev->pdev->dev,
2375 "common waterline config cmd failed %d\n", ret);
2380 int hclge_buffer_alloc(struct hclge_dev *hdev)
2382 struct hclge_pkt_buf_alloc *pkt_buf;
2385 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2389 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2391 dev_err(&hdev->pdev->dev,
2392 "could not calc tx buffer size for all TCs %d\n", ret);
2396 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2398 dev_err(&hdev->pdev->dev,
2399 "could not alloc tx buffers %d\n", ret);
2403 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2405 dev_err(&hdev->pdev->dev,
2406 "could not calc rx priv buffer size for all TCs %d\n",
2411 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2413 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2418 if (hnae3_dev_dcb_supported(hdev)) {
2419 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2421 dev_err(&hdev->pdev->dev,
2422 "could not configure rx private waterline %d\n",
2427 ret = hclge_common_thrd_config(hdev, pkt_buf);
2429 dev_err(&hdev->pdev->dev,
2430 "could not configure common threshold %d\n",
2436 ret = hclge_common_wl_config(hdev, pkt_buf);
2438 dev_err(&hdev->pdev->dev,
2439 "could not configure common waterline %d\n", ret);
2446 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2448 struct hnae3_handle *roce = &vport->roce;
2449 struct hnae3_handle *nic = &vport->nic;
2450 struct hclge_dev *hdev = vport->back;
2452 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2454 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2457 roce->rinfo.base_vector = hdev->roce_base_vector;
2459 roce->rinfo.netdev = nic->kinfo.netdev;
2460 roce->rinfo.roce_io_base = hdev->hw.io_base;
2461 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2463 roce->pdev = nic->pdev;
2464 roce->ae_algo = nic->ae_algo;
2465 roce->numa_node_mask = nic->numa_node_mask;
2470 static int hclge_init_msi(struct hclge_dev *hdev)
2472 struct pci_dev *pdev = hdev->pdev;
2476 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2478 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2481 "failed(%d) to allocate MSI/MSI-X vectors\n",
2485 if (vectors < hdev->num_msi)
2486 dev_warn(&hdev->pdev->dev,
2487 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2488 hdev->num_msi, vectors);
2490 hdev->num_msi = vectors;
2491 hdev->num_msi_left = vectors;
2493 hdev->base_msi_vector = pdev->irq;
2494 hdev->roce_base_vector = hdev->base_msi_vector +
2497 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2498 sizeof(u16), GFP_KERNEL);
2499 if (!hdev->vector_status) {
2500 pci_free_irq_vectors(pdev);
2504 for (i = 0; i < hdev->num_msi; i++)
2505 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2507 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2508 sizeof(int), GFP_KERNEL);
2509 if (!hdev->vector_irq) {
2510 pci_free_irq_vectors(pdev);
2517 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2519 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2520 duplex = HCLGE_MAC_FULL;
2525 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2528 struct hclge_config_mac_speed_dup_cmd *req;
2529 struct hclge_desc desc;
2532 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2534 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2537 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2540 case HCLGE_MAC_SPEED_10M:
2541 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2542 HCLGE_CFG_SPEED_S, 6);
2544 case HCLGE_MAC_SPEED_100M:
2545 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2546 HCLGE_CFG_SPEED_S, 7);
2548 case HCLGE_MAC_SPEED_1G:
2549 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2550 HCLGE_CFG_SPEED_S, 0);
2552 case HCLGE_MAC_SPEED_10G:
2553 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2554 HCLGE_CFG_SPEED_S, 1);
2556 case HCLGE_MAC_SPEED_25G:
2557 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2558 HCLGE_CFG_SPEED_S, 2);
2560 case HCLGE_MAC_SPEED_40G:
2561 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2562 HCLGE_CFG_SPEED_S, 3);
2564 case HCLGE_MAC_SPEED_50G:
2565 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2566 HCLGE_CFG_SPEED_S, 4);
2568 case HCLGE_MAC_SPEED_100G:
2569 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570 HCLGE_CFG_SPEED_S, 5);
2572 case HCLGE_MAC_SPEED_200G:
2573 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574 HCLGE_CFG_SPEED_S, 8);
2577 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2581 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2584 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2586 dev_err(&hdev->pdev->dev,
2587 "mac speed/duplex config cmd failed %d.\n", ret);
2594 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2596 struct hclge_mac *mac = &hdev->hw.mac;
2599 duplex = hclge_check_speed_dup(duplex, speed);
2600 if (!mac->support_autoneg && mac->speed == speed &&
2601 mac->duplex == duplex)
2604 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2608 hdev->hw.mac.speed = speed;
2609 hdev->hw.mac.duplex = duplex;
2614 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2617 struct hclge_vport *vport = hclge_get_vport(handle);
2618 struct hclge_dev *hdev = vport->back;
2620 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2623 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2625 struct hclge_config_auto_neg_cmd *req;
2626 struct hclge_desc desc;
2630 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2632 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2634 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2635 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2637 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2639 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2645 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2647 struct hclge_vport *vport = hclge_get_vport(handle);
2648 struct hclge_dev *hdev = vport->back;
2650 if (!hdev->hw.mac.support_autoneg) {
2652 dev_err(&hdev->pdev->dev,
2653 "autoneg is not supported by current port\n");
2660 return hclge_set_autoneg_en(hdev, enable);
2663 static int hclge_get_autoneg(struct hnae3_handle *handle)
2665 struct hclge_vport *vport = hclge_get_vport(handle);
2666 struct hclge_dev *hdev = vport->back;
2667 struct phy_device *phydev = hdev->hw.mac.phydev;
2670 return phydev->autoneg;
2672 return hdev->hw.mac.autoneg;
2675 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2677 struct hclge_vport *vport = hclge_get_vport(handle);
2678 struct hclge_dev *hdev = vport->back;
2681 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2683 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2686 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2689 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2691 struct hclge_vport *vport = hclge_get_vport(handle);
2692 struct hclge_dev *hdev = vport->back;
2694 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2695 return hclge_set_autoneg_en(hdev, !halt);
2700 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2702 struct hclge_config_fec_cmd *req;
2703 struct hclge_desc desc;
2706 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2708 req = (struct hclge_config_fec_cmd *)desc.data;
2709 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2710 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2711 if (fec_mode & BIT(HNAE3_FEC_RS))
2712 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2713 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2714 if (fec_mode & BIT(HNAE3_FEC_BASER))
2715 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2716 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2718 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2720 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2725 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2727 struct hclge_vport *vport = hclge_get_vport(handle);
2728 struct hclge_dev *hdev = vport->back;
2729 struct hclge_mac *mac = &hdev->hw.mac;
2732 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2733 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2737 ret = hclge_set_fec_hw(hdev, fec_mode);
2741 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2745 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2748 struct hclge_vport *vport = hclge_get_vport(handle);
2749 struct hclge_dev *hdev = vport->back;
2750 struct hclge_mac *mac = &hdev->hw.mac;
2753 *fec_ability = mac->fec_ability;
2755 *fec_mode = mac->fec_mode;
2758 static int hclge_mac_init(struct hclge_dev *hdev)
2760 struct hclge_mac *mac = &hdev->hw.mac;
2763 hdev->support_sfp_query = true;
2764 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2765 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2766 hdev->hw.mac.duplex);
2770 if (hdev->hw.mac.support_autoneg) {
2771 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2778 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2779 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2784 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2786 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2790 ret = hclge_set_default_loopback(hdev);
2794 ret = hclge_buffer_alloc(hdev);
2796 dev_err(&hdev->pdev->dev,
2797 "allocate buffer fail, ret=%d\n", ret);
2802 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2804 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2805 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2806 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2807 hclge_wq, &hdev->service_task, 0);
2810 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2812 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2813 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2814 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2815 hclge_wq, &hdev->service_task, 0);
2818 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2820 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2821 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2822 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2823 hclge_wq, &hdev->service_task,
2827 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2829 struct hclge_link_status_cmd *req;
2830 struct hclge_desc desc;
2833 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2834 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2836 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2841 req = (struct hclge_link_status_cmd *)desc.data;
2842 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2843 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2848 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2850 struct phy_device *phydev = hdev->hw.mac.phydev;
2852 *link_status = HCLGE_LINK_STATUS_DOWN;
2854 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2857 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2860 return hclge_get_mac_link_status(hdev, link_status);
2863 static void hclge_update_link_status(struct hclge_dev *hdev)
2865 struct hnae3_client *rclient = hdev->roce_client;
2866 struct hnae3_client *client = hdev->nic_client;
2867 struct hnae3_handle *rhandle;
2868 struct hnae3_handle *handle;
2876 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2879 ret = hclge_get_mac_phy_link(hdev, &state);
2881 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2885 if (state != hdev->hw.mac.link) {
2886 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2887 handle = &hdev->vport[i].nic;
2888 client->ops->link_status_change(handle, state);
2889 hclge_config_mac_tnl_int(hdev, state);
2890 rhandle = &hdev->vport[i].roce;
2891 if (rclient && rclient->ops->link_status_change)
2892 rclient->ops->link_status_change(rhandle,
2895 hdev->hw.mac.link = state;
2898 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2901 static void hclge_update_port_capability(struct hclge_dev *hdev,
2902 struct hclge_mac *mac)
2904 if (hnae3_dev_fec_supported(hdev))
2905 /* update fec ability by speed */
2906 hclge_convert_setting_fec(mac);
2908 /* firmware cannot identify the backplane type; the media type
2909 * read from the configuration can help to deal with it
2911 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2912 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2913 mac->module_type = HNAE3_MODULE_TYPE_KR;
2914 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2915 mac->module_type = HNAE3_MODULE_TYPE_TP;
2917 if (mac->support_autoneg) {
2918 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2919 linkmode_copy(mac->advertising, mac->supported);
2921 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2923 linkmode_zero(mac->advertising);
2927 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2929 struct hclge_sfp_info_cmd *resp;
2930 struct hclge_desc desc;
2933 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2934 resp = (struct hclge_sfp_info_cmd *)desc.data;
2935 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2936 if (ret == -EOPNOTSUPP) {
2937 dev_warn(&hdev->pdev->dev,
2938 "IMP do not support get SFP speed %d\n", ret);
2941 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2945 *speed = le32_to_cpu(resp->speed);
2950 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2952 struct hclge_sfp_info_cmd *resp;
2953 struct hclge_desc desc;
2956 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2957 resp = (struct hclge_sfp_info_cmd *)desc.data;
2959 resp->query_type = QUERY_ACTIVE_SPEED;
2961 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2962 if (ret == -EOPNOTSUPP) {
2963 dev_warn(&hdev->pdev->dev,
2964 "IMP does not support get SFP info %d\n", ret);
2967 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2971 /* In some cases, the MAC speed got from the IMP may be 0; it
2972 * shouldn't be set to mac->speed.
2974 if (!le32_to_cpu(resp->speed))
2977 mac->speed = le32_to_cpu(resp->speed);
2978 /* if resp->speed_ability is 0, the firmware is an old version;
2979 * do not update these params
2981 if (resp->speed_ability) {
2982 mac->module_type = le32_to_cpu(resp->module_type);
2983 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2984 mac->autoneg = resp->autoneg;
2985 mac->support_autoneg = resp->autoneg_ability;
2986 mac->speed_type = QUERY_ACTIVE_SPEED;
2987 if (!resp->active_fec)
2990 mac->fec_mode = BIT(resp->active_fec);
2992 mac->speed_type = QUERY_SFP_SPEED;
2998 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
2999 struct ethtool_link_ksettings *cmd)
3001 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3002 struct hclge_vport *vport = hclge_get_vport(handle);
3003 struct hclge_phy_link_ksetting_0_cmd *req0;
3004 struct hclge_phy_link_ksetting_1_cmd *req1;
3005 u32 supported, advertising, lp_advertising;
3006 struct hclge_dev *hdev = vport->back;
3009 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3011 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3012 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3015 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3017 dev_err(&hdev->pdev->dev,
3018 "failed to get phy link ksetting, ret = %d.\n", ret);
3022 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3023 cmd->base.autoneg = req0->autoneg;
3024 cmd->base.speed = le32_to_cpu(req0->speed);
3025 cmd->base.duplex = req0->duplex;
3026 cmd->base.port = req0->port;
3027 cmd->base.transceiver = req0->transceiver;
3028 cmd->base.phy_address = req0->phy_address;
3029 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3030 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3031 supported = le32_to_cpu(req0->supported);
3032 advertising = le32_to_cpu(req0->advertising);
3033 lp_advertising = le32_to_cpu(req0->lp_advertising);
3034 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3036 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3038 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3041 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3042 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3043 cmd->base.master_slave_state = req1->master_slave_state;
3049 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3050 const struct ethtool_link_ksettings *cmd)
3052 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3053 struct hclge_vport *vport = hclge_get_vport(handle);
3054 struct hclge_phy_link_ksetting_0_cmd *req0;
3055 struct hclge_phy_link_ksetting_1_cmd *req1;
3056 struct hclge_dev *hdev = vport->back;
3060 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3061 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3062 (cmd->base.duplex != DUPLEX_HALF &&
3063 cmd->base.duplex != DUPLEX_FULL)))
3066 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3068 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3069 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3072 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3073 req0->autoneg = cmd->base.autoneg;
3074 req0->speed = cpu_to_le32(cmd->base.speed);
3075 req0->duplex = cmd->base.duplex;
3076 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3077 cmd->link_modes.advertising);
3078 req0->advertising = cpu_to_le32(advertising);
3079 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3081 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3082 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3084 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3086 dev_err(&hdev->pdev->dev,
3087 "failed to set phy link ksettings, ret = %d.\n", ret);
3091 hdev->hw.mac.autoneg = cmd->base.autoneg;
3092 hdev->hw.mac.speed = cmd->base.speed;
3093 hdev->hw.mac.duplex = cmd->base.duplex;
3094 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3099 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3101 struct ethtool_link_ksettings cmd;
3104 if (!hnae3_dev_phy_imp_supported(hdev))
3107 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3111 hdev->hw.mac.autoneg = cmd.base.autoneg;
3112 hdev->hw.mac.speed = cmd.base.speed;
3113 hdev->hw.mac.duplex = cmd.base.duplex;
3118 static int hclge_tp_port_init(struct hclge_dev *hdev)
3120 struct ethtool_link_ksettings cmd;
3122 if (!hnae3_dev_phy_imp_supported(hdev))
3125 cmd.base.autoneg = hdev->hw.mac.autoneg;
3126 cmd.base.speed = hdev->hw.mac.speed;
3127 cmd.base.duplex = hdev->hw.mac.duplex;
3128 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3130 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3133 static int hclge_update_port_info(struct hclge_dev *hdev)
3135 struct hclge_mac *mac = &hdev->hw.mac;
3136 int speed = HCLGE_MAC_SPEED_UNKNOWN;
3139 /* get the port info from SFP cmd if not copper port */
3140 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3141 return hclge_update_tp_port_info(hdev);
3143 /* if the IMP does not support getting SFP/qSFP info, return directly */
3144 if (!hdev->support_sfp_query)
3147 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3148 ret = hclge_get_sfp_info(hdev, mac);
3150 ret = hclge_get_sfp_speed(hdev, &speed);
3152 if (ret == -EOPNOTSUPP) {
3153 hdev->support_sfp_query = false;
3159 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3160 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3161 hclge_update_port_capability(hdev, mac);
3164 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3167 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3168 return 0; /* do nothing if no SFP */
3170 /* must config full duplex for SFP */
3171 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3175 static int hclge_get_status(struct hnae3_handle *handle)
3177 struct hclge_vport *vport = hclge_get_vport(handle);
3178 struct hclge_dev *hdev = vport->back;
3180 hclge_update_link_status(hdev);
3182 return hdev->hw.mac.link;
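/* hclge_get_vf_vport: translate a 0-based VF index into its vport.
 * The lookup fails with an error message when SR-IOV is disabled or
 * the index is out of range; VF vports start at
 * HCLGE_VF_VPORT_START_NUM because vport 0 is the PF.
 */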
3185 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3187 if (!pci_num_vf(hdev->pdev)) {
3188 dev_err(&hdev->pdev->dev,
3189 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3193 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3194 dev_err(&hdev->pdev->dev,
3195 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3196 vf, pci_num_vf(hdev->pdev));
3200 /* VFs start from 1 in vport */
3201 vf += HCLGE_VF_VPORT_START_NUM;
3202 return &hdev->vport[vf];
3205 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3206 struct ifla_vf_info *ivf)
3208 struct hclge_vport *vport = hclge_get_vport(handle);
3209 struct hclge_dev *hdev = vport->back;
3211 vport = hclge_get_vf_vport(hdev, vf);
3216 ivf->linkstate = vport->vf_info.link_state;
3217 ivf->spoofchk = vport->vf_info.spoofchk;
3218 ivf->trusted = vport->vf_info.trusted;
3219 ivf->min_tx_rate = 0;
3220 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3221 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3222 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3223 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3224 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3229 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3232 struct hclge_vport *vport = hclge_get_vport(handle);
3233 struct hclge_dev *hdev = vport->back;
3235 vport = hclge_get_vf_vport(hdev, vf);
3239 vport->vf_info.link_state = link_state;
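/* hclge_check_event_cause: decode the vector0 interrupt source and
 * return the highest priority event: IMP/global reset first, then
 * MSI-X hardware errors, then mailbox (CMDQ RX) events. @clearval is
 * set to the bits the caller must clear afterwards.
 */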
3244 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3246 u32 cmdq_src_reg, msix_src_reg;
3248 /* fetch the events from their corresponding regs */
3249 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3250 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3252 /* Assumption: If by any chance reset and mailbox events are reported
3253 * together then we will only process the reset event in this go and
3254 * will defer the processing of the mailbox events. Since we would not
3255 * have cleared the RX CMDQ event this time, we would receive another
3256 * interrupt from H/W just for the mailbox.
3258 * check for vector0 reset event sources
3260 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3261 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3262 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3263 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3264 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3265 hdev->rst_stats.imp_rst_cnt++;
3266 return HCLGE_VECTOR0_EVENT_RST;
3269 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3270 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3271 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3272 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3273 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3274 hdev->rst_stats.global_rst_cnt++;
3275 return HCLGE_VECTOR0_EVENT_RST;
3278 /* check for vector0 msix event source */
3279 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3280 *clearval = msix_src_reg;
3281 return HCLGE_VECTOR0_EVENT_ERR;
3284 /* check for vector0 mailbox(=CMDQ RX) event source */
3285 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3286 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3287 *clearval = cmdq_src_reg;
3288 return HCLGE_VECTOR0_EVENT_MBX;
3291 /* print other vector0 event source */
3292 dev_info(&hdev->pdev->dev,
3293 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3294 cmdq_src_reg, msix_src_reg);
3295 *clearval = msix_src_reg;
3297 return HCLGE_VECTOR0_EVENT_OTHER;
3300 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3303 switch (event_type) {
3304 case HCLGE_VECTOR0_EVENT_RST:
3305 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3307 case HCLGE_VECTOR0_EVENT_MBX:
3308 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3315 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3317 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3318 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3319 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3320 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3321 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3324 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3326 writel(enable ? 1 : 0, vector->addr);
3329 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3331 struct hclge_dev *hdev = data;
3335 hclge_enable_vector(&hdev->misc_vector, false);
3336 event_cause = hclge_check_event_cause(hdev, &clearval);
3338 /* vector 0 interrupt is shared with reset and mailbox source events. */
3339 switch (event_cause) {
3340 case HCLGE_VECTOR0_EVENT_ERR:
3341 /* we do not know what type of reset is required now. This could
3342 * only be decided after we fetch the type of errors which
3343 * caused this event. Therefore, we will do the below for now:
3344 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3345 * have deferred the type of reset to be used.
3346 * 2. Schedule the reset service task.
3347 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3348 * will fetch the correct type of reset. This would be done
3349 * by first decoding the types of errors.
3351 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3353 case HCLGE_VECTOR0_EVENT_RST:
3354 hclge_reset_task_schedule(hdev);
3356 case HCLGE_VECTOR0_EVENT_MBX:
3357 /* If we are here then,
3358 * 1. Either we are not handling any mbx task and we are not
3361 * 2. We could be handling an mbx task but nothing more is
3363 * In both cases, we should schedule mbx task as there are more
3364 * mbx messages reported by this interrupt.
3366 hclge_mbx_task_schedule(hdev);
3369 dev_warn(&hdev->pdev->dev,
3370 "received unknown or unhandled event of vector0\n");
3374 hclge_clear_event_cause(hdev, event_cause, clearval);
3376 /* Enable the interrupt if it is not caused by reset. And when
3377 * clearval equals 0, the interrupt status may have been
3378 * cleared by hardware before the driver reads the status register.
3379 * For this case, the vector0 interrupt also should be enabled.
3382 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3383 hclge_enable_vector(&hdev->misc_vector, true);
3389 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3391 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3392 dev_warn(&hdev->pdev->dev,
3393 "vector(vector_id %d) has been freed.\n", vector_id);
3397 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3398 hdev->num_msi_left += 1;
3399 hdev->num_msi_used -= 1;
3402 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3404 struct hclge_misc_vector *vector = &hdev->misc_vector;
3406 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3408 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3409 hdev->vector_status[0] = 0;
3411 hdev->num_msi_left -= 1;
3412 hdev->num_msi_used += 1;
3415 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3416 const cpumask_t *mask)
3418 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3421 cpumask_copy(&hdev->affinity_mask, mask);
3424 static void hclge_irq_affinity_release(struct kref *ref)
3428 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3430 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3431 &hdev->affinity_mask);
3433 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3434 hdev->affinity_notify.release = hclge_irq_affinity_release;
3435 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3436 &hdev->affinity_notify);
3439 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3441 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3442 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3445 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3449 hclge_get_misc_vector(hdev);
3451 /* this would be explicitly freed in the end */
3452 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3453 HCLGE_NAME, pci_name(hdev->pdev));
3454 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3455 0, hdev->misc_vector.name, hdev);
3457 hclge_free_vector(hdev, 0);
3458 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3459 hdev->misc_vector.vector_irq);
3465 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3467 free_irq(hdev->misc_vector.vector_irq, hdev);
3468 hclge_free_vector(hdev, 0);
3471 int hclge_notify_client(struct hclge_dev *hdev,
3472 enum hnae3_reset_notify_type type)
3474 struct hnae3_client *client = hdev->nic_client;
3477 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3480 if (!client->ops->reset_notify)
3483 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3484 struct hnae3_handle *handle = &hdev->vport[i].nic;
3487 ret = client->ops->reset_notify(handle, type);
3489 dev_err(&hdev->pdev->dev,
3490 "notify nic client failed %d(%d)\n", type, ret);
3498 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3499 enum hnae3_reset_notify_type type)
3501 struct hnae3_client *client = hdev->roce_client;
3505 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3508 if (!client->ops->reset_notify)
3511 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3512 struct hnae3_handle *handle = &hdev->vport[i].roce;
3514 ret = client->ops->reset_notify(handle, type);
3516 dev_err(&hdev->pdev->dev,
3517 "notify roce client failed %d(%d)",
3526 static int hclge_reset_wait(struct hclge_dev *hdev)
3528 #define HCLGE_RESET_WAIT_MS 100
3529 #define HCLGE_RESET_WAIT_CNT 350
3531 u32 val, reg, reg_bit;
3534 switch (hdev->reset_type) {
3535 case HNAE3_IMP_RESET:
3536 reg = HCLGE_GLOBAL_RESET_REG;
3537 reg_bit = HCLGE_IMP_RESET_BIT;
3539 case HNAE3_GLOBAL_RESET:
3540 reg = HCLGE_GLOBAL_RESET_REG;
3541 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3543 case HNAE3_FUNC_RESET:
3544 reg = HCLGE_FUN_RST_ING;
3545 reg_bit = HCLGE_FUN_RST_ING_B;
3548 dev_err(&hdev->pdev->dev,
3549 "Wait for unsupported reset type: %d\n",
3554 val = hclge_read_dev(&hdev->hw, reg);
3555 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3556 msleep(HCLGE_RESET_WAIT_MS);
3557 val = hclge_read_dev(&hdev->hw, reg);
3561 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3562 dev_warn(&hdev->pdev->dev,
3563 "Wait for reset timeout: %d\n", hdev->reset_type);
3570 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3572 struct hclge_vf_rst_cmd *req;
3573 struct hclge_desc desc;
3575 req = (struct hclge_vf_rst_cmd *)desc.data;
3576 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3577 req->dest_vfid = func_id;
3582 return hclge_cmd_send(&hdev->hw, &desc, 1);
3585 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3589 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3590 struct hclge_vport *vport = &hdev->vport[i];
3593 /* Send cmd to set/clear VF's FUNC_RST_ING */
3594 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3596 dev_err(&hdev->pdev->dev,
3597 "set vf(%u) rst failed %d!\n",
3598 vport->vport_id, ret);
3602 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3605 /* Inform VF to process the reset.
3606 * hclge_inform_reset_assert_to_vf may fail if VF
3607 * driver is not loaded.
3609 ret = hclge_inform_reset_assert_to_vf(vport);
3611 dev_warn(&hdev->pdev->dev,
3612 "inform reset to vf(%u) failed %d!\n",
3613 vport->vport_id, ret);
3619 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3621 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3622 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3623 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3626 hclge_mbx_handler(hdev);
3628 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3631 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3633 struct hclge_pf_rst_sync_cmd *req;
3634 struct hclge_desc desc;
3638 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3639 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3642 /* the VF needs to bring its netdev down via mbx during PF or FLR reset */
3643 hclge_mailbox_service_task(hdev);
3645 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3646 /* for compatibility with old firmware, wait
3647 * 100 ms for the VF to stop IO
3649 if (ret == -EOPNOTSUPP) {
3650 msleep(HCLGE_RESET_SYNC_TIME);
3653 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3656 } else if (req->all_vf_ready) {
3659 msleep(HCLGE_PF_RESET_SYNC_TIME);
3660 hclge_cmd_reuse_desc(&desc, true);
3661 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3663 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3666 void hclge_report_hw_error(struct hclge_dev *hdev,
3667 enum hnae3_hw_error_type type)
3669 struct hnae3_client *client = hdev->nic_client;
3672 if (!client || !client->ops->process_hw_error ||
3673 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3676 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3677 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3680 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3684 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3685 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3686 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3687 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3688 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3691 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3692 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3693 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3694 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3698 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3700 struct hclge_desc desc;
3701 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3704 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3705 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3706 req->fun_reset_vfid = func_id;
3708 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3710 dev_err(&hdev->pdev->dev,
3711 "send function reset cmd fail, status =%d\n", ret);
3716 static void hclge_do_reset(struct hclge_dev *hdev)
3718 struct hnae3_handle *handle = &hdev->vport[0].nic;
3719 struct pci_dev *pdev = hdev->pdev;
3722 if (hclge_get_hw_reset_stat(handle)) {
3723 dev_info(&pdev->dev, "hardware reset not finished\n");
3724 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3725 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3726 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3730 switch (hdev->reset_type) {
3731 case HNAE3_GLOBAL_RESET:
3732 dev_info(&pdev->dev, "global reset requested\n");
3733 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3734 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3735 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3737 case HNAE3_FUNC_RESET:
3738 dev_info(&pdev->dev, "PF reset requested\n");
3739 /* schedule again to check later */
3740 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3741 hclge_reset_task_schedule(hdev);
3744 dev_warn(&pdev->dev,
3745 "unsupported reset type: %d\n", hdev->reset_type);
3750 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3751 unsigned long *addr)
3753 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3754 struct hclge_dev *hdev = ae_dev->priv;
3756 /* first, resolve any unknown reset type to the known type(s) */
3757 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3758 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3759 HCLGE_MISC_VECTOR_INT_STS);
3760 /* we will intentionally ignore any errors from this function
3761 * as we will end up in *some* reset request in any case
3763 if (hclge_handle_hw_msix_error(hdev, addr))
3764 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3767 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3768 /* We deferred the clearing of the error event which caused the
3769 * interrupt since it was not possible to do that in
3770 * interrupt context (and this is the reason we introduced the
3771 * new UNKNOWN reset type). Now that the errors have been
3772 * handled and cleared in hardware, we can safely enable
3773 * interrupts. This is an exception to the norm.
3775 hclge_enable_vector(&hdev->misc_vector, true);
3778 /* return the highest priority reset level amongst all */
3779 if (test_bit(HNAE3_IMP_RESET, addr)) {
3780 rst_level = HNAE3_IMP_RESET;
3781 clear_bit(HNAE3_IMP_RESET, addr);
3782 clear_bit(HNAE3_GLOBAL_RESET, addr);
3783 clear_bit(HNAE3_FUNC_RESET, addr);
3784 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3785 rst_level = HNAE3_GLOBAL_RESET;
3786 clear_bit(HNAE3_GLOBAL_RESET, addr);
3787 clear_bit(HNAE3_FUNC_RESET, addr);
3788 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3789 rst_level = HNAE3_FUNC_RESET;
3790 clear_bit(HNAE3_FUNC_RESET, addr);
3791 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3792 rst_level = HNAE3_FLR_RESET;
3793 clear_bit(HNAE3_FLR_RESET, addr);
3796 if (hdev->reset_type != HNAE3_NONE_RESET &&
3797 rst_level < hdev->reset_type)
3798 return HNAE3_NONE_RESET;
3803 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3807 switch (hdev->reset_type) {
3808 case HNAE3_IMP_RESET:
3809 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3811 case HNAE3_GLOBAL_RESET:
3812 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3821 /* For revision 0x20, the reset interrupt source
3822 * can only be cleared after the hardware reset is done
3824 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3825 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3828 hclge_enable_vector(&hdev->misc_vector, true);
3831 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3835 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3837 reg_val |= HCLGE_NIC_SW_RST_RDY;
3839 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3841 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3844 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3848 ret = hclge_set_all_vf_rst(hdev, true);
3852 hclge_func_reset_sync_vf(hdev);
3857 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3862 switch (hdev->reset_type) {
3863 case HNAE3_FUNC_RESET:
3864 ret = hclge_func_reset_notify_vf(hdev);
3868 ret = hclge_func_reset_cmd(hdev, 0);
3870 dev_err(&hdev->pdev->dev,
3871 "asserting function reset fail %d!\n", ret);
3875 /* After performing a PF reset, it is not necessary to do any
3876 * mailbox handling or send any command to the firmware, because
3877 * any mailbox handling or command to the firmware is only valid
3878 * after hclge_cmd_init is called.
3880 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3881 hdev->rst_stats.pf_rst_cnt++;
3883 case HNAE3_FLR_RESET:
3884 ret = hclge_func_reset_notify_vf(hdev);
3888 case HNAE3_IMP_RESET:
3889 hclge_handle_imp_error(hdev);
3890 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3891 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3892 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3898 /* inform hardware that preparatory work is done */
3899 msleep(HCLGE_RESET_SYNC_TIME);
3900 hclge_reset_handshake(hdev, true);
3901 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3906 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3908 #define MAX_RESET_FAIL_CNT 5
3910 if (hdev->reset_pending) {
3911 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3912 hdev->reset_pending);
3914 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3915 HCLGE_RESET_INT_M) {
3916 dev_info(&hdev->pdev->dev,
3917 "reset failed because new reset interrupt\n");
3918 hclge_clear_reset_cause(hdev);
3920 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3921 hdev->rst_stats.reset_fail_cnt++;
3922 set_bit(hdev->reset_type, &hdev->reset_pending);
3923 dev_info(&hdev->pdev->dev,
3924 "re-schedule reset task(%u)\n",
3925 hdev->rst_stats.reset_fail_cnt);
3929 hclge_clear_reset_cause(hdev);
3931 /* recover the handshake status when the reset fails */
3932 hclge_reset_handshake(hdev, true);
3934 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3936 hclge_dbg_dump_rst_info(hdev);
3938 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3943 static int hclge_set_rst_done(struct hclge_dev *hdev)
3945 struct hclge_pf_rst_done_cmd *req;
3946 struct hclge_desc desc;
3949 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3950 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3951 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3953 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3954 /* To be compatible with the old firmware, which does not support
3955 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3958 if (ret == -EOPNOTSUPP) {
3959 dev_warn(&hdev->pdev->dev,
3960 "current firmware does not support command(0x%x)!\n",
3961 HCLGE_OPC_PF_RST_DONE);
3964 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3971 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3975 switch (hdev->reset_type) {
3976 case HNAE3_FUNC_RESET:
3977 case HNAE3_FLR_RESET:
3978 ret = hclge_set_all_vf_rst(hdev, false);
3980 case HNAE3_GLOBAL_RESET:
3981 case HNAE3_IMP_RESET:
3982 ret = hclge_set_rst_done(hdev);
3988 /* clear the handshake status after re-initialization is done */
3989 hclge_reset_handshake(hdev, false);
3994 static int hclge_reset_stack(struct hclge_dev *hdev)
3998 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4002 ret = hclge_reset_ae_dev(hdev->ae_dev);
4006 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4009 static int hclge_reset_prepare(struct hclge_dev *hdev)
4013 hdev->rst_stats.reset_cnt++;
4014 /* perform reset of the stack & ae device for a client */
4015 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4020 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4025 return hclge_reset_prepare_wait(hdev);
4028 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4030 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4031 enum hnae3_reset_type reset_level;
4034 hdev->rst_stats.hw_reset_done_cnt++;
4036 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4041 ret = hclge_reset_stack(hdev);
4046 hclge_clear_reset_cause(hdev);
4048 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4049 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4053 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4056 ret = hclge_reset_prepare_up(hdev);
4061 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4066 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4070 hdev->last_reset_time = jiffies;
4071 hdev->rst_stats.reset_fail_cnt = 0;
4072 hdev->rst_stats.reset_done_cnt++;
4073 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4075 /* if default_reset_request has a higher level reset request,
4076 * it should be handled as soon as possible, since some errors
4077 * need this kind of reset to be fixed.
4079 reset_level = hclge_get_reset_level(ae_dev,
4080 &hdev->default_reset_request);
4081 if (reset_level != HNAE3_NONE_RESET)
4082 set_bit(reset_level, &hdev->reset_request);
4087 static void hclge_reset(struct hclge_dev *hdev)
4089 if (hclge_reset_prepare(hdev))
4092 if (hclge_reset_wait(hdev))
4095 if (hclge_reset_rebuild(hdev))
4101 if (hclge_reset_err_handle(hdev))
4102 hclge_reset_task_schedule(hdev);
4105 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4107 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4108 struct hclge_dev *hdev = ae_dev->priv;
4110 /* We might end up getting called broadly because of the 2 cases below:
4111 * 1. A recoverable error was conveyed through APEI and the only way to
4112 * bring normalcy is to reset.
4113 * 2. A new reset request from the stack due to timeout
4115 * For the first case, the error event might not have an ae handle
4116 * available. Check if this is a new reset request and we are not here
4117 * just because the last reset attempt did not succeed and the watchdog
4118 * hit us again. We will know this if the last reset request did not occur
4119 * very recently (watchdog timer = 5*HZ, let us check after a sufficiently
4120 * large time, say 4*5*HZ). In case of a new request we reset the
4121 * "reset level" to PF reset. And if it is a repeat reset request of
4122 * the most recent one then we want to make sure we throttle the reset
4123 * request. Therefore, we will not allow it again before 3*HZ has passed.
4126 handle = &hdev->vport[0].nic;
4128 if (time_before(jiffies, (hdev->last_reset_time +
4129 HCLGE_RESET_INTERVAL))) {
4130 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4132 } else if (hdev->default_reset_request) {
4134 hclge_get_reset_level(ae_dev,
4135 &hdev->default_reset_request);
4136 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4137 hdev->reset_level = HNAE3_FUNC_RESET;
4140 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4143 /* request reset & schedule reset task */
4144 set_bit(hdev->reset_level, &hdev->reset_request);
4145 hclge_reset_task_schedule(hdev);
4147 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4148 hdev->reset_level++;
4151 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4152 enum hnae3_reset_type rst_type)
4154 struct hclge_dev *hdev = ae_dev->priv;
4156 set_bit(rst_type, &hdev->default_reset_request);
4159 static void hclge_reset_timer(struct timer_list *t)
4161 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4163 /* if default_reset_request has no value, it means that this reset
4164 * request has already been handled, so just return here
4166 if (!hdev->default_reset_request)
4169 dev_info(&hdev->pdev->dev,
4170 "triggering reset in reset timer\n");
4171 hclge_reset_event(hdev->pdev, NULL);
4174 static void hclge_reset_subtask(struct hclge_dev *hdev)
4176 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4178 /* check if there is any ongoing reset in the hardware. This status can
4179 * be checked from reset_pending. If there is, we need to wait for the
4180 * hardware to complete the reset.
4181 * a. If we are able to figure out in reasonable time that the hardware
4182 * has fully reset, then we can proceed with the driver and client
4184 * b. else, we can come back later to check this status so re-sched
4187 hdev->last_reset_time = jiffies;
4188 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4189 if (hdev->reset_type != HNAE3_NONE_RESET)
4192 /* check if we got any *new* reset requests to be honored */
4193 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4194 if (hdev->reset_type != HNAE3_NONE_RESET)
4195 hclge_do_reset(hdev);
4197 hdev->reset_type = HNAE3_NONE_RESET;
4200 static void hclge_reset_service_task(struct hclge_dev *hdev)
4202 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4205 down(&hdev->reset_sem);
4206 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4208 hclge_reset_subtask(hdev);
4210 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4211 up(&hdev->reset_sem);
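/* hclge_update_vport_alive: a VF vport is considered dead once it has
 * not refreshed last_active_jiffies within 8 * HZ; its ALIVE bit is
 * then cleared and its MPS falls back to the default frame size.
 */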
4214 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4218 /* start from vport 1, since the PF (vport 0) is always alive */
4219 for (i = 1; i < hdev->num_alloc_vport; i++) {
4220 struct hclge_vport *vport = &hdev->vport[i];
4222 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4223 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4225 /* If the VF is not alive, set its MPS to the default value */
4226 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4227 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4231 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4233 unsigned long delta = round_jiffies_relative(HZ);
4235 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4238 /* Always handle the link updating to make sure the link state is
4239 * updated when it is triggered by mbx.
4241 hclge_update_link_status(hdev);
4242 hclge_sync_mac_table(hdev);
4243 hclge_sync_promisc_mode(hdev);
4245 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4246 delta = jiffies - hdev->last_serv_processed;
4248 if (delta < round_jiffies_relative(HZ)) {
4249 delta = round_jiffies_relative(HZ) - delta;
4254 hdev->serv_processed_cnt++;
4255 hclge_update_vport_alive(hdev);
4257 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4258 hdev->last_serv_processed = jiffies;
4262 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4263 hclge_update_stats_for_all(hdev);
4265 hclge_update_port_info(hdev);
4266 hclge_sync_vlan_filter(hdev);
4268 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4269 hclge_rfs_filter_expire(hdev);
4271 hdev->last_serv_processed = jiffies;
4274 hclge_task_schedule(hdev, delta);
4277 static void hclge_service_task(struct work_struct *work)
4279 struct hclge_dev *hdev =
4280 container_of(work, struct hclge_dev, service_task.work);
4282 hclge_reset_service_task(hdev);
4283 hclge_mailbox_service_task(hdev);
4284 hclge_periodic_service_task(hdev);
4286 /* Handle reset and mbx again in case the periodic task delays the
4287 * handling by calling hclge_task_schedule() in
4288 * hclge_periodic_service_task().
4290 hclge_reset_service_task(hdev);
4291 hclge_mailbox_service_task(hdev);
4294 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4296 /* VF handle has no client */
4297 if (!handle->client)
4298 return container_of(handle, struct hclge_vport, nic);
4299 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4300 return container_of(handle, struct hclge_vport, roce);
4302 return container_of(handle, struct hclge_vport, nic);
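/* hclge_get_vector_info: fill in the IRQ number and the I/O address of
 * the interrupt control register for vector @idx. The first
 * HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 vectors live in the base register
 * region; later ones use the extended region with a row/column offset.
 */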
4305 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4306 struct hnae3_vector_info *vector_info)
4308 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4310 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4312 /* need an extended offset to configure vectors >= 64 */
4313 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4314 vector_info->io_addr = hdev->hw.io_base +
4315 HCLGE_VECTOR_REG_BASE +
4316 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4318 vector_info->io_addr = hdev->hw.io_base +
4319 HCLGE_VECTOR_EXT_REG_BASE +
4320 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4321 HCLGE_VECTOR_REG_OFFSET_H +
4322 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4323 HCLGE_VECTOR_REG_OFFSET;
4325 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4326 hdev->vector_irq[idx] = vector_info->vector;
4329 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4330 struct hnae3_vector_info *vector_info)
4332 struct hclge_vport *vport = hclge_get_vport(handle);
4333 struct hnae3_vector_info *vector = vector_info;
4334 struct hclge_dev *hdev = vport->back;
4339 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4340 vector_num = min(hdev->num_msi_left, vector_num);
4342 for (j = 0; j < vector_num; j++) {
4343 while (++i < hdev->num_nic_msi) {
4344 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4345 hclge_get_vector_info(hdev, i, vector);
4353 hdev->num_msi_left -= alloc;
4354 hdev->num_msi_used += alloc;
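/* A sketch of the free-slot scan performed by hclge_get_vector() above
 * (array and sentinel names are assumptions): walk the status array and
 * hand out the first entry still marked invalid. Slot 0 is skipped
 * because it is reserved for the misc interrupt.
 */
static int __maybe_unused example_find_free_vector(const u16 *status, int n,
						   u16 invalid)
{
	int i;

	for (i = 1; i < n; i++)
		if (status[i] == invalid)
			return i;

	return -ENOSPC;
}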
4359 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4363 for (i = 0; i < hdev->num_msi; i++)
4364 if (vector == hdev->vector_irq[i])
4370 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4372 struct hclge_vport *vport = hclge_get_vport(handle);
4373 struct hclge_dev *hdev = vport->back;
4376 vector_id = hclge_get_vector_index(hdev, vector);
4377 if (vector_id < 0) {
4378 dev_err(&hdev->pdev->dev,
4379 "Get vector index fail. vector = %d\n", vector);
4383 hclge_free_vector(hdev, vector_id);
4388 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4390 return HCLGE_RSS_KEY_SIZE;
4393 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4394 const u8 hfunc, const u8 *key)
4396 struct hclge_rss_config_cmd *req;
4397 unsigned int key_offset = 0;
4398 struct hclge_desc desc;
4403 key_counts = HCLGE_RSS_KEY_SIZE;
4404 req = (struct hclge_rss_config_cmd *)desc.data;
4406 while (key_counts) {
4407 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4410 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4411 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4413 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4414 memcpy(req->hash_key,
4415 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4417 key_counts -= key_size;
4419 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4421 dev_err(&hdev->pdev->dev,
4422 "Configure RSS config fail, status = %d\n",
4430 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4432 struct hclge_rss_indirection_table_cmd *req;
4433 struct hclge_desc desc;
4434 int rss_cfg_tbl_num;
4442 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4443 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4444 HCLGE_RSS_CFG_TBL_SIZE;
4446 for (i = 0; i < rss_cfg_tbl_num; i++) {
4447 hclge_cmd_setup_basic_desc
4448 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4450 req->start_table_index =
4451 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4452 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4453 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4454 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4455 req->rss_qid_l[j] = qid & 0xff;
4457 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4458 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4459 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4460 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4462 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4464 dev_err(&hdev->pdev->dev,
4465 "Configure rss indir table fail,status = %d\n",
4473 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4474 u16 *tc_size, u16 *tc_offset)
4476 struct hclge_rss_tc_mode_cmd *req;
4477 struct hclge_desc desc;
4481 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4482 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4484 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4487 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4488 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4489 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4490 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4491 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4492 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4493 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4495 req->rss_tc_mode[i] = cpu_to_le16(mode);
4498 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4500 dev_err(&hdev->pdev->dev,
4501 "Configure rss tc mode fail, status = %d\n", ret);
4506 static void hclge_get_rss_type(struct hclge_vport *vport)
4508 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4509 vport->rss_tuple_sets.ipv4_udp_en ||
4510 vport->rss_tuple_sets.ipv4_sctp_en ||
4511 vport->rss_tuple_sets.ipv6_tcp_en ||
4512 vport->rss_tuple_sets.ipv6_udp_en ||
4513 vport->rss_tuple_sets.ipv6_sctp_en)
4514 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4515 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4516 vport->rss_tuple_sets.ipv6_fragment_en)
4517 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4519 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4522 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4524 struct hclge_rss_input_tuple_cmd *req;
4525 struct hclge_desc desc;
4528 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4530 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4532 /* Get the tuple cfg from pf */
4533 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4534 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4535 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4536 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4537 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4538 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4539 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4540 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4541 hclge_get_rss_type(&hdev->vport[0]);
4542 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4544 dev_err(&hdev->pdev->dev,
4545 "Configure rss input fail, status = %d\n", ret);
4549 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4552 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4553 struct hclge_vport *vport = hclge_get_vport(handle);
4556 /* Get hash algorithm */
4558 switch (vport->rss_algo) {
4559 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4560 *hfunc = ETH_RSS_HASH_TOP;
4562 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4563 *hfunc = ETH_RSS_HASH_XOR;
4566 *hfunc = ETH_RSS_HASH_UNKNOWN;
4571 /* Get the RSS key requested by the user */
4573 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4575 /* Get the indirection table */
4577 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4578 indir[i] = vport->rss_indirection_tbl[i];
4583 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4584 const u8 *key, const u8 hfunc)
4586 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4587 struct hclge_vport *vport = hclge_get_vport(handle);
4588 struct hclge_dev *hdev = vport->back;
4592 /* Set the RSS hash key if specified by the user */
4595 case ETH_RSS_HASH_TOP:
4596 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4598 case ETH_RSS_HASH_XOR:
4599 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4601 case ETH_RSS_HASH_NO_CHANGE:
4602 hash_algo = vport->rss_algo;
4608 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4612 /* Update the shadow RSS key with the user specified key */
4613 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4614 vport->rss_algo = hash_algo;
4617 /* Update the shadow RSS table with user specified qids */
4618 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4619 vport->rss_indirection_tbl[i] = indir[i];
4621 /* Update the hardware */
4622 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
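/* A sketch of the ethtool hfunc translation used in hclge_set_rss()
 * (helper name is illustrative): ETH_RSS_HASH_TOP selects Toeplitz,
 * ETH_RSS_HASH_XOR the simple hash, and ETH_RSS_HASH_NO_CHANGE keeps
 * whatever is currently programmed.
 */
static int __maybe_unused example_map_hfunc(u8 hfunc, u8 cur_algo, u8 *hw_algo)
{
	switch (hfunc) {
	case ETH_RSS_HASH_TOP:
		*hw_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
		return 0;
	case ETH_RSS_HASH_XOR:
		*hw_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
		return 0;
	case ETH_RSS_HASH_NO_CHANGE:
		*hw_algo = cur_algo;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}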
4625 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4627 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4629 if (nfc->data & RXH_L4_B_2_3)
4630 hash_sets |= HCLGE_D_PORT_BIT;
4632 hash_sets &= ~HCLGE_D_PORT_BIT;
4634 if (nfc->data & RXH_IP_SRC)
4635 hash_sets |= HCLGE_S_IP_BIT;
4637 hash_sets &= ~HCLGE_S_IP_BIT;
4639 if (nfc->data & RXH_IP_DST)
4640 hash_sets |= HCLGE_D_IP_BIT;
4642 hash_sets &= ~HCLGE_D_IP_BIT;
4644 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4645 hash_sets |= HCLGE_V_TAG_BIT;
4650 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4651 struct ethtool_rxnfc *nfc,
4652 struct hclge_rss_input_tuple_cmd *req)
4654 struct hclge_dev *hdev = vport->back;
4657 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4658 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4659 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4660 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4661 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4662 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4663 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4664 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4666 tuple_sets = hclge_get_rss_hash_bits(nfc);
4667 switch (nfc->flow_type) {
4669 req->ipv4_tcp_en = tuple_sets;
4672 req->ipv6_tcp_en = tuple_sets;
4675 req->ipv4_udp_en = tuple_sets;
4678 req->ipv6_udp_en = tuple_sets;
4681 req->ipv4_sctp_en = tuple_sets;
4684 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4685 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4688 req->ipv6_sctp_en = tuple_sets;
4691 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4694 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4703 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4704 struct ethtool_rxnfc *nfc)
4706 struct hclge_vport *vport = hclge_get_vport(handle);
4707 struct hclge_dev *hdev = vport->back;
4708 struct hclge_rss_input_tuple_cmd *req;
4709 struct hclge_desc desc;
4712 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4713 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4716 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4717 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4719 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4721 dev_err(&hdev->pdev->dev,
4722 "failed to init rss tuple cmd, ret = %d\n", ret);
4726 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4728 dev_err(&hdev->pdev->dev,
4729 "Set rss tuple fail, status = %d\n", ret);
4733 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4734 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4735 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4736 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4737 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4738 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4739 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4740 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4741 hclge_get_rss_type(vport);
4745 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4748 switch (flow_type) {
4750 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4753 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4756 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4759 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4762 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4765 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4769 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4778 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4782 if (tuple_sets & HCLGE_D_PORT_BIT)
4783 tuple_data |= RXH_L4_B_2_3;
4784 if (tuple_sets & HCLGE_S_PORT_BIT)
4785 tuple_data |= RXH_L4_B_0_1;
4786 if (tuple_sets & HCLGE_D_IP_BIT)
4787 tuple_data |= RXH_IP_DST;
4788 if (tuple_sets & HCLGE_S_IP_BIT)
4789 tuple_data |= RXH_IP_SRC;
4794 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4795 struct ethtool_rxnfc *nfc)
4797 struct hclge_vport *vport = hclge_get_vport(handle);
4803 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4804 if (ret || !tuple_sets)
4807 nfc->data = hclge_convert_rss_tuple(tuple_sets);
4812 static int hclge_get_tc_size(struct hnae3_handle *handle)
4814 struct hclge_vport *vport = hclge_get_vport(handle);
4815 struct hclge_dev *hdev = vport->back;
4817 return hdev->pf_rss_size_max;
4820 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4822 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4823 struct hclge_vport *vport = hdev->vport;
4824 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4825 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4826 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4827 struct hnae3_tc_info *tc_info;
4832 tc_info = &vport->nic.kinfo.tc_info;
4833 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4834 rss_size = tc_info->tqp_count[i];
4837 if (!(hdev->hw_tc_map & BIT(i)))
4840 /* tc_size set to hardware is the log2 of the roundup power of two
4841 * of rss_size; the actual queue size is limited by the indirection table */
4844 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4846 dev_err(&hdev->pdev->dev,
4847 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4852 roundup_size = roundup_pow_of_two(rss_size);
4853 roundup_size = ilog2(roundup_size);
4856 tc_size[i] = roundup_size;
4857 tc_offset[i] = tc_info->tqp_offset[i];
4860 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
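/* A one-line sketch of the tc_size conversion above: the hardware is
 * given log2(roundup_pow_of_two(rss_size)), e.g. rss_size 12 rounds up
 * to 16 and yields tc_size 4 (helper name is illustrative).
 */
static u16 __maybe_unused example_rss_size_to_tc_size(u16 rss_size)
{
	return ilog2(roundup_pow_of_two(rss_size));
}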
4863 int hclge_rss_init_hw(struct hclge_dev *hdev)
4865 struct hclge_vport *vport = hdev->vport;
4866 u16 *rss_indir = vport[0].rss_indirection_tbl;
4867 u8 *key = vport[0].rss_hash_key;
4868 u8 hfunc = vport[0].rss_algo;
4871 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4875 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4879 ret = hclge_set_rss_input_tuple(hdev);
4883 return hclge_init_rss_tc_mode(hdev);
4886 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4888 struct hclge_vport *vport = hdev->vport;
4891 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4892 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4893 vport[j].rss_indirection_tbl[i] =
4894 i % vport[j].alloc_rss_size;
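/* A sketch of the default spread above (helper name is illustrative):
 * entry i of the indirection table points at queue (i % rss_size), so
 * hash buckets are dealt round-robin across the allocated RSS queues.
 */
static void __maybe_unused example_fill_indir_tbl(u16 *tbl, u16 tbl_size,
						  u16 rss_size)
{
	u16 i;

	for (i = 0; i < tbl_size; i++)
		tbl[i] = i % rss_size;
}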
4898 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4900 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4901 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4902 struct hclge_vport *vport = hdev->vport;
4904 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4905 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4907 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4910 vport[i].rss_tuple_sets.ipv4_tcp_en =
4911 HCLGE_RSS_INPUT_TUPLE_OTHER;
4912 vport[i].rss_tuple_sets.ipv4_udp_en =
4913 HCLGE_RSS_INPUT_TUPLE_OTHER;
4914 vport[i].rss_tuple_sets.ipv4_sctp_en =
4915 HCLGE_RSS_INPUT_TUPLE_SCTP;
4916 vport[i].rss_tuple_sets.ipv4_fragment_en =
4917 HCLGE_RSS_INPUT_TUPLE_OTHER;
4918 vport[i].rss_tuple_sets.ipv6_tcp_en =
4919 HCLGE_RSS_INPUT_TUPLE_OTHER;
4920 vport[i].rss_tuple_sets.ipv6_udp_en =
4921 HCLGE_RSS_INPUT_TUPLE_OTHER;
4922 vport[i].rss_tuple_sets.ipv6_sctp_en =
4923 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4924 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4925 HCLGE_RSS_INPUT_TUPLE_SCTP;
4926 vport[i].rss_tuple_sets.ipv6_fragment_en =
4927 HCLGE_RSS_INPUT_TUPLE_OTHER;
4929 vport[i].rss_algo = rss_algo;
4931 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4932 sizeof(*rss_ind_tbl), GFP_KERNEL);
4936 vport[i].rss_indirection_tbl = rss_ind_tbl;
4937 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4938 HCLGE_RSS_KEY_SIZE);
4941 hclge_rss_indir_init_cfg(hdev);
4946 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4947 int vector_id, bool en,
4948 struct hnae3_ring_chain_node *ring_chain)
4950 struct hclge_dev *hdev = vport->back;
4951 struct hnae3_ring_chain_node *node;
4952 struct hclge_desc desc;
4953 struct hclge_ctrl_vector_chain_cmd *req =
4954 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4955 enum hclge_cmd_status status;
4956 enum hclge_opcode_type op;
4957 u16 tqp_type_and_id;
4960 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4961 hclge_cmd_setup_basic_desc(&desc, op, false);
4962 req->int_vector_id_l = hnae3_get_field(vector_id,
4963 HCLGE_VECTOR_ID_L_M,
4964 HCLGE_VECTOR_ID_L_S);
4965 req->int_vector_id_h = hnae3_get_field(vector_id,
4966 HCLGE_VECTOR_ID_H_M,
4967 HCLGE_VECTOR_ID_H_S);
4970 for (node = ring_chain; node; node = node->next) {
4971 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4972 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4974 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4975 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4976 HCLGE_TQP_ID_S, node->tqp_index);
4977 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4979 hnae3_get_field(node->int_gl_idx,
4980 HNAE3_RING_GL_IDX_M,
4981 HNAE3_RING_GL_IDX_S));
4982 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4983 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4984 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4985 req->vfid = vport->vport_id;
4987 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4989 dev_err(&hdev->pdev->dev,
4990 "Map TQP fail, status is %d.\n",
4996 hclge_cmd_setup_basic_desc(&desc,
4999 req->int_vector_id_l =
5000 hnae3_get_field(vector_id,
5001 HCLGE_VECTOR_ID_L_M,
5002 HCLGE_VECTOR_ID_L_S);
5003 req->int_vector_id_h =
5004 hnae3_get_field(vector_id,
5005 HCLGE_VECTOR_ID_H_M,
5006 HCLGE_VECTOR_ID_H_S);
5011 req->int_cause_num = i;
5012 req->vfid = vport->vport_id;
5013 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5015 dev_err(&hdev->pdev->dev,
5016 "Map TQP fail, status is %d.\n", status);
5024 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5025 struct hnae3_ring_chain_node *ring_chain)
5027 struct hclge_vport *vport = hclge_get_vport(handle);
5028 struct hclge_dev *hdev = vport->back;
5031 vector_id = hclge_get_vector_index(hdev, vector);
5032 if (vector_id < 0) {
5033 dev_err(&hdev->pdev->dev,
5034 "failed to get vector index. vector=%d\n", vector);
5038 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5041 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5042 struct hnae3_ring_chain_node *ring_chain)
5044 struct hclge_vport *vport = hclge_get_vport(handle);
5045 struct hclge_dev *hdev = vport->back;
5048 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5051 vector_id = hclge_get_vector_index(hdev, vector);
5052 if (vector_id < 0) {
5053 dev_err(&handle->pdev->dev,
5054 "Get vector index fail. ret =%d\n", vector_id);
5058 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5060 dev_err(&handle->pdev->dev,
5061 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5067 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5068 bool en_uc, bool en_mc, bool en_bc)
5070 struct hclge_vport *vport = &hdev->vport[vf_id];
5071 struct hnae3_handle *handle = &vport->nic;
5072 struct hclge_promisc_cfg_cmd *req;
5073 struct hclge_desc desc;
5074 bool uc_tx_en = en_uc;
5078 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5080 req = (struct hclge_promisc_cfg_cmd *)desc.data;
5083 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5086 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5087 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5088 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5089 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5090 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5091 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5092 req->extend_promisc = promisc_cfg;
5094 /* to be compatible with DEVICE_VERSION_V1/2 */
5096 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5097 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5098 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5099 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5100 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5101 req->promisc = promisc_cfg;
5103 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5105 dev_err(&hdev->pdev->dev,
5106 "failed to set vport %u promisc mode, ret = %d.\n",
5112 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5113 bool en_mc_pmc, bool en_bc_pmc)
5115 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5116 en_uc_pmc, en_mc_pmc, en_bc_pmc);
5119 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5122 struct hclge_vport *vport = hclge_get_vport(handle);
5123 struct hclge_dev *hdev = vport->back;
5124 bool en_bc_pmc = true;
5126 /* For devices whose version is below V2, if broadcast promisc is enabled,
5127 * the vlan filter is always bypassed. So broadcast promisc should be
5128 * disabled until the user enables promisc mode
5130 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5131 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5133 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5137 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5139 struct hclge_vport *vport = hclge_get_vport(handle);
5140 struct hclge_dev *hdev = vport->back;
5142 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
5145 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5147 struct hclge_get_fd_mode_cmd *req;
5148 struct hclge_desc desc;
5151 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5153 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5155 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5157 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5161 *fd_mode = req->mode;
5166 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5167 u32 *stage1_entry_num,
5168 u32 *stage2_entry_num,
5169 u16 *stage1_counter_num,
5170 u16 *stage2_counter_num)
5172 struct hclge_get_fd_allocation_cmd *req;
5173 struct hclge_desc desc;
5176 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5178 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5180 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5182 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5187 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5188 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5189 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5190 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5195 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5196 enum HCLGE_FD_STAGE stage_num)
5198 struct hclge_set_fd_key_config_cmd *req;
5199 struct hclge_fd_key_cfg *stage;
5200 struct hclge_desc desc;
5203 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5205 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5206 stage = &hdev->fd_cfg.key_cfg[stage_num];
5207 req->stage = stage_num;
5208 req->key_select = stage->key_sel;
5209 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5210 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5211 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5212 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5213 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5214 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5216 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5218 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5223 static int hclge_init_fd_config(struct hclge_dev *hdev)
5225 #define LOW_2_WORDS 0x03
5226 struct hclge_fd_key_cfg *key_cfg;
5229 if (!hnae3_dev_fd_supported(hdev))
5232 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5236 switch (hdev->fd_cfg.fd_mode) {
5237 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5238 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5240 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5241 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5244 dev_err(&hdev->pdev->dev,
5245 "Unsupported flow director mode %u\n",
5246 hdev->fd_cfg.fd_mode);
5250 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5251 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5252 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5253 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5254 key_cfg->outer_sipv6_word_en = 0;
5255 key_cfg->outer_dipv6_word_en = 0;
5257 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5258 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5259 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5260 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5262 /* If the max 400-bit key is used, we can support tuples for ether type */
5263 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5264 key_cfg->tuple_active |=
5265 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5267 /* roce_type is used to filter RoCE frames
5268 * dst_vport is used to specify the rule
5270 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5272 ret = hclge_get_fd_allocation(hdev,
5273 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5274 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5275 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5276 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5280 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5283 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5284 int loc, u8 *key, bool is_add)
5286 struct hclge_fd_tcam_config_1_cmd *req1;
5287 struct hclge_fd_tcam_config_2_cmd *req2;
5288 struct hclge_fd_tcam_config_3_cmd *req3;
5289 struct hclge_desc desc[3];
5292 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5293 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5294 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5295 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5296 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5298 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5299 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5300 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5302 req1->stage = stage;
5303 req1->xy_sel = sel_x ? 1 : 0;
5304 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5305 req1->index = cpu_to_le32(loc);
5306 req1->entry_vld = sel_x ? is_add : 0;
5309 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5310 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5311 sizeof(req2->tcam_data));
5312 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5313 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5316 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5318 dev_err(&hdev->pdev->dev,
5319 "config tcam key fail, ret=%d\n",
5325 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5326 struct hclge_fd_ad_data *action)
5328 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5329 struct hclge_fd_ad_config_cmd *req;
5330 struct hclge_desc desc;
5334 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5336 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5337 req->index = cpu_to_le32(loc);
5340 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5341 action->write_rule_id_to_bd);
5342 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5344 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5345 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5346 action->override_tc);
5347 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5348 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5351 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5352 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5353 action->forward_to_direct_queue);
5354 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5356 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5357 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5358 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5359 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5360 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5361 action->counter_id);
5363 req->ad_data = cpu_to_le64(ad_data);
5364 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5366 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5371 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5372 struct hclge_fd_rule *rule)
5374 u16 tmp_x_s, tmp_y_s;
5375 u32 tmp_x_l, tmp_y_l;
5378 if (rule->unused_tuple & tuple_bit)
5381 switch (tuple_bit) {
5382 case BIT(INNER_DST_MAC):
5383 for (i = 0; i < ETH_ALEN; i++) {
5384 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5385 rule->tuples_mask.dst_mac[i]);
5386 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5387 rule->tuples_mask.dst_mac[i]);
5391 case BIT(INNER_SRC_MAC):
5392 for (i = 0; i < ETH_ALEN; i++) {
5393 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5394 rule->tuples_mask.src_mac[i]);
5395 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5396 rule->tuples_mask.src_mac[i]);
5400 case BIT(INNER_VLAN_TAG_FST):
5401 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5402 rule->tuples_mask.vlan_tag1);
5403 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5404 rule->tuples_mask.vlan_tag1);
5405 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5406 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5409 case BIT(INNER_ETH_TYPE):
5410 calc_x(tmp_x_s, rule->tuples.ether_proto,
5411 rule->tuples_mask.ether_proto);
5412 calc_y(tmp_y_s, rule->tuples.ether_proto,
5413 rule->tuples_mask.ether_proto);
5414 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5415 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5418 case BIT(INNER_IP_TOS):
5419 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5420 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5423 case BIT(INNER_IP_PROTO):
5424 calc_x(*key_x, rule->tuples.ip_proto,
5425 rule->tuples_mask.ip_proto);
5426 calc_y(*key_y, rule->tuples.ip_proto,
5427 rule->tuples_mask.ip_proto);
5430 case BIT(INNER_SRC_IP):
5431 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5432 rule->tuples_mask.src_ip[IPV4_INDEX]);
5433 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5434 rule->tuples_mask.src_ip[IPV4_INDEX]);
5435 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5436 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5439 case BIT(INNER_DST_IP):
5440 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5441 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5442 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5443 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5444 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5445 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5448 case BIT(INNER_SRC_PORT):
5449 calc_x(tmp_x_s, rule->tuples.src_port,
5450 rule->tuples_mask.src_port);
5451 calc_y(tmp_y_s, rule->tuples.src_port,
5452 rule->tuples_mask.src_port);
5453 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5454 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5457 case BIT(INNER_DST_PORT):
5458 calc_x(tmp_x_s, rule->tuples.dst_port,
5459 rule->tuples_mask.dst_port);
5460 calc_y(tmp_y_s, rule->tuples.dst_port,
5461 rule->tuples_mask.dst_port);
5462 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5463 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
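/* hclge_fd_convert_tuple() relies on the calc_x()/calc_y() macros
 * defined elsewhere in this file to derive the two per-entry TCAM search
 * patterns from a value/mask pair. Below is a generic sketch of one
 * common x/y convention, for illustration only; the exact bit pairing is
 * hardware defined.
 */
static void __maybe_unused example_tcam_xy(u8 data, u8 mask, u8 *key_x,
					   u8 *key_y)
{
	/* masked-out bits become 0 in both patterns: "match anything" */
	*key_x = ~data & mask;
	*key_y = data & mask;
}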
5471 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5472 u8 vf_id, u8 network_port_id)
5474 u32 port_number = 0;
5476 if (port_type == HOST_PORT) {
5477 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5479 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5481 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5483 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5484 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5485 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5491 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5492 __le32 *key_x, __le32 *key_y,
5493 struct hclge_fd_rule *rule)
5495 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5496 u8 cur_pos = 0, tuple_size, shift_bits;
5499 for (i = 0; i < MAX_META_DATA; i++) {
5500 tuple_size = meta_data_key_info[i].key_length;
5501 tuple_bit = key_cfg->meta_data_active & BIT(i);
5503 switch (tuple_bit) {
5504 case BIT(ROCE_TYPE):
5505 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5506 cur_pos += tuple_size;
5508 case BIT(DST_VPORT):
5509 port_number = hclge_get_port_number(HOST_PORT, 0,
5511 hnae3_set_field(meta_data,
5512 GENMASK(cur_pos + tuple_size, cur_pos),
5513 cur_pos, port_number);
5514 cur_pos += tuple_size;
5521 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5522 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5523 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5525 *key_x = cpu_to_le32(tmp_x << shift_bits);
5526 *key_y = cpu_to_le32(tmp_y << shift_bits);
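/* A sketch of the final alignment above (assumes 0 < used_bits <= 32;
 * helper name is illustrative): fields are appended at a running bit
 * cursor from bit 0, then the word is shifted left so the used bits sit
 * flush against the MSB of the 32-bit meta data region.
 */
static u32 __maybe_unused example_align_meta_data(u32 fields, u8 used_bits)
{
	u8 shift = sizeof(fields) * BITS_PER_BYTE - used_bits;

	return fields << shift;
}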
5529 /* A complete key combines the meta data key and the tuple key.
5530 * The meta data key is stored in the MSB region, and the tuple key is
5531 * stored in the LSB region; unused bits are filled with 0.
5533 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5534 struct hclge_fd_rule *rule)
5536 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5537 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5538 u8 *cur_key_x, *cur_key_y;
5539 u8 meta_data_region;
5544 memset(key_x, 0, sizeof(key_x));
5545 memset(key_y, 0, sizeof(key_y));
5549 for (i = 0 ; i < MAX_TUPLE; i++) {
5553 tuple_size = tuple_key_info[i].key_length / 8;
5554 check_tuple = key_cfg->tuple_active & BIT(i);
5556 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5559 cur_key_x += tuple_size;
5560 cur_key_y += tuple_size;
5564 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5565 MAX_META_DATA_LENGTH / 8;
5567 hclge_fd_convert_meta_data(key_cfg,
5568 (__le32 *)(key_x + meta_data_region),
5569 (__le32 *)(key_y + meta_data_region),
5572 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5575 dev_err(&hdev->pdev->dev,
5576 "fd key_y config fail, loc=%u, ret=%d\n",
5577 rule->location, ret);
5581 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5584 dev_err(&hdev->pdev->dev,
5585 "fd key_x config fail, loc=%u, ret=%d\n",
5586 rule->location, ret);
5590 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5591 struct hclge_fd_rule *rule)
5593 struct hclge_vport *vport = hdev->vport;
5594 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5595 struct hclge_fd_ad_data ad_data;
5597 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5598 ad_data.ad_id = rule->location;
5600 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5601 ad_data.drop_packet = true;
5602 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5603 ad_data.override_tc = true;
5605 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5607 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5609 ad_data.forward_to_direct_queue = true;
5610 ad_data.queue_id = rule->queue_id;
5613 ad_data.use_counter = false;
5614 ad_data.counter_id = 0;
5616 ad_data.use_next_stage = false;
5617 ad_data.next_input_key = 0;
5619 ad_data.write_rule_id_to_bd = true;
5620 ad_data.rule_id = rule->location;
5622 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5625 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5628 if (!spec || !unused_tuple)
5631 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5634 *unused_tuple |= BIT(INNER_SRC_IP);
5637 *unused_tuple |= BIT(INNER_DST_IP);
5640 *unused_tuple |= BIT(INNER_SRC_PORT);
5643 *unused_tuple |= BIT(INNER_DST_PORT);
5646 *unused_tuple |= BIT(INNER_IP_TOS);
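/* A sketch of the convention shared by these check helpers (helper name
 * is illustrative): a field the user left as zero is treated as "not
 * specified", and its tuple bit is marked unused so the key builder can
 * skip it later.
 */
static void __maybe_unused example_mark_unused(u32 field, u32 tuple_bit,
					       u32 *unused_tuple)
{
	if (!field)
		*unused_tuple |= tuple_bit;
}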
5651 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5654 if (!spec || !unused_tuple)
5657 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5658 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5661 *unused_tuple |= BIT(INNER_SRC_IP);
5664 *unused_tuple |= BIT(INNER_DST_IP);
5667 *unused_tuple |= BIT(INNER_IP_TOS);
5670 *unused_tuple |= BIT(INNER_IP_PROTO);
5672 if (spec->l4_4_bytes)
5675 if (spec->ip_ver != ETH_RX_NFC_IP4)
5681 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5684 if (!spec || !unused_tuple)
5687 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5690 /* check whether the src/dst ip addresses are used */
5691 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5692 *unused_tuple |= BIT(INNER_SRC_IP);
5694 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5695 *unused_tuple |= BIT(INNER_DST_IP);
5698 *unused_tuple |= BIT(INNER_SRC_PORT);
5701 *unused_tuple |= BIT(INNER_DST_PORT);
5709 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5712 if (!spec || !unused_tuple)
5715 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5716 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5718 /* check whether the src/dst ip addresses are used */
5719 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5720 *unused_tuple |= BIT(INNER_SRC_IP);
5722 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5723 *unused_tuple |= BIT(INNER_DST_IP);
5725 if (!spec->l4_proto)
5726 *unused_tuple |= BIT(INNER_IP_PROTO);
5731 if (spec->l4_4_bytes)
5737 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5739 if (!spec || !unused_tuple)
5742 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5743 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5744 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5746 if (is_zero_ether_addr(spec->h_source))
5747 *unused_tuple |= BIT(INNER_SRC_MAC);
5749 if (is_zero_ether_addr(spec->h_dest))
5750 *unused_tuple |= BIT(INNER_DST_MAC);
5753 *unused_tuple |= BIT(INNER_ETH_TYPE);
5758 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5759 struct ethtool_rx_flow_spec *fs,
5762 if (fs->flow_type & FLOW_EXT) {
5763 if (fs->h_ext.vlan_etype) {
5764 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5768 if (!fs->h_ext.vlan_tci)
5769 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5771 if (fs->m_ext.vlan_tci &&
5772 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5773 dev_err(&hdev->pdev->dev,
5774 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5775 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5779 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5782 if (fs->flow_type & FLOW_MAC_EXT) {
5783 if (hdev->fd_cfg.fd_mode !=
5784 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5785 dev_err(&hdev->pdev->dev,
5786 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5790 if (is_zero_ether_addr(fs->h_ext.h_dest))
5791 *unused_tuple |= BIT(INNER_DST_MAC);
5793 *unused_tuple &= ~BIT(INNER_DST_MAC);
5799 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5800 struct ethtool_rx_flow_spec *fs,
5806 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5807 dev_err(&hdev->pdev->dev,
5808 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5810 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5814 if ((fs->flow_type & FLOW_EXT) &&
5815 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5816 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5820 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5821 switch (flow_type) {
5825 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5829 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5835 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5838 case IPV6_USER_FLOW:
5839 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5843 if (hdev->fd_cfg.fd_mode !=
5844 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5845 dev_err(&hdev->pdev->dev,
5846 "ETHER_FLOW is not supported in current fd mode!\n");
5850 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5854 dev_err(&hdev->pdev->dev,
5855 "unsupported protocol type, protocol type = %#x\n",
5861 dev_err(&hdev->pdev->dev,
5862 "failed to check flow union tuple, ret = %d\n",
5867 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5870 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5872 struct hclge_fd_rule *rule = NULL;
5873 struct hlist_node *node2;
5875 spin_lock_bh(&hdev->fd_rule_lock);
5876 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5877 if (rule->location >= location)
5881 spin_unlock_bh(&hdev->fd_rule_lock);
5883 return rule && rule->location == location;
5886 /* must be called with fd_rule_lock held */
5887 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5888 struct hclge_fd_rule *new_rule,
5892 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5893 struct hlist_node *node2;
5895 if (is_add && !new_rule)
5898 hlist_for_each_entry_safe(rule, node2,
5899 &hdev->fd_rule_list, rule_node) {
5900 if (rule->location >= location)
5905 if (rule && rule->location == location) {
5906 hlist_del(&rule->rule_node);
5908 hdev->hclge_fd_rule_num--;
5911 if (!hdev->hclge_fd_rule_num)
5912 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5913 clear_bit(location, hdev->fd_bmap);
5917 } else if (!is_add) {
5918 dev_err(&hdev->pdev->dev,
5919 "delete fail, rule %u is inexistent\n",
5924 INIT_HLIST_NODE(&new_rule->rule_node);
5927 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5929 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5931 set_bit(location, hdev->fd_bmap);
5932 hdev->hclge_fd_rule_num++;
5933 hdev->fd_active_type = new_rule->rule_type;
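/* A sketch of the sorted insertion above with a hypothetical node type:
 * the list is kept ordered by location, so find the last node with a
 * smaller location and insert behind it with hlist_add_behind(), or at
 * the head with hlist_add_head() if none exists.
 */
struct example_rule {
	struct hlist_node node;
	u16 location;
};

static void __maybe_unused example_sorted_insert(struct hlist_head *head,
						 struct example_rule *new_rule)
{
	struct example_rule *cur, *parent = NULL;

	hlist_for_each_entry(cur, head, node) {
		if (cur->location >= new_rule->location)
			break;
		parent = cur;
	}

	if (parent)
		hlist_add_behind(&new_rule->node, &parent->node);
	else
		hlist_add_head(&new_rule->node, head);
}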
5938 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5939 struct ethtool_rx_flow_spec *fs,
5940 struct hclge_fd_rule *rule)
5942 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5944 switch (flow_type) {
5948 rule->tuples.src_ip[IPV4_INDEX] =
5949 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5950 rule->tuples_mask.src_ip[IPV4_INDEX] =
5951 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5953 rule->tuples.dst_ip[IPV4_INDEX] =
5954 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5955 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5956 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5958 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5959 rule->tuples_mask.src_port =
5960 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5962 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5963 rule->tuples_mask.dst_port =
5964 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5966 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5967 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5969 rule->tuples.ether_proto = ETH_P_IP;
5970 rule->tuples_mask.ether_proto = 0xFFFF;
5974 rule->tuples.src_ip[IPV4_INDEX] =
5975 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5976 rule->tuples_mask.src_ip[IPV4_INDEX] =
5977 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5979 rule->tuples.dst_ip[IPV4_INDEX] =
5980 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5981 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5982 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5984 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5985 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5987 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5988 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5990 rule->tuples.ether_proto = ETH_P_IP;
5991 rule->tuples_mask.ether_proto = 0xFFFF;
5997 be32_to_cpu_array(rule->tuples.src_ip,
5998 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5999 be32_to_cpu_array(rule->tuples_mask.src_ip,
6000 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
6002 be32_to_cpu_array(rule->tuples.dst_ip,
6003 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
6004 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6005 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
6007 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6008 rule->tuples_mask.src_port =
6009 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6011 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6012 rule->tuples_mask.dst_port =
6013 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6015 rule->tuples.ether_proto = ETH_P_IPV6;
6016 rule->tuples_mask.ether_proto = 0xFFFF;
6019 case IPV6_USER_FLOW:
6020 be32_to_cpu_array(rule->tuples.src_ip,
6021 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
6022 be32_to_cpu_array(rule->tuples_mask.src_ip,
6023 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
6025 be32_to_cpu_array(rule->tuples.dst_ip,
6026 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
6027 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6028 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
6030 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6031 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6033 rule->tuples.ether_proto = ETH_P_IPV6;
6034 rule->tuples_mask.ether_proto = 0xFFFF;
6038 ether_addr_copy(rule->tuples.src_mac,
6039 fs->h_u.ether_spec.h_source);
6040 ether_addr_copy(rule->tuples_mask.src_mac,
6041 fs->m_u.ether_spec.h_source);
6043 ether_addr_copy(rule->tuples.dst_mac,
6044 fs->h_u.ether_spec.h_dest);
6045 ether_addr_copy(rule->tuples_mask.dst_mac,
6046 fs->m_u.ether_spec.h_dest);
6048 rule->tuples.ether_proto =
6049 be16_to_cpu(fs->h_u.ether_spec.h_proto);
6050 rule->tuples_mask.ether_proto =
6051 be16_to_cpu(fs->m_u.ether_spec.h_proto);
6058 switch (flow_type) {
6061 rule->tuples.ip_proto = IPPROTO_SCTP;
6062 rule->tuples_mask.ip_proto = 0xFF;
6066 rule->tuples.ip_proto = IPPROTO_TCP;
6067 rule->tuples_mask.ip_proto = 0xFF;
6071 rule->tuples.ip_proto = IPPROTO_UDP;
6072 rule->tuples_mask.ip_proto = 0xFF;
6078 if (fs->flow_type & FLOW_EXT) {
6079 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6080 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6083 if (fs->flow_type & FLOW_MAC_EXT) {
6084 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6085 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6091 /* must be called with fd_rule_lock held */
6092 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6093 struct hclge_fd_rule *rule)
6098 dev_err(&hdev->pdev->dev,
6099 "The flow director rule is NULL\n");
6103 /* it never fails here, so there is no need to check the return value */
6104 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
6106 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6110 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6117 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
6121 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6123 struct hclge_vport *vport = hclge_get_vport(handle);
6124 struct hclge_dev *hdev = vport->back;
6126 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6129 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6130 struct ethtool_rxnfc *cmd)
6132 struct hclge_vport *vport = hclge_get_vport(handle);
6133 struct hclge_dev *hdev = vport->back;
6134 u16 dst_vport_id = 0, q_index = 0;
6135 struct ethtool_rx_flow_spec *fs;
6136 struct hclge_fd_rule *rule;
6141 if (!hnae3_dev_fd_supported(hdev)) {
6142 dev_err(&hdev->pdev->dev,
6143 "flow table director is not supported\n");
6148 dev_err(&hdev->pdev->dev,
6149 "please enable flow director first\n");
6153 if (hclge_is_cls_flower_active(handle)) {
6154 dev_err(&hdev->pdev->dev,
6155 "please delete all exist cls flower rules first\n");
6159 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6161 ret = hclge_fd_check_spec(hdev, fs, &unused);
6165 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
6166 action = HCLGE_FD_ACTION_DROP_PACKET;
6168 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
6169 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
6172 if (vf > hdev->num_req_vfs) {
6173 dev_err(&hdev->pdev->dev,
6174 "Error: vf id (%u) > max vf num (%u)\n",
6175 vf, hdev->num_req_vfs);
6179 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6180 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
6183 dev_err(&hdev->pdev->dev,
6184 "Error: queue id (%u) > max tqp num (%u)\n",
6189 action = HCLGE_FD_ACTION_SELECT_QUEUE;
6193 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6197 ret = hclge_fd_get_tuple(hdev, fs, rule);
6203 rule->flow_type = fs->flow_type;
6204 rule->location = fs->location;
6205 rule->unused_tuple = unused;
6206 rule->vf_id = dst_vport_id;
6207 rule->queue_id = q_index;
6208 rule->action = action;
6209 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6211 /* to avoid rule conflicts, when the user configures rules via ethtool,
6212 * we need to clear all arfs rules
6214 spin_lock_bh(&hdev->fd_rule_lock);
6215 hclge_clear_arfs_rules(handle);
6217 ret = hclge_fd_config_rule(hdev, rule);
6219 spin_unlock_bh(&hdev->fd_rule_lock);
6224 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6225 struct ethtool_rxnfc *cmd)
6227 struct hclge_vport *vport = hclge_get_vport(handle);
6228 struct hclge_dev *hdev = vport->back;
6229 struct ethtool_rx_flow_spec *fs;
6232 if (!hnae3_dev_fd_supported(hdev))
6235 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6237 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6240 if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6241 !hclge_fd_rule_exist(hdev, fs->location)) {
6242 dev_err(&hdev->pdev->dev,
6243 "Delete fail, rule %u is inexistent\n", fs->location);
6247 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6252 spin_lock_bh(&hdev->fd_rule_lock);
6253 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6255 spin_unlock_bh(&hdev->fd_rule_lock);
6260 /* must be called with fd_rule_lock held */
6261 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6264 struct hclge_vport *vport = hclge_get_vport(handle);
6265 struct hclge_dev *hdev = vport->back;
6266 struct hclge_fd_rule *rule;
6267 struct hlist_node *node;
6270 if (!hnae3_dev_fd_supported(hdev))
6273 for_each_set_bit(location, hdev->fd_bmap,
6274 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6275 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6279 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6281 hlist_del(&rule->rule_node);
6284 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6285 hdev->hclge_fd_rule_num = 0;
6286 bitmap_zero(hdev->fd_bmap,
6287 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6291 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6293 struct hclge_vport *vport = hclge_get_vport(handle);
6294 struct hclge_dev *hdev = vport->back;
6295 struct hclge_fd_rule *rule;
6296 struct hlist_node *node;
6299 /* Return ok here, because reset error handling will check this
6300 * return value. If an error is returned here, the reset process will fail. */
6303 if (!hnae3_dev_fd_supported(hdev))
6306 /* if fd is disabled, it should not be restored on reset */
6310 spin_lock_bh(&hdev->fd_rule_lock);
6311 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6312 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6314 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6317 dev_warn(&hdev->pdev->dev,
6318 "Restore rule %u failed, remove it\n",
6320 clear_bit(rule->location, hdev->fd_bmap);
6321 hlist_del(&rule->rule_node);
6323 hdev->hclge_fd_rule_num--;
6327 if (hdev->hclge_fd_rule_num)
6328 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6330 spin_unlock_bh(&hdev->fd_rule_lock);
6335 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6336 struct ethtool_rxnfc *cmd)
6338 struct hclge_vport *vport = hclge_get_vport(handle);
6339 struct hclge_dev *hdev = vport->back;
6341 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6344 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6345 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6350 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6351 struct ethtool_tcpip4_spec *spec,
6352 struct ethtool_tcpip4_spec *spec_mask)
6354 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6355 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6356 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6358 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6359 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6360 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6362 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6363 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6364 0 : cpu_to_be16(rule->tuples_mask.src_port);
6366 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6367 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6368 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6370 spec->tos = rule->tuples.ip_tos;
6371 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6372 0 : rule->tuples_mask.ip_tos;
6375 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6376 struct ethtool_usrip4_spec *spec,
6377 struct ethtool_usrip4_spec *spec_mask)
6379 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6380 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6381 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6383 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6384 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6385 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6387 spec->tos = rule->tuples.ip_tos;
6388 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6389 0 : rule->tuples_mask.ip_tos;
6391 spec->proto = rule->tuples.ip_proto;
6392 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6393 0 : rule->tuples_mask.ip_proto;
6395 spec->ip_ver = ETH_RX_NFC_IP4;
6398 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6399 struct ethtool_tcpip6_spec *spec,
6400 struct ethtool_tcpip6_spec *spec_mask)
6402 cpu_to_be32_array(spec->ip6src,
6403 rule->tuples.src_ip, IPV6_SIZE);
6404 cpu_to_be32_array(spec->ip6dst,
6405 rule->tuples.dst_ip, IPV6_SIZE);
6406 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6407 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6409 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6412 if (rule->unused_tuple & BIT(INNER_DST_IP))
6413 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6415 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6418 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6419 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6420 0 : cpu_to_be16(rule->tuples_mask.src_port);
6422 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6423 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6424 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6427 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6428 struct ethtool_usrip6_spec *spec,
6429 struct ethtool_usrip6_spec *spec_mask)
6431 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6432 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6433 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6434 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6436 cpu_to_be32_array(spec_mask->ip6src,
6437 rule->tuples_mask.src_ip, IPV6_SIZE);
6439 if (rule->unused_tuple & BIT(INNER_DST_IP))
6440 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6442 cpu_to_be32_array(spec_mask->ip6dst,
6443 rule->tuples_mask.dst_ip, IPV6_SIZE);
6445 spec->l4_proto = rule->tuples.ip_proto;
6446 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6447 0 : rule->tuples_mask.ip_proto;
6450 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6451 struct ethhdr *spec,
6452 struct ethhdr *spec_mask)
6454 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6455 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6457 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6458 eth_zero_addr(spec_mask->h_source);
6460 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6462 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6463 eth_zero_addr(spec_mask->h_dest);
6465 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6467 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6468 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6469 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6472 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6473 struct hclge_fd_rule *rule)
6475 if (fs->flow_type & FLOW_EXT) {
6476 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6477 fs->m_ext.vlan_tci =
6478 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6479 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6482 if (fs->flow_type & FLOW_MAC_EXT) {
6483 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6484 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6485 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6487 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6488 rule->tuples_mask.dst_mac);
6492 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6493 struct ethtool_rxnfc *cmd)
6495 struct hclge_vport *vport = hclge_get_vport(handle);
6496 struct hclge_fd_rule *rule = NULL;
6497 struct hclge_dev *hdev = vport->back;
6498 struct ethtool_rx_flow_spec *fs;
6499 struct hlist_node *node2;
6501 if (!hnae3_dev_fd_supported(hdev))
6504 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6506 spin_lock_bh(&hdev->fd_rule_lock);
6508 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6509 if (rule->location >= fs->location)
6513 if (!rule || fs->location != rule->location) {
6514 spin_unlock_bh(&hdev->fd_rule_lock);
6519 fs->flow_type = rule->flow_type;
6520 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6524 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6525 &fs->m_u.tcp_ip4_spec);
6528 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6529 &fs->m_u.usr_ip4_spec);
6534 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6535 &fs->m_u.tcp_ip6_spec);
6537 case IPV6_USER_FLOW:
6538 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6539 &fs->m_u.usr_ip6_spec);
6541 /* The flow type of the fd rule has been checked before it was added to the
6542 * rule list. As all other flow types have been handled above, it must be
6543 * ETHER_FLOW for the default case
6546 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6547 &fs->m_u.ether_spec);
6551 hclge_fd_get_ext_info(fs, rule);
6553 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6554 fs->ring_cookie = RX_CLS_FLOW_DISC;
6558 fs->ring_cookie = rule->queue_id;
6559 vf_id = rule->vf_id;
6560 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6561 fs->ring_cookie |= vf_id;
6564 spin_unlock_bh(&hdev->fd_rule_lock);
static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}

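/* The helpers below implement aRFS (accelerated RFS) support: flow keys
 * dissected by the stack are converted into the driver's tuple format so
 * they can be matched against, or inserted into, the fd rule list.
 */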
static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32

	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		int i;

		for (i = 0; i < IPV6_SIZE; i++) {
			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
		}
	}
}

/* traverse all rules, check whether an existing rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}

static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}

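/* ndo_rx_flow_steer hook. Note the rule list is only touched under
 * fd_rule_lock, and aRFS backs off entirely while user-configured
 * (ethtool) fd rules are active.
 */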
static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples = {};
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 tmp_queue_id;
	u16 bit_id;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	/* when a fd rule has already been added by the user,
	 * arfs should not work
	 */
	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	/* check whether a flow director filter already exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOSPC;
		}

		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOMEM;
		}

		set_bit(bit_id, hdev->fd_bmap);
		rule->location = bit_id;
		rule->arfs.flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		ret = hclge_fd_config_rule(hdev, rule);

		spin_unlock_bh(&hdev->fd_rule_lock);

		if (ret)
			return ret;

		return rule->location;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (rule->queue_id == queue_id)
		return rule->location;

	tmp_queue_id = rule->queue_id;
	rule->queue_id = queue_id;
	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret) {
		rule->queue_id = tmp_queue_id;
		return ret;
	}

	return rule->location;
}

static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	HLIST_HEAD(del_list);

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->arfs.flow_id, rule->location)) {
			hlist_del_init(&rule->rule_node);
			hlist_add_head(&rule->rule_node, &del_list);
			hdev->hclge_fd_rule_num--;
			clear_bit(rule->location, hdev->fd_bmap);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				     rule->location, NULL, false);
		kfree(rule);
	}
#endif
}

/* must be called with fd_rule_lock held */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}

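/* tc flower offload: the hclge_get_cls_key_* helpers below copy one
 * dissector key group each into the fd rule tuples, marking any key the
 * classifier does not carry as unused so it is masked out in hardware.
 */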
static void hclge_get_cls_key_basic(const struct flow_rule *flow,
				    struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(flow, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}
		rule->tuples.ether_proto = ethtype_key;
		rule->tuples_mask.ether_proto = ethtype_mask;
		rule->tuples.ip_proto = match.key->ip_proto;
		rule->tuples_mask.ip_proto = match.mask->ip_proto;
	} else {
		rule->unused_tuple |= BIT(INNER_IP_PROTO);
		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
	}
}

static void hclge_get_cls_key_mac(const struct flow_rule *flow,
				  struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(flow, &match);
		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
		ether_addr_copy(rule->tuples.src_mac, match.key->src);
		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
	} else {
		rule->unused_tuple |= BIT(INNER_DST_MAC);
		rule->unused_tuple |= BIT(INNER_SRC_MAC);
	}
}

static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(flow, &match);
		rule->tuples.vlan_tag1 = match.key->vlan_id |
				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
	} else {
		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}
}

static void hclge_get_cls_key_ip(const struct flow_rule *flow,
				 struct hclge_fd_rule *rule)
{
	u16 addr_type = 0;

	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(flow, &match);
		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(flow, &match);
		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(match.mask->src);
		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(match.mask->dst);
	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(flow, &match);
		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  match.mask->src.s6_addr32, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  match.mask->dst.s6_addr32, IPV6_SIZE);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_IP);
		rule->unused_tuple |= BIT(INNER_DST_IP);
	}
}

static void hclge_get_cls_key_port(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(flow, &match);

		rule->tuples.src_port = be16_to_cpu(match.key->src);
		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_PORT);
		rule->unused_tuple |= BIT(INNER_DST_PORT);
	}
}

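/* Reject any flower classifier that matches on keys the flow director
 * hardware cannot express, then gather the supported keys one group at
 * a time using the helpers above.
 */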
static int hclge_parse_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower,
				  struct hclge_fd_rule *rule)
{
	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
	struct flow_dissector *dissector = flow->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
			dissector->used_keys);
		return -EOPNOTSUPP;
	}

	hclge_get_cls_key_basic(flow, rule);
	hclge_get_cls_key_mac(flow, rule);
	hclge_get_cls_key_vlan(flow, rule);
	hclge_get_cls_key_ip(flow, rule);
	hclge_get_cls_key_port(flow, rule);

	return 0;
}

static int hclge_check_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower, int tc)
{
	u32 prio = cls_flower->common.prio;

	if (tc < 0 || tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
		return -EINVAL;
	}

	if (prio == 0 ||
	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
		dev_err(&hdev->pdev->dev,
			"prio %u should be in range[1, %u]\n",
			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
		return -EINVAL;
	}

	if (test_bit(prio - 1, hdev->fd_bmap)) {
		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
		return -EINVAL;
	}

	return 0;
}

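/* cls flower rules reuse the fd rule table: the tc prio (minus one) is
 * used directly as the rule location, which is why the prio range is
 * validated against rule_num[HCLGE_FD_STAGE_1] above.
 */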
static int hclge_add_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower,
				int tc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		dev_err(&hdev->pdev->dev,
			"please remove all existing fd rules via ethtool first\n");
		return -EINVAL;
	}

	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check cls flower params, ret = %d\n", ret);
		return ret;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
	if (ret)
		goto err;

	rule->action = HCLGE_FD_ACTION_SELECT_TC;
	rule->cls_flower.tc = tc;
	rule->location = cls_flower->common.prio - 1;
	rule->vf_id = 0;
	rule->cls_flower.cookie = cls_flower->cookie;
	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;

	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(handle);

	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to add cls flower rule, ret = %d\n", ret);
		goto err;
	}

	return 0;
err:
	kfree(rule);
	return ret;
}

static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
						   unsigned long cookie)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->cls_flower.cookie == cookie)
			return rule;
	}

	return NULL;
}

static int hclge_del_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	spin_lock_bh(&hdev->fd_rule_lock);

	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
	if (!rule) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EINVAL;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
				   NULL, false);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to delete cls flower rule %u, ret = %d\n",
			rule->location, ret);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return ret;
	}

	ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to delete cls flower rule %u in list, ret = %d\n",
			rule->location, ret);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return ret;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}

static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool clear;

	hdev->fd_en = enable;
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;

	if (!enable) {
		spin_lock_bh(&hdev->fd_rule_lock);
		hclge_del_all_fd_entries(handle, clear);
		spin_unlock_bh(&hdev->fd_rule_lock);
	} else {
		hclge_restore_fd_entries(handle);
	}
}

static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);

	if (enable) {
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
	}

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}

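/* Read-modify-write of the switch parameter for one function: the
 * current config is fetched first so only the bits covered by
 * param_mask are changed.
 */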
static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
				     u8 switch_param, u8 param_mask)
{
	struct hclge_mac_vlan_switch_cmd *req;
	struct hclge_desc desc;
	u32 func_id;
	int ret;

	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;

	/* read current config parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
				   true);
	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
	req->func_id = cpu_to_le32(func_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"read mac vlan switch parameter fail, ret = %d\n", ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_cmd_reuse_desc(&desc, false);
	req->switch_param = (req->switch_param & param_mask) | switch_param;
	req->param_mask = param_mask;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"set mac vlan switch parameter fail, ret = %d\n", ret);
	return ret;
}

static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
				       int link_ret)
{
#define HCLGE_PHY_LINK_STATUS_NUM  200

	struct phy_device *phydev = hdev->hw.mac.phydev;
	int i = 0;
	int ret;

	do {
		ret = phy_read_status(phydev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"phy update link status fail, ret = %d\n", ret);
			return;
		}

		if (phydev->link == link_ret)
			break;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
}

static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
{
#define HCLGE_MAC_LINK_STATUS_NUM  100

	int link_status;
	int i = 0;
	int ret;

	do {
		ret = hclge_get_mac_link_status(hdev, &link_status);
		if (ret)
			return ret;
		if (link_status == link_ret)
			return 0;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);

	return -EBUSY;
}

static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
					  bool is_phy)
{
	int link_ret;

	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

	if (is_phy)
		hclge_phy_link_status_wait(hdev, link_ret);

	return hclge_mac_link_status_wait(hdev, link_ret);
}

static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}

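/* Common (serdes/phy) loopback is configured through the firmware, and
 * completion is polled afterwards, since the hardware needs some time to
 * finish the internal loopback setup.
 */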
static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_COMMON_LB_RETRY_MS	10
#define HCLGE_COMMON_LB_RETRY_NUM	100

	struct hclge_common_lb_cmd *req;
	struct hclge_desc desc;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_common_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PHY:
		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported common loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
	} else {
		req->mask = loop_mode_b;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_COMMON_LB_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"common loopback get fail, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));

	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
		return -EIO;
	}

	return ret;
}

static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
	int ret;

	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
	if (ret)
		return ret;

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"serdes loopback config mac mode timeout\n");

	return ret;
}

static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
				     struct phy_device *phydev)
{
	int ret;

	if (!phydev->suspended) {
		ret = phy_suspend(phydev);
		if (ret)
			return ret;
	}

	ret = phy_resume(phydev);
	if (ret)
		return ret;

	return phy_loopback(phydev, true);
}

static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
				      struct phy_device *phydev)
{
	int ret;

	ret = phy_loopback(phydev, false);
	if (ret)
		return ret;

	return phy_suspend(phydev);
}

static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int ret;

	if (!phydev) {
		if (hnae3_dev_phy_imp_supported(hdev))
			return hclge_set_common_loopback(hdev, en,
							 HNAE3_LOOP_PHY);
		return -ENOTSUPP;
	}

	if (en)
		ret = hclge_enable_phy_loopback(hdev, phydev);
	else
		ret = hclge_disable_phy_loopback(hdev, phydev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set phy loopback fail, ret = %d\n", ret);
		return ret;
	}

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"phy loopback config mac mode timeout\n");

	return ret;
}

static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
	 * the same, the packets are looped back in the SSU. If SSU loopback
	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
	 */
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);

		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
						HCLGE_SWITCH_ALW_LPBK_MASK);
		if (ret)
			return ret;
	}

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_common_loopback(hdev, en, loop_mode);
		break;
	case HNAE3_LOOP_PHY:
		ret = hclge_set_phy_loopback(hdev, en);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_set_default_loopback(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_app_loopback(hdev, false);
	if (ret)
		return ret;

	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
	if (ret)
		return ret;

	return hclge_cfg_common_loopback(hdev, false,
					 HNAE3_LOOP_PARALLEL_SERDES);
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}

static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev, 0);
	} else {
		/* Set the DOWN flag here to disable link updating */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclge_flush_link_update(hdev);
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(handle);
	spin_unlock_bh(&hdev->fd_rule_lock);

	/* If it is not PF reset, the firmware will disable the MAC,
	 * so we only need to stop the PHY here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		hclge_update_link_status(hdev);
		return;
	}

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclge_reset_tqp(handle, i);

	hclge_config_mac_tnl_int(hdev, false);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}

int hclge_vport_start(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;

	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
		if (vport->vport_id) {
			hclge_restore_mac_table_common(vport);
			hclge_restore_vport_vlan_table(vport);
		} else {
			hclge_restore_hw_table(hdev);
		}
	}

	clear_bit(vport->vport_id, hdev->vport_config_block);

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}

static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if (!resp_code || resp_code == 1)
			return 0;
		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
			 resp_code == HCLGE_ADD_MC_OVERFLOW)
			return -ENOSPC;

		dev_err(&hdev->pdev->dev,
			"add mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"remove mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	}

	dev_err(&hdev->pdev->dev,
		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);

	return -EINVAL;
}

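/* The multicast mac vlan table entry spans three descriptors: the VF
 * bitmap starts in desc[1], which holds the first 192 functions, and
 * continues in desc[2] for the rest.
 */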
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}

static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
			ret);
		return ret;
	}

	*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}

static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "failed to alloc umv space, want %u, get %u\n",
			 hdev->wanted_umv_size, allocated_size);

	hdev->max_umv_size = allocated_size;
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);

	return 0;
}

static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->vport_lock);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
	mutex_unlock(&hdev->vport_lock);
}

static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	if (need_lock)
		mutex_lock(&hdev->vport_lock);

	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);

	if (need_lock)
		mutex_unlock(&hdev->vport_lock);

	return is_full;
}

static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
}

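/* mac address list handling: each address is tracked by a hclge_mac_node
 * whose state (TO_ADD/TO_DEL/ACTIVE) records whether it still needs to be
 * written to, or removed from, the hardware table by the sync task.
 */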
static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
						  const u8 *mac_addr)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
				  enum HCLGE_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGE_MAC_TO_ADD:
		if (mac_node->state == HCLGE_MAC_TO_DEL)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGE_MAC_TO_DEL:
		if (mac_node->state == HCLGE_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGE_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * ACTIVE
	 */
	case HCLGE_MAC_ACTIVE:
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	}
}

int hclge_update_mac_list(struct hclge_vport *vport,
			  enum HCLGE_MAC_NODE_STATE state,
			  enum HCLGE_MAC_ADDR_TYPE mac_type,
			  const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclge_find_mac_node(list, addr);
	if (mac_node) {
		hclge_update_mac_node(mac_node, state);
		spin_unlock_bh(&vport->mac_list_lock);
		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
		return 0;
	}

	/* if this address was never added, there is no need to delete it */
	if (state == HCLGE_MAC_TO_DEL) {
		spin_unlock_bh(&vport->mac_list_lock);
		dev_err(&hdev->pdev->dev,
			"failed to delete address %pM from mac list\n",
			addr);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&vport->mac_list_lock);
		return -ENOMEM;
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&vport->mac_list_lock);

	return 0;
}

static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr, is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Repeated unicast entries
	 * are not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		mutex_lock(&hdev->vport_lock);
		if (!hclge_is_umv_space_full(vport, false)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			mutex_unlock(&hdev->vport_lock);
			return ret;
		}
		mutex_unlock(&hdev->vport_lock);

		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
				hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}

static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret) {
		mutex_lock(&hdev->vport_lock);
		hclge_update_umv_space(vport, true);
		mutex_unlock(&hdev->vport_lock);
	} else if (ret == -ENOENT) {
		ret = 0;
	}

	return ret;
}

static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	/* if already overflow, not to print each time */
	if (status == -ENOSPC &&
	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}

static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (status)
			return status;

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfid is zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfid is zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else if (status == -ENOENT) {
		status = 0;
	}

	return status;
}

static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
				      struct list_head *list,
				      int (*sync)(struct hclge_vport *,
						  const unsigned char *))
{
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = sync(vport, mac_node->mac_addr);
		if (!ret) {
			mac_node->state = HCLGE_MAC_ACTIVE;
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}

static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
					struct list_head *list,
					int (*unsync)(struct hclge_vport *,
						      const unsigned char *))
{
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = unsync(vport, mac_node->mac_addr);
		if (!ret || ret == -ENOENT) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}

static bool hclge_sync_from_add_list(struct list_head *add_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	bool all_added = true;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			all_added = false;

		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * during the time window of adding the mac address into the
		 * mac table. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so it will be removed next time; otherwise it must
		 * be TO_ADD, meaning this address has not been added to the
		 * mac table yet, so just remove the mac node.
		 */
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclge_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_DEL;
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}

	return all_added;
}

static void hclge_sync_from_del_list(struct list_head *del_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * a new TO_ADD request was received during the time
			 * window of configuring the mac address. The mac node
			 * state is TO_ADD, and the address is still in the
			 * hardware (since the delete failed), so we only need
			 * to change the mac node state to ACTIVE.
			 */
			new_node->state = HCLGE_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclge_update_overflow_flags(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type,
					bool is_all_added)
{
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
	} else {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
	}
}

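/* Sync one vport's uc or mc list to hardware. Pending nodes are moved to
 * temporary lists under mac_list_lock so the actual table updates can be
 * issued without holding the spinlock; failures are merged back into the
 * list afterwards for retry.
 */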
static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
				       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;
	bool all_added;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addr to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addr outside the spin lock
	 */
	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;
			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&vport->mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_uc_addr_common);
		hclge_sync_vport_mac_list(vport, &tmp_add_list,
					  hclge_add_uc_addr_common);
	} else {
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_mc_addr_common);
		hclge_sync_vport_mac_list(vport, &tmp_add_list,
					  hclge_add_mc_addr_common);
	}

	/* if some mac addresses failed to be added/deleted, move them back
	 * to the mac_list, and retry next time.
	 */
	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);
	all_added = hclge_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_update_overflow_flags(vport, mac_type, all_added);
}

static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(vport->vport_id, hdev->vport_config_block))
		return false;

	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
		return true;

	return false;
}

static void hclge_sync_mac_table(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (!hclge_need_sync_mac_table(vport))
			continue;

		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
	}
}

static void hclge_build_del_list(struct list_head *list,
				 bool is_del_list,
				 struct list_head *tmp_del_list)
{
	struct hclge_mac_node *mac_cfg, *tmp;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		switch (mac_cfg->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_del(&mac_cfg->node);
			list_add_tail(&mac_cfg->node, tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			if (is_del_list) {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
			break;
		}
	}
}

static void hclge_unsync_del_list(struct hclge_vport *vport,
				  int (*unsync)(struct hclge_vport *vport,
						const unsigned char *addr),
				  bool is_del_list,
				  struct list_head *tmp_del_list)
{
	struct hclge_mac_node *mac_cfg, *tmp;
	int ret;

	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
		ret = unsync(vport, mac_cfg->mac_addr);
		if (!ret || ret == -ENOENT) {
			/* clear all mac addr from hardware, but keep these
			 * mac addr in the mac list, and restore them after
			 * vf reset finished.
			 */
			if (!is_del_list &&
			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
				mac_cfg->state = HCLGE_MAC_TO_ADD;
			} else if (is_del_list) {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
		} else if (is_del_list) {
			mac_cfg->state = HCLGE_MAC_TO_DEL;
		}
	}
}

void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	if (mac_type == HCLGE_MAC_ADDR_UC) {
		list = &vport->uc_mac_list;
		unsync = hclge_rm_uc_addr_common;
	} else {
		list = &vport->mc_mac_list;
		unsync = hclge_rm_mc_addr_common;
	}

	INIT_LIST_HEAD(&tmp_del_list);

	if (!is_del_list)
		set_bit(vport->vport_id, hdev->vport_config_block);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_build_del_list(list, is_del_list, &tmp_del_list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);

	spin_unlock_bh(&vport->mac_list_lock);
}

/* remove all mac addresses when uninitializing */
static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	INIT_LIST_HEAD(&tmp_del_list);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			list_del(&mac_node->node);
			kfree(mac_node);
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	if (mac_type == HCLGE_MAC_ADDR_UC)
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_uc_addr_common);
	else
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_mc_addr_common);

	if (!list_empty(&tmp_del_list))
		dev_warn(&hdev->pdev->dev,
			 "failed to completely uninit %s mac list for vport %u\n",
			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
			 vport->vport_id);

	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

static void hclge_uninit_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
	}
}

static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%u.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}

static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
				     u8 *mac_addr)
{
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int i;

	if (is_zero_ether_addr(mac_addr))
		return false;

	memset(&req, 0, sizeof(req));
	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
	req.egress_port = cpu_to_le16(egress_port);
	hclge_prepare_mac_addr(&req, mac_addr, false);

	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
		return true;

	vf_idx += HCLGE_VF_VPORT_START_NUM;
	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
		if (i != vf_idx &&
		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
			return true;

	return false;
}

static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
			    u8 *mac_addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
		dev_info(&hdev->pdev->dev,
			 "Specified MAC(=%pM) is same as before, no change committed!\n",
			 mac_addr);
		return 0;
	}

	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
			mac_addr);
		return -EEXIST;
	}

	ether_addr_copy(vport->vf_info.mac, mac_addr);

	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		dev_info(&hdev->pdev->dev,
			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
			 vf, mac_addr);
		return hclge_inform_reset_assert_to_vf(vport);
	}

	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
		 vf, mac_addr);
	return 0;
}

static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
				       const u8 *old_addr, const u8 *new_addr)
{
	struct list_head *list = &vport->uc_mac_list;
	struct hclge_mac_node *old_node, *new_node;

	new_node = hclge_find_mac_node(list, new_addr);
	if (!new_node) {
		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
		if (!new_node)
			return -ENOMEM;

		new_node->state = HCLGE_MAC_TO_ADD;
		ether_addr_copy(new_node->mac_addr, new_addr);
		list_add(&new_node->node, list);
	} else {
		if (new_node->state == HCLGE_MAC_TO_DEL)
			new_node->state = HCLGE_MAC_ACTIVE;

		/* make sure the new addr is at the list head: otherwise the
		 * dev addr may not be re-added into the mac table due to the
		 * umv space limitation after a global/imp reset, which clears
		 * the mac table in hardware.
		 */
		list_move(&new_node->node, list);
	}

	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
		old_node = hclge_find_mac_node(list, old_addr);
		if (old_node) {
			if (old_node->state == HCLGE_MAC_TO_ADD) {
				list_del(&old_node->node);
				kfree(old_node);
			} else {
				old_node->state = HCLGE_MAC_TO_DEL;
			}
		}
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	return 0;
}

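/* Changing the device mac also updates the pause address, and the new
 * address is committed to hdev->hw.mac.mac_addr under mac_list_lock so
 * the set_rx_mode path cannot remove it concurrently.
 */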
8861 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8864 const unsigned char *new_addr = (const unsigned char *)p;
8865 struct hclge_vport *vport = hclge_get_vport(handle);
8866 struct hclge_dev *hdev = vport->back;
8867 unsigned char *old_addr = NULL;
8870 /* mac addr check */
8871 if (is_zero_ether_addr(new_addr) ||
8872 is_broadcast_ether_addr(new_addr) ||
8873 is_multicast_ether_addr(new_addr)) {
8874 dev_err(&hdev->pdev->dev,
8875 "change uc mac err! invalid mac: %pM.\n",
8880 ret = hclge_pause_addr_cfg(hdev, new_addr);
8882 dev_err(&hdev->pdev->dev,
8883 "failed to configure mac pause address, ret = %d\n",
8889 old_addr = hdev->hw.mac.mac_addr;
8891 spin_lock_bh(&vport->mac_list_lock);
8892 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8894 dev_err(&hdev->pdev->dev,
8895 "failed to change the mac addr:%pM, ret = %d\n",
8897 spin_unlock_bh(&vport->mac_list_lock);
8900 hclge_pause_addr_cfg(hdev, old_addr);
8904 /* we must update the dev addr under spinlock protection, to prevent
8905  * the dev addr from being removed by the set_rx_mode path.
8907 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8908 spin_unlock_bh(&vport->mac_list_lock);
8910 hclge_task_schedule(hdev, 0);
8915 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
8917 struct mii_ioctl_data *data = if_mii(ifr);
8919 if (!hnae3_dev_phy_imp_supported(hdev))
8924 data->phy_id = hdev->hw.mac.phy_addr;
8925 /* this command reads phy id and register at the same time */
8928 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
8932 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
8938 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8941 struct hclge_vport *vport = hclge_get_vport(handle);
8942 struct hclge_dev *hdev = vport->back;
8944 if (!hdev->hw.mac.phydev)
8945 return hclge_mii_ioctl(hdev, ifr, cmd);
8947 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8950 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8951 u8 fe_type, bool filter_en, u8 vf_id)
8953 struct hclge_vlan_filter_ctrl_cmd *req;
8954 struct hclge_desc desc;
8957 /* read current vlan filter parameter */
8958 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8959 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8960 req->vlan_type = vlan_type;
8963 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8965 dev_err(&hdev->pdev->dev,
8966 "failed to get vlan filter config, ret = %d.\n", ret);
8970 /* modify and write new config parameter */
8971 hclge_cmd_reuse_desc(&desc, false);
8972 req->vlan_fe = filter_en ?
8973 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8975 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8977 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8983 #define HCLGE_FILTER_TYPE_VF 0
8984 #define HCLGE_FILTER_TYPE_PORT 1
8985 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8986 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8987 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8988 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8989 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8990 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8991 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8992 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8993 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8995 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8997 struct hclge_vport *vport = hclge_get_vport(handle);
8998 struct hclge_dev *hdev = vport->back;
9000 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9001 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9002 HCLGE_FILTER_FE_EGRESS, enable, 0);
9003 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9004 HCLGE_FILTER_FE_INGRESS, enable, 0);
9006 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9007 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
9011 handle->netdev_flags |= HNAE3_VLAN_FLTR;
9013 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
9016 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9017 bool is_kill, u16 vlan,
9018 struct hclge_desc *desc)
9020 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9021 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9026 hclge_cmd_setup_basic_desc(&desc[0],
9027 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9028 hclge_cmd_setup_basic_desc(&desc[1],
9029 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9031 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
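/* Each VF occupies one bit of the vf_bitmap spanning the two
 * descriptors: vfid / 8 selects the byte and vfid % 8 the bit. For
 * example, vfid 10 sets bit 2 of byte 1, i.e. vf_byte_val = 0x04.
 */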
9033 vf_byte_off = vfid / 8;
9034 vf_byte_val = 1 << (vfid % 8);
9036 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9037 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9039 req0->vlan_id = cpu_to_le16(vlan);
9040 req0->vlan_cfg = is_kill;
9042 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9043 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9045 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9047 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9049 dev_err(&hdev->pdev->dev,
9050 "Send vf vlan command fail, ret =%d.\n",
9058 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9059 bool is_kill, struct hclge_desc *desc)
9061 struct hclge_vlan_filter_vf_cfg_cmd *req;
9063 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9066 #define HCLGE_VF_VLAN_NO_ENTRY 2
9067 if (!req->resp_code || req->resp_code == 1)
9070 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9071 set_bit(vfid, hdev->vf_vlan_full);
9072 dev_warn(&hdev->pdev->dev,
9073 "vf vlan table is full, vf vlan filter is disabled\n");
9077 dev_err(&hdev->pdev->dev,
9078 "Add vf vlan filter fail, ret =%u.\n",
9081 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9082 if (!req->resp_code)
9085 /* vf vlan filter is disabled when the vf vlan table is full,
9086  * so a new vlan id will not be added into the vf vlan table.
9087  * Just return 0 without warning, to avoid massive verbose
9088  * logs when unloading.
9090 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9093 dev_err(&hdev->pdev->dev,
9094 "Kill vf vlan filter fail, ret =%u.\n",
9101 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9102 bool is_kill, u16 vlan,
9105 struct hclge_vport *vport = &hdev->vport[vfid];
9106 struct hclge_desc desc[2];
9109 /* if the vf vlan table is full, firmware will close the vf vlan
9110  * filter, so it is unable and unnecessary to add a new vlan id to it.
9111  * If spoof check is enabled and the vf vlan table is full, a new
9112  * vlan must not be added, since tx packets with this vlan id would be dropped.
9114 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9115 if (vport->vf_info.spoofchk && vlan) {
9116 dev_err(&hdev->pdev->dev,
9117 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9123 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9127 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9130 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9131 u16 vlan_id, bool is_kill)
9133 struct hclge_vlan_filter_pf_cfg_cmd *req;
9134 struct hclge_desc desc;
9135 u8 vlan_offset_byte_val;
9136 u8 vlan_offset_byte;
9140 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
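/* The 4096 vlan ids are programmed in chunks of
 * HCLGE_VLAN_ID_OFFSET_STEP ids per command. Assuming the usual step of
 * 160 ids and 8 bits per byte, vlan id 500 maps to offset 3, byte 2,
 * bit 4, i.e. vlan_offset_byte_val = 0x10.
 */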
9142 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9143 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9144 HCLGE_VLAN_BYTE_SIZE;
9145 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9147 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9148 req->vlan_offset = vlan_offset_160;
9149 req->vlan_cfg = is_kill;
9150 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9152 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9154 dev_err(&hdev->pdev->dev,
9155 "port vlan command, send fail, ret =%d.\n", ret);
9159 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9160 u16 vport_id, u16 vlan_id,
9163 u16 vport_idx, vport_num = 0;
9166 if (is_kill && !vlan_id)
9169 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
9172 dev_err(&hdev->pdev->dev,
9173 "Set %u vport vlan filter config fail, ret =%d.\n",
9178 /* vlan 0 may be added twice when 8021q module is enabled */
9179 if (!is_kill && !vlan_id &&
9180 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9183 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9184 dev_err(&hdev->pdev->dev,
9185 "Add port vlan failed, vport %u is already in vlan %u\n",
9191 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9192 dev_err(&hdev->pdev->dev,
9193 "Delete port vlan failed, vport %u is not in vlan %u\n",
9198 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9201 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9202 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9208 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9210 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9211 struct hclge_vport_vtag_tx_cfg_cmd *req;
9212 struct hclge_dev *hdev = vport->back;
9213 struct hclge_desc desc;
9217 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9219 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9220 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9221 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9222 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9223 vcfg->accept_tag1 ? 1 : 0);
9224 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9225 vcfg->accept_untag1 ? 1 : 0);
9226 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9227 vcfg->accept_tag2 ? 1 : 0);
9228 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9229 vcfg->accept_untag2 ? 1 : 0);
9230 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9231 vcfg->insert_tag1_en ? 1 : 0);
9232 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9233 vcfg->insert_tag2_en ? 1 : 0);
9234 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9235 vcfg->tag_shift_mode_en ? 1 : 0);
9236 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
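/* Select this vport in the command's vf bitmap: vports are grouped
 * HCLGE_VF_NUM_PER_CMD per command and HCLGE_VF_NUM_PER_BYTE per bitmap
 * byte, the same byte/bit addressing used for the vf vlan filter above.
 */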
9238 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9239 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9240 HCLGE_VF_NUM_PER_BYTE;
9241 req->vf_bitmap[bmap_index] =
9242 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9244 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9246 dev_err(&hdev->pdev->dev,
9247 "Send port txvlan cfg command fail, ret =%d\n",
9253 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9255 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9256 struct hclge_vport_vtag_rx_cfg_cmd *req;
9257 struct hclge_dev *hdev = vport->back;
9258 struct hclge_desc desc;
9262 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9264 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9265 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9266 vcfg->strip_tag1_en ? 1 : 0);
9267 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9268 vcfg->strip_tag2_en ? 1 : 0);
9269 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9270 vcfg->vlan1_vlan_prionly ? 1 : 0);
9271 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9272 vcfg->vlan2_vlan_prionly ? 1 : 0);
9273 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9274 vcfg->strip_tag1_discard_en ? 1 : 0);
9275 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9276 vcfg->strip_tag2_discard_en ? 1 : 0);
9278 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9279 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9280 HCLGE_VF_NUM_PER_BYTE;
9281 req->vf_bitmap[bmap_index] =
9282 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9284 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9286 dev_err(&hdev->pdev->dev,
9287 "Send port rxvlan cfg command fail, ret =%d\n",
9293 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9294 u16 port_base_vlan_state,
9299 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9300 vport->txvlan_cfg.accept_tag1 = true;
9301 vport->txvlan_cfg.insert_tag1_en = false;
9302 vport->txvlan_cfg.default_tag1 = 0;
9304 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9306 vport->txvlan_cfg.accept_tag1 =
9307 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9308 vport->txvlan_cfg.insert_tag1_en = true;
9309 vport->txvlan_cfg.default_tag1 = vlan_tag;
9312 vport->txvlan_cfg.accept_untag1 = true;
9314 /* accept_tag2 and accept_untag2 are not supported on
9315  * pdev revision 0x20; newer revisions support them, but
9316  * these two fields cannot be configured by the user.
9318 vport->txvlan_cfg.accept_tag2 = true;
9319 vport->txvlan_cfg.accept_untag2 = true;
9320 vport->txvlan_cfg.insert_tag2_en = false;
9321 vport->txvlan_cfg.default_tag2 = 0;
9322 vport->txvlan_cfg.tag_shift_mode_en = true;
9324 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9325 vport->rxvlan_cfg.strip_tag1_en = false;
9326 vport->rxvlan_cfg.strip_tag2_en =
9327 vport->rxvlan_cfg.rx_vlan_offload_en;
9328 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9330 vport->rxvlan_cfg.strip_tag1_en =
9331 vport->rxvlan_cfg.rx_vlan_offload_en;
9332 vport->rxvlan_cfg.strip_tag2_en = true;
9333 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9336 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9337 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9338 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9340 ret = hclge_set_vlan_tx_offload_cfg(vport);
9344 return hclge_set_vlan_rx_offload_cfg(vport);
9347 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9349 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9350 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9351 struct hclge_desc desc;
9354 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9355 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9356 rx_req->ot_fst_vlan_type =
9357 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9358 rx_req->ot_sec_vlan_type =
9359 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9360 rx_req->in_fst_vlan_type =
9361 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9362 rx_req->in_sec_vlan_type =
9363 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9365 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9367 dev_err(&hdev->pdev->dev,
9368 "Send rxvlan protocol type command fail, ret =%d\n",
9373 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9375 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9376 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9377 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9379 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9381 dev_err(&hdev->pdev->dev,
9382 "Send txvlan protocol type command fail, ret =%d\n",
9388 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9390 #define HCLGE_DEF_VLAN_TYPE 0x8100
9392 struct hnae3_handle *handle = &hdev->vport[0].nic;
9393 struct hclge_vport *vport;
9397 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9398 /* for revision 0x21, vf vlan filter is per function */
9399 for (i = 0; i < hdev->num_alloc_vport; i++) {
9400 vport = &hdev->vport[i];
9401 ret = hclge_set_vlan_filter_ctrl(hdev,
9402 HCLGE_FILTER_TYPE_VF,
9403 HCLGE_FILTER_FE_EGRESS,
9410 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9411 HCLGE_FILTER_FE_INGRESS, true,
9416 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9417 HCLGE_FILTER_FE_EGRESS_V1_B,
9423 handle->netdev_flags |= HNAE3_VLAN_FLTR;
9425 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9426 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9427 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9428 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9429 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9430 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9432 ret = hclge_set_vlan_protocol_type(hdev);
9436 for (i = 0; i < hdev->num_alloc_vport; i++) {
9439 vport = &hdev->vport[i];
9440 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9442 ret = hclge_vlan_offload_cfg(vport,
9443 vport->port_base_vlan_cfg.state,
9449 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
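/* The per-vport vlan list shadows what the stack requested:
 * hd_tbl_status records whether each id actually reached the hardware
 * filter, so entries can be replayed after a reset or once port based
 * vlan is turned off again.
 */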
9452 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9455 struct hclge_vport_vlan_cfg *vlan;
9457 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9461 vlan->hd_tbl_status = writen_to_tbl;
9462 vlan->vlan_id = vlan_id;
9464 list_add_tail(&vlan->node, &vport->vlan_list);
9467 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9469 struct hclge_vport_vlan_cfg *vlan, *tmp;
9470 struct hclge_dev *hdev = vport->back;
9473 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9474 if (!vlan->hd_tbl_status) {
9475 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9477 vlan->vlan_id, false);
9479 dev_err(&hdev->pdev->dev,
9480 "restore vport vlan list failed, ret=%d\n",
9485 vlan->hd_tbl_status = true;
9491 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9494 struct hclge_vport_vlan_cfg *vlan, *tmp;
9495 struct hclge_dev *hdev = vport->back;
9497 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9498 if (vlan->vlan_id == vlan_id) {
9499 if (is_write_tbl && vlan->hd_tbl_status)
9500 hclge_set_vlan_filter_hw(hdev,
9506 list_del(&vlan->node);
9513 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9515 struct hclge_vport_vlan_cfg *vlan, *tmp;
9516 struct hclge_dev *hdev = vport->back;
9518 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9519 if (vlan->hd_tbl_status)
9520 hclge_set_vlan_filter_hw(hdev,
9526 vlan->hd_tbl_status = false;
9528 list_del(&vlan->node);
9532 clear_bit(vport->vport_id, hdev->vf_vlan_full);
9535 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9537 struct hclge_vport_vlan_cfg *vlan, *tmp;
9538 struct hclge_vport *vport;
9541 for (i = 0; i < hdev->num_alloc_vport; i++) {
9542 vport = &hdev->vport[i];
9543 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9544 list_del(&vlan->node);
9550 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9552 struct hclge_vport_vlan_cfg *vlan, *tmp;
9553 struct hclge_dev *hdev = vport->back;
9559 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9560 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9561 state = vport->port_base_vlan_cfg.state;
9563 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9564 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9565 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9566 vport->vport_id, vlan_id,
9571 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9572 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9574 vlan->vlan_id, false);
9577 vlan->hd_tbl_status = true;
9581 /* For global reset and imp reset, hardware will clear the mac table,
9582  * so we change the mac address state from ACTIVE to TO_ADD, then they
9583  * can be restored in the service task after the reset completes.
9584  * Furthermore, the mac addresses with state TO_DEL or DEL_FAIL need not
9585  * be restored after reset, so just remove these mac nodes from the mac_list.
9587 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9589 struct hclge_mac_node *mac_node, *tmp;
9591 list_for_each_entry_safe(mac_node, tmp, list, node) {
9592 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9593 mac_node->state = HCLGE_MAC_TO_ADD;
9594 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9595 list_del(&mac_node->node);
9601 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9603 spin_lock_bh(&vport->mac_list_lock);
9605 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9606 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9607 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9609 spin_unlock_bh(&vport->mac_list_lock);
9612 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9614 struct hclge_vport *vport = &hdev->vport[0];
9615 struct hnae3_handle *handle = &vport->nic;
9617 hclge_restore_mac_table_common(vport);
9618 hclge_restore_vport_vlan_table(vport);
9619 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9621 hclge_restore_fd_entries(handle);
9624 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9626 struct hclge_vport *vport = hclge_get_vport(handle);
9628 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9629 vport->rxvlan_cfg.strip_tag1_en = false;
9630 vport->rxvlan_cfg.strip_tag2_en = enable;
9631 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9633 vport->rxvlan_cfg.strip_tag1_en = enable;
9634 vport->rxvlan_cfg.strip_tag2_en = true;
9635 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9638 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9639 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9640 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9641 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9643 return hclge_set_vlan_rx_offload_cfg(vport);
9646 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9647 u16 port_base_vlan_state,
9648 struct hclge_vlan_info *new_info,
9649 struct hclge_vlan_info *old_info)
9651 struct hclge_dev *hdev = vport->back;
9654 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9655 hclge_rm_vport_all_vlan_table(vport, false);
9656 return hclge_set_vlan_filter_hw(hdev,
9657 htons(new_info->vlan_proto),
9663 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9664 vport->vport_id, old_info->vlan_tag,
9669 return hclge_add_vport_all_vlan_table(vport);
9672 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9673 struct hclge_vlan_info *vlan_info)
9675 struct hnae3_handle *nic = &vport->nic;
9676 struct hclge_vlan_info *old_vlan_info;
9677 struct hclge_dev *hdev = vport->back;
9680 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9682 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9686 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9687 /* add new VLAN tag */
9688 ret = hclge_set_vlan_filter_hw(hdev,
9689 htons(vlan_info->vlan_proto),
9691 vlan_info->vlan_tag,
9696 /* remove old VLAN tag */
9697 ret = hclge_set_vlan_filter_hw(hdev,
9698 htons(old_vlan_info->vlan_proto),
9700 old_vlan_info->vlan_tag,
9708 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9713 /* update state only when disable/enable port based VLAN */
9714 vport->port_base_vlan_cfg.state = state;
9715 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9716 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9718 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9721 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9722 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9723 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
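/* Map an ndo_set_vf_vlan request onto a port based vlan transition:
 * enable on a first non-zero vlan, disable on vlan 0, NOCHANGE when the
 * requested tag matches the current one, and MODIFY otherwise.
 */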
9728 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9729 enum hnae3_port_base_vlan_state state,
9732 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9734 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9736 return HNAE3_PORT_BASE_VLAN_ENABLE;
9739 return HNAE3_PORT_BASE_VLAN_DISABLE;
9740 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9741 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9743 return HNAE3_PORT_BASE_VLAN_MODIFY;
9747 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9748 u16 vlan, u8 qos, __be16 proto)
9750 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9751 struct hclge_vport *vport = hclge_get_vport(handle);
9752 struct hclge_dev *hdev = vport->back;
9753 struct hclge_vlan_info vlan_info;
9757 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9760 vport = hclge_get_vf_vport(hdev, vfid);
9764 /* qos is a 3-bit value, so it cannot be bigger than 7 */
9765 if (vlan > VLAN_N_VID - 1 || qos > 7)
9767 if (proto != htons(ETH_P_8021Q))
9768 return -EPROTONOSUPPORT;
9770 state = hclge_get_port_base_vlan_state(vport,
9771 vport->port_base_vlan_cfg.state,
9773 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9776 vlan_info.vlan_tag = vlan;
9777 vlan_info.qos = qos;
9778 vlan_info.vlan_proto = ntohs(proto);
9780 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9782 dev_err(&hdev->pdev->dev,
9783 "failed to update port base vlan for vf %d, ret = %d\n",
9788 /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based vlan state, so only notify vf on older versions */
9791 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9792 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9793 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9794 vport->vport_id, state,
9801 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9803 struct hclge_vlan_info *vlan_info;
9804 struct hclge_vport *vport;
9808 /* clear port based vlan for all vfs */
9809 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9810 vport = &hdev->vport[vf];
9811 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9813 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9815 vlan_info->vlan_tag, true);
9817 dev_err(&hdev->pdev->dev,
9818 "failed to clear vf vlan for vf%d, ret = %d\n",
9819 vf - HCLGE_VF_VPORT_START_NUM, ret);
9823 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9824 u16 vlan_id, bool is_kill)
9826 struct hclge_vport *vport = hclge_get_vport(handle);
9827 struct hclge_dev *hdev = vport->back;
9828 bool writen_to_tbl = false;
9831 /* When the device is resetting or reset failed, firmware is unable to
9832  * handle the mailbox. Just record the vlan id, and remove it after the reset finishes.
9835 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9836 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9837 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9841 /* when port based vlan is enabled, we use it as the vlan filter
9842  * entry. In this case, we don't update the vlan filter table when the
9843  * user adds a new vlan or removes an existing one; we just update the
9844  * vport vlan list. The vlan ids in the vlan list will be written to
9845  * the vlan filter table once port based vlan is disabled
9847 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9848 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9850 writen_to_tbl = true;
9855 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9857 hclge_add_vport_vlan_table(vport, vlan_id,
9859 } else if (is_kill) {
9860 /* when removing the hw vlan filter failed, record the vlan id,
9861  * and try to remove it from hw later, to stay consistent with the vlan list
9864 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9869 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9871 #define HCLGE_MAX_SYNC_COUNT 60
9873 int i, ret, sync_cnt = 0;
9876 /* start from vport 1 for PF is always alive */
9877 for (i = 0; i < hdev->num_alloc_vport; i++) {
9878 struct hclge_vport *vport = &hdev->vport[i];
9880 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9882 while (vlan_id != VLAN_N_VID) {
9883 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9884 vport->vport_id, vlan_id,
9886 if (ret && ret != -EINVAL)
9889 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9890 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9893 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9896 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9902 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9904 struct hclge_config_max_frm_size_cmd *req;
9905 struct hclge_desc desc;
9907 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9909 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9910 req->max_frm_size = cpu_to_le16(new_mps);
9911 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9913 return hclge_cmd_send(&hdev->hw, &desc, 1);
9916 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9918 struct hclge_vport *vport = hclge_get_vport(handle);
9920 return hclge_set_vport_mtu(vport, new_mtu);
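/* The MTU is programmed as a max frame size: MTU plus ethernet header,
 * FCS and two vlan tags. A VF's mps must fit within the PF's, while
 * raising the PF's mps requires it to cover every VF's mps and triggers
 * a buffer reallocation with the nic client paused.
 */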
9923 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9925 struct hclge_dev *hdev = vport->back;
9926 int i, max_frm_size, ret;
9928 /* HW supports 2 layers of vlan */
9929 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9930 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9931 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
9934 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9935 mutex_lock(&hdev->vport_lock);
9936 /* VF's mps must fit within hdev->mps */
9937 if (vport->vport_id && max_frm_size > hdev->mps) {
9938 mutex_unlock(&hdev->vport_lock);
9940 } else if (vport->vport_id) {
9941 vport->mps = max_frm_size;
9942 mutex_unlock(&hdev->vport_lock);
9946 /* PF's mps must be greater than VF's mps */
9947 for (i = 1; i < hdev->num_alloc_vport; i++)
9948 if (max_frm_size < hdev->vport[i].mps) {
9949 mutex_unlock(&hdev->vport_lock);
9953 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9955 ret = hclge_set_mac_mtu(hdev, max_frm_size);
9957 dev_err(&hdev->pdev->dev,
9958 "Change mtu fail, ret =%d\n", ret);
9962 hdev->mps = max_frm_size;
9963 vport->mps = max_frm_size;
9965 ret = hclge_buffer_alloc(hdev);
9967 dev_err(&hdev->pdev->dev,
9968 "Allocate buffer fail, ret =%d\n", ret);
9971 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9972 mutex_unlock(&hdev->vport_lock);
9976 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9979 struct hclge_reset_tqp_queue_cmd *req;
9980 struct hclge_desc desc;
9983 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9985 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9986 req->tqp_id = cpu_to_le16(queue_id);
9988 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9990 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9992 dev_err(&hdev->pdev->dev,
9993 "Send tqp reset cmd error, status =%d\n", ret);
10000 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10002 struct hclge_reset_tqp_queue_cmd *req;
10003 struct hclge_desc desc;
10006 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10008 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10009 req->tqp_id = cpu_to_le16(queue_id);
10011 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10013 dev_err(&hdev->pdev->dev,
10014 "Get reset status error, status =%d\n", ret);
10018 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10021 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10023 struct hnae3_queue *queue;
10024 struct hclge_tqp *tqp;
10026 queue = handle->kinfo.tqp[queue_id];
10027 tqp = container_of(queue, struct hclge_tqp, q);
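/* The tqp reset below follows a fixed sequence: quiesce the queue,
 * assert the reset by command, poll the ready_to_reset flag up to
 * HCLGE_TQP_RESET_TRY_TIMES with a sleep of roughly 1ms per try, then
 * deassert the soft reset again.
 */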
10032 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
10034 struct hclge_vport *vport = hclge_get_vport(handle);
10035 struct hclge_dev *hdev = vport->back;
10036 int reset_try_times = 0;
10041 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
10043 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
10045 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
10049 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
10051 dev_err(&hdev->pdev->dev,
10052 "Send reset tqp cmd fail, ret = %d\n", ret);
10056 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10057 reset_status = hclge_get_reset_status(hdev, queue_gid);
10061 /* Wait for tqp hw reset */
10062 usleep_range(1000, 1200);
10065 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10066 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
10070 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
10072 dev_err(&hdev->pdev->dev,
10073 "Deassert the soft reset fail, ret = %d\n", ret);
10078 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
10080 struct hnae3_handle *handle = &vport->nic;
10081 struct hclge_dev *hdev = vport->back;
10082 int reset_try_times = 0;
10087 if (queue_id >= handle->kinfo.num_tqps) {
10088 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
10093 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
10095 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
10097 dev_warn(&hdev->pdev->dev,
10098 "Send reset tqp cmd fail, ret = %d\n", ret);
10102 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10103 reset_status = hclge_get_reset_status(hdev, queue_gid);
10107 /* Wait for tqp hw reset */
10108 usleep_range(1000, 1200);
10111 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10112 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
10116 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
10118 dev_warn(&hdev->pdev->dev,
10119 "Deassert the soft reset fail, ret = %d\n", ret);
10122 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10124 struct hclge_vport *vport = hclge_get_vport(handle);
10125 struct hclge_dev *hdev = vport->back;
10127 return hdev->fw_version;
10130 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10132 struct phy_device *phydev = hdev->hw.mac.phydev;
10137 phy_set_asym_pause(phydev, rx_en, tx_en);
10140 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10144 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10147 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10149 dev_err(&hdev->pdev->dev,
10150 "configure pauseparam error, ret = %d.\n", ret);
10155 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10157 struct phy_device *phydev = hdev->hw.mac.phydev;
10158 u16 remote_advertising = 0;
10159 u16 local_advertising;
10160 u32 rx_pause, tx_pause;
10163 if (!phydev->link || !phydev->autoneg)
10166 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10169 remote_advertising = LPA_PAUSE_CAP;
10171 if (phydev->asym_pause)
10172 remote_advertising |= LPA_PAUSE_ASYM;
10174 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10175 remote_advertising);
10176 tx_pause = flowctl & FLOW_CTRL_TX;
10177 rx_pause = flowctl & FLOW_CTRL_RX;
10179 if (phydev->duplex == HCLGE_MAC_HALF) {
10184 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10187 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10188 u32 *rx_en, u32 *tx_en)
10190 struct hclge_vport *vport = hclge_get_vport(handle);
10191 struct hclge_dev *hdev = vport->back;
10192 u8 media_type = hdev->hw.mac.media_type;
10194 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10195 hclge_get_autoneg(handle) : 0;
10197 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10203 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10206 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10209 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10218 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10219 u32 rx_en, u32 tx_en)
10221 if (rx_en && tx_en)
10222 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10223 else if (rx_en && !tx_en)
10224 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10225 else if (!rx_en && tx_en)
10226 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10228 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10230 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10233 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10234 u32 rx_en, u32 tx_en)
10236 struct hclge_vport *vport = hclge_get_vport(handle);
10237 struct hclge_dev *hdev = vport->back;
10238 struct phy_device *phydev = hdev->hw.mac.phydev;
10241 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10242 fc_autoneg = hclge_get_autoneg(handle);
10243 if (auto_neg != fc_autoneg) {
10244 dev_info(&hdev->pdev->dev,
10245 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10246 return -EOPNOTSUPP;
10250 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10251 dev_info(&hdev->pdev->dev,
10252 "Priority flow control enabled. Cannot set link flow control.\n");
10253 return -EOPNOTSUPP;
10256 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10258 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10260 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10261 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10264 return phy_start_aneg(phydev);
10266 return -EOPNOTSUPP;
10269 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10270 u8 *auto_neg, u32 *speed, u8 *duplex)
10272 struct hclge_vport *vport = hclge_get_vport(handle);
10273 struct hclge_dev *hdev = vport->back;
10276 *speed = hdev->hw.mac.speed;
10278 *duplex = hdev->hw.mac.duplex;
10280 *auto_neg = hdev->hw.mac.autoneg;
10283 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10286 struct hclge_vport *vport = hclge_get_vport(handle);
10287 struct hclge_dev *hdev = vport->back;
10289 /* When the nic is down, the service task is not running and does not
10290  * update the port information every second. Query the port information
10291  * before returning the media type, to ensure the media information is correct.
10293 hclge_update_port_info(hdev);
10296 *media_type = hdev->hw.mac.media_type;
10299 *module_type = hdev->hw.mac.module_type;
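/* The MDI-X status lives in a separate PHY register page: switch to the
 * MDIX page, read the control and status registers, then restore the
 * copper page before decoding the result for ethtool.
 */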
10302 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10303 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10305 struct hclge_vport *vport = hclge_get_vport(handle);
10306 struct hclge_dev *hdev = vport->back;
10307 struct phy_device *phydev = hdev->hw.mac.phydev;
10308 int mdix_ctrl, mdix, is_resolved;
10309 unsigned int retval;
10312 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10313 *tp_mdix = ETH_TP_MDI_INVALID;
10317 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10319 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10320 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10321 HCLGE_PHY_MDIX_CTRL_S);
10323 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10324 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10325 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10327 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10329 switch (mdix_ctrl) {
10331 *tp_mdix_ctrl = ETH_TP_MDI;
10334 *tp_mdix_ctrl = ETH_TP_MDI_X;
10337 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10340 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10345 *tp_mdix = ETH_TP_MDI_INVALID;
10347 *tp_mdix = ETH_TP_MDI_X;
10349 *tp_mdix = ETH_TP_MDI;
10352 static void hclge_info_show(struct hclge_dev *hdev)
10354 struct device *dev = &hdev->pdev->dev;
10356 dev_info(dev, "PF info begin:\n");
10358 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10359 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10360 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10361 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10362 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
10363 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10364 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10365 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10366 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10367 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10368 dev_info(dev, "This is %s PF\n",
10369 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10370 dev_info(dev, "DCB %s\n",
10371 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10372 dev_info(dev, "MQPRIO %s\n",
10373 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10375 dev_info(dev, "PF info end.\n");
10378 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10379 struct hclge_vport *vport)
10381 struct hnae3_client *client = vport->nic.client;
10382 struct hclge_dev *hdev = ae_dev->priv;
10383 int rst_cnt = hdev->rst_stats.reset_cnt;
10386 ret = client->ops->init_instance(&vport->nic);
10390 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10391 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10392 rst_cnt != hdev->rst_stats.reset_cnt) {
10397 /* Enable nic hw error interrupts */
10398 ret = hclge_config_nic_hw_error(hdev, true);
10400 dev_err(&ae_dev->pdev->dev,
10401 "fail(%d) to enable hw error interrupts\n", ret);
10405 hnae3_set_client_init_flag(client, ae_dev, 1);
10407 if (netif_msg_drv(&hdev->vport->nic))
10408 hclge_info_show(hdev);
10413 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10414 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10415 msleep(HCLGE_WAIT_RESET_DONE);
10417 client->ops->uninit_instance(&vport->nic, 0);
10422 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10423 struct hclge_vport *vport)
10425 struct hclge_dev *hdev = ae_dev->priv;
10426 struct hnae3_client *client;
10430 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10434 client = hdev->roce_client;
10435 ret = hclge_init_roce_base_info(vport);
10439 rst_cnt = hdev->rst_stats.reset_cnt;
10440 ret = client->ops->init_instance(&vport->roce);
10444 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10445 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10446 rst_cnt != hdev->rst_stats.reset_cnt) {
10448 goto init_roce_err;
10451 /* Enable roce ras interrupts */
10452 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10454 dev_err(&ae_dev->pdev->dev,
10455 "fail(%d) to enable roce ras interrupts\n", ret);
10456 goto init_roce_err;
10459 hnae3_set_client_init_flag(client, ae_dev, 1);
10464 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10465 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10466 msleep(HCLGE_WAIT_RESET_DONE);
10468 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10473 static int hclge_init_client_instance(struct hnae3_client *client,
10474 struct hnae3_ae_dev *ae_dev)
10476 struct hclge_dev *hdev = ae_dev->priv;
10477 struct hclge_vport *vport;
10480 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10481 vport = &hdev->vport[i];
10483 switch (client->type) {
10484 case HNAE3_CLIENT_KNIC:
10485 hdev->nic_client = client;
10486 vport->nic.client = client;
10487 ret = hclge_init_nic_client_instance(ae_dev, vport);
10491 ret = hclge_init_roce_client_instance(ae_dev, vport);
10496 case HNAE3_CLIENT_ROCE:
10497 if (hnae3_dev_roce_supported(hdev)) {
10498 hdev->roce_client = client;
10499 vport->roce.client = client;
10502 ret = hclge_init_roce_client_instance(ae_dev, vport);
10515 hdev->nic_client = NULL;
10516 vport->nic.client = NULL;
10519 hdev->roce_client = NULL;
10520 vport->roce.client = NULL;
10524 static void hclge_uninit_client_instance(struct hnae3_client *client,
10525 struct hnae3_ae_dev *ae_dev)
10527 struct hclge_dev *hdev = ae_dev->priv;
10528 struct hclge_vport *vport;
10531 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10532 vport = &hdev->vport[i];
10533 if (hdev->roce_client) {
10534 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10535 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10536 msleep(HCLGE_WAIT_RESET_DONE);
10538 hdev->roce_client->ops->uninit_instance(&vport->roce,
10540 hdev->roce_client = NULL;
10541 vport->roce.client = NULL;
10543 if (client->type == HNAE3_CLIENT_ROCE)
10545 if (hdev->nic_client && client->ops->uninit_instance) {
10546 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10547 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10548 msleep(HCLGE_WAIT_RESET_DONE);
10550 client->ops->uninit_instance(&vport->nic, 0);
10551 hdev->nic_client = NULL;
10552 vport->nic.client = NULL;
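/* Device memory (BAR4) is optional. When present it is mapped
 * write-combined, which suggests it serves throughput-oriented writes
 * rather than strictly ordered MMIO.
 */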
10557 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10559 #define HCLGE_MEM_BAR 4
10561 struct pci_dev *pdev = hdev->pdev;
10562 struct hclge_hw *hw = &hdev->hw;
10564 /* if the device does not have device memory, return directly */
10565 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10568 hw->mem_base = devm_ioremap_wc(&pdev->dev,
10569 pci_resource_start(pdev, HCLGE_MEM_BAR),
10570 pci_resource_len(pdev, HCLGE_MEM_BAR));
10571 if (!hw->mem_base) {
10572 dev_err(&pdev->dev, "failed to map device memory\n");
10579 static int hclge_pci_init(struct hclge_dev *hdev)
10581 struct pci_dev *pdev = hdev->pdev;
10582 struct hclge_hw *hw;
10585 ret = pci_enable_device(pdev);
10587 dev_err(&pdev->dev, "failed to enable PCI device\n");
10591 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10593 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10595 dev_err(&pdev->dev,
10596 "can't set consistent PCI DMA");
10597 goto err_disable_device;
10599 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10602 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10604 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10605 goto err_disable_device;
10608 pci_set_master(pdev);
10609 hw = &hdev->hw;
10610 hw->io_base = pcim_iomap(pdev, 2, 0);
10611 if (!hw->io_base) {
10612 dev_err(&pdev->dev, "Can't map configuration register space\n");
10614 goto err_clr_master;
10617 ret = hclge_dev_mem_map(hdev);
10619 goto err_unmap_io_base;
10621 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10626 pcim_iounmap(pdev, hdev->hw.io_base);
10628 pci_clear_master(pdev);
10629 pci_release_regions(pdev);
10630 err_disable_device:
10631 pci_disable_device(pdev);
10636 static void hclge_pci_uninit(struct hclge_dev *hdev)
10638 struct pci_dev *pdev = hdev->pdev;
10640 if (hdev->hw.mem_base)
10641 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10643 pcim_iounmap(pdev, hdev->hw.io_base);
10644 pci_free_irq_vectors(pdev);
10645 pci_clear_master(pdev);
10646 pci_release_mem_regions(pdev);
10647 pci_disable_device(pdev);
10650 static void hclge_state_init(struct hclge_dev *hdev)
10652 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10653 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10654 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10655 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10656 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10657 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10658 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10661 static void hclge_state_uninit(struct hclge_dev *hdev)
10663 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10664 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10666 if (hdev->reset_timer.function)
10667 del_timer_sync(&hdev->reset_timer);
10668 if (hdev->service_task.work.func)
10669 cancel_delayed_work_sync(&hdev->service_task);
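/* FLR preparation below retries up to HCLGE_FLR_RETRY_CNT times: each
 * failed attempt releases the reset semaphore and sleeps for
 * HCLGE_FLR_RETRY_WAIT_MS, giving a reset already in flight a chance to
 * drain before trying again.
 */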
10672 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10674 #define HCLGE_FLR_RETRY_WAIT_MS 500
10675 #define HCLGE_FLR_RETRY_CNT 5
10677 struct hclge_dev *hdev = ae_dev->priv;
10682 down(&hdev->reset_sem);
10683 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10684 hdev->reset_type = HNAE3_FLR_RESET;
10685 ret = hclge_reset_prepare(hdev);
10686 if (ret || hdev->reset_pending) {
10687 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10689 if (hdev->reset_pending ||
10690 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10691 dev_err(&hdev->pdev->dev,
10692 "reset_pending:0x%lx, retry_cnt:%d\n",
10693 hdev->reset_pending, retry_cnt);
10694 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10695 up(&hdev->reset_sem);
10696 msleep(HCLGE_FLR_RETRY_WAIT_MS);
10701 /* disable misc vector before FLR done */
10702 hclge_enable_vector(&hdev->misc_vector, false);
10703 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10704 hdev->rst_stats.flr_rst_cnt++;
10707 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10709 struct hclge_dev *hdev = ae_dev->priv;
10712 hclge_enable_vector(&hdev->misc_vector, true);
10714 ret = hclge_reset_rebuild(hdev);
10716 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10718 hdev->reset_type = HNAE3_NONE_RESET;
10719 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10720 up(&hdev->reset_sem);
10723 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10727 for (i = 0; i < hdev->num_alloc_vport; i++) {
10728 struct hclge_vport *vport = &hdev->vport[i];
10731 /* Send cmd to clear VF's FUNC_RST_ING */
10732 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10734 dev_warn(&hdev->pdev->dev,
10735 "clear vf(%u) rst failed %d!\n",
10736 vport->vport_id, ret);
10740 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10742 struct pci_dev *pdev = ae_dev->pdev;
10743 struct hclge_dev *hdev;
10746 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10751 hdev->ae_dev = ae_dev;
10752 hdev->reset_type = HNAE3_NONE_RESET;
10753 hdev->reset_level = HNAE3_FUNC_RESET;
10754 ae_dev->priv = hdev;
10756 /* HW supports 2 layers of vlan */
10757 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10759 mutex_init(&hdev->vport_lock);
10760 spin_lock_init(&hdev->fd_rule_lock);
10761 sema_init(&hdev->reset_sem, 1);
10763 ret = hclge_pci_init(hdev);
10767 /* Firmware command queue initialize */
10768 ret = hclge_cmd_queue_init(hdev);
10770 goto err_pci_uninit;
10772 /* Firmware command initialize */
10773 ret = hclge_cmd_init(hdev);
10775 goto err_cmd_uninit;
10777 ret = hclge_get_cap(hdev);
10779 goto err_cmd_uninit;
10781 ret = hclge_query_dev_specs(hdev);
10783 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10785 goto err_cmd_uninit;
10788 ret = hclge_configure(hdev);
10790 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10791 goto err_cmd_uninit;
10794 ret = hclge_init_msi(hdev);
10796 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10797 goto err_cmd_uninit;
10800 ret = hclge_misc_irq_init(hdev);
10802 goto err_msi_uninit;
10804 ret = hclge_alloc_tqps(hdev);
10806 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10807 goto err_msi_irq_uninit;
10810 ret = hclge_alloc_vport(hdev);
10812 goto err_msi_irq_uninit;
10814 ret = hclge_map_tqp(hdev);
10816 goto err_msi_irq_uninit;
10818 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
10819 !hnae3_dev_phy_imp_supported(hdev)) {
10820 ret = hclge_mac_mdio_config(hdev);
10822 goto err_msi_irq_uninit;
10825 ret = hclge_init_umv_space(hdev);
10827 goto err_mdiobus_unreg;
10829 ret = hclge_mac_init(hdev);
10831 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10832 goto err_mdiobus_unreg;
10835 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10837 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10838 goto err_mdiobus_unreg;
10841 ret = hclge_config_gro(hdev, true);
10843 goto err_mdiobus_unreg;
10845 ret = hclge_init_vlan_config(hdev);
10847 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10848 goto err_mdiobus_unreg;
10851 ret = hclge_tm_schd_init(hdev);
10853 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10854 goto err_mdiobus_unreg;
10857 ret = hclge_rss_init_cfg(hdev);
10859 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
10860 goto err_mdiobus_unreg;
10863 ret = hclge_rss_init_hw(hdev);
10865 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10866 goto err_mdiobus_unreg;
10869 ret = init_mgr_tbl(hdev);
10871 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10872 goto err_mdiobus_unreg;
10875 ret = hclge_init_fd_config(hdev);
10877 dev_err(&pdev->dev,
10878 "fd table init fail, ret=%d\n", ret);
10879 goto err_mdiobus_unreg;
10882 INIT_KFIFO(hdev->mac_tnl_log);
10884 hclge_dcb_ops_set(hdev);
10886 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10887 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10889 /* Setup affinity after service timer setup because add_timer_on
10890 * is called in affinity notify.
10892 hclge_misc_affinity_setup(hdev);
10894 hclge_clear_all_event_cause(hdev);
10895 hclge_clear_resetting_state(hdev);
10897 /* Log and clear the hw errors that have already occurred */
10898 hclge_handle_all_hns_hw_errors(ae_dev);
10900 /* request delayed reset for the error recovery, because an immediate
10901  * global reset on a PF would affect the pending initialization of other PFs
10903 if (ae_dev->hw_err_reset_req) {
10904 enum hnae3_reset_type reset_level;
10906 reset_level = hclge_get_reset_level(ae_dev,
10907 &ae_dev->hw_err_reset_req);
10908 hclge_set_def_reset_request(ae_dev, reset_level);
10909 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10912 /* Enable MISC vector(vector0) */
10913 hclge_enable_vector(&hdev->misc_vector, true);
10915 hclge_state_init(hdev);
10916 hdev->last_reset_time = jiffies;
10918 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10919 HCLGE_DRIVER_NAME);
10921 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10926 if (hdev->hw.mac.phydev)
10927 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10928 err_msi_irq_uninit:
10929 hclge_misc_irq_uninit(hdev);
10931 pci_free_irq_vectors(pdev);
10933 hclge_cmd_uninit(hdev);
10935 pcim_iounmap(pdev, hdev->hw.io_base);
10936 pci_clear_master(pdev);
10937 pci_release_regions(pdev);
10938 pci_disable_device(pdev);
10940 mutex_destroy(&hdev->vport_lock);
10944 static void hclge_stats_clear(struct hclge_dev *hdev)
10946 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10949 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10951 return hclge_config_switch_param(hdev, vf, enable,
10952 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10955 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10957 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10958 HCLGE_FILTER_FE_NIC_INGRESS_B,
10962 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10966 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10968 dev_err(&hdev->pdev->dev,
10969 "Set vf %d mac spoof check %s failed, ret=%d\n",
10970 vf, enable ? "on" : "off", ret);
10974 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10976 dev_err(&hdev->pdev->dev,
10977 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10978 vf, enable ? "on" : "off", ret);
10983 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10986 struct hclge_vport *vport = hclge_get_vport(handle);
10987 struct hclge_dev *hdev = vport->back;
10988 u32 new_spoofchk = enable ? 1 : 0;
10991 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10992 return -EOPNOTSUPP;
10994 vport = hclge_get_vf_vport(hdev, vf);
10998 if (vport->vf_info.spoofchk == new_spoofchk)
11001 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11002 dev_warn(&hdev->pdev->dev,
11003 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11005 else if (enable && hclge_is_umv_space_full(vport, true))
11006 dev_warn(&hdev->pdev->dev,
11007 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11010 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11014 vport->vf_info.spoofchk = new_spoofchk;
11018 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11020 struct hclge_vport *vport = hdev->vport;
11024 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11027 /* resume the vf spoof check state after reset */
11028 for (i = 0; i < hdev->num_alloc_vport; i++) {
11029 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11030 vport->vf_info.spoofchk);
11040 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11042 struct hclge_vport *vport = hclge_get_vport(handle);
11043 struct hclge_dev *hdev = vport->back;
11044 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
11045 u32 new_trusted = enable ? 1 : 0;
11049 vport = hclge_get_vf_vport(hdev, vf);
11053 if (vport->vf_info.trusted == new_trusted)
11056 /* Disable promisc mode for VF if it is not trusted any more. */
11057 if (!enable && vport->vf_info.promisc_enable) {
11058 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
11059 ret = hclge_set_vport_promisc_mode(vport, false, false,
11063 vport->vf_info.promisc_enable = 0;
11064 hclge_inform_vf_promisc_info(vport);
11067 vport->vf_info.trusted = new_trusted;
11072 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11077 /* reset vf rate to default value */
11078 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11079 struct hclge_vport *vport = &hdev->vport[vf];
11081 vport->vf_info.max_tx_rate = 0;
11082 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11084 dev_err(&hdev->pdev->dev,
11085 "vf%d failed to reset to default, ret=%d\n",
11086 vf - HCLGE_VF_VPORT_START_NUM, ret);
11090 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11091 int min_tx_rate, int max_tx_rate)
11093 if (min_tx_rate != 0 ||
11094 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11095 dev_err(&hdev->pdev->dev,
11096 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11097 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11104 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11105 int min_tx_rate, int max_tx_rate, bool force)
11107 struct hclge_vport *vport = hclge_get_vport(handle);
11108 struct hclge_dev *hdev = vport->back;
11111 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11115 vport = hclge_get_vf_vport(hdev, vf);
11119 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11122 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11126 vport->vf_info.max_tx_rate = max_tx_rate;
11131 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11133 struct hnae3_handle *handle = &hdev->vport->nic;
11134 struct hclge_vport *vport;
11138 /* resume the vf max_tx_rate after reset */
11139 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11140 vport = hclge_get_vf_vport(hdev, vf);
11144 /* zero means max rate; after reset, firmware has already set it
11145  * to max rate, so just continue.
11147 if (!vport->vf_info.max_tx_rate)
11150 ret = hclge_set_vf_rate(handle, vf, 0,
11151 vport->vf_info.max_tx_rate, true);
11153 dev_err(&hdev->pdev->dev,
11154 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11155 vf, vport->vf_info.max_tx_rate, ret);
11163 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11165 struct hclge_vport *vport = hdev->vport;
11168 for (i = 0; i < hdev->num_alloc_vport; i++) {
11169 hclge_vport_stop(vport);
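/* Rebuild after reset: only IMP and global resets wipe the hardware mac
 * and vlan tables, so only for those are the in-memory shadow tables and
 * the umv space cleared before re-running the init sequence below.
 */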
11174 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11176 struct hclge_dev *hdev = ae_dev->priv;
11177 struct pci_dev *pdev = ae_dev->pdev;
11180 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11182 hclge_stats_clear(hdev);
11183 /* NOTE: a PF reset does not need to clear or restore the PF and VF
11184 * table entries, so do not clean the in-memory tables here.
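 * IMP and global resets, by contrast, lose the hardware tables, so the
 * in-memory VLAN tables, the vport config-block bitmap and the UMV
 * space are cleared below and repopulated by the restore path.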
11186 if (hdev->reset_type == HNAE3_IMP_RESET ||
11187 hdev->reset_type == HNAE3_GLOBAL_RESET) {
11188 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11189 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11190 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11191 hclge_reset_umv_space(hdev);
11194 ret = hclge_cmd_init(hdev);
11196 dev_err(&pdev->dev, "Cmd queue init failed\n");
11200 ret = hclge_map_tqp(hdev);
11202 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11206 ret = hclge_mac_init(hdev);
11208 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11212 ret = hclge_tp_port_init(hdev);
11214 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11219 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11221 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11225 ret = hclge_config_gro(hdev, true);
11229 ret = hclge_init_vlan_config(hdev);
11231 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11235 ret = hclge_tm_init_hw(hdev, true);
11237 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11241 ret = hclge_rss_init_hw(hdev);
11243 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11247 ret = init_mgr_tbl(hdev);
11249 dev_err(&pdev->dev,
11250 "failed to reinit manager table, ret = %d\n", ret);
11254 ret = hclge_init_fd_config(hdev);
11256 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11261 /* Log and clear the hw errors that have already occurred */
11261 hclge_handle_all_hns_hw_errors(ae_dev);
11263 /* Re-enable the hw error interrupts because
11264 * the interrupts get disabled on global reset.
11266 ret = hclge_config_nic_hw_error(hdev, true);
11268 dev_err(&pdev->dev,
11269 "fail(%d) to re-enable NIC hw error interrupts\n",
11274 if (hdev->roce_client) {
11275 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11277 dev_err(&pdev->dev,
11278 "fail(%d) to re-enable roce ras interrupts\n",
11284 hclge_reset_vport_state(hdev);
11285 ret = hclge_reset_vport_spoofchk(hdev);
11289 ret = hclge_resume_vf_rate(hdev);
11293 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11294 HCLGE_DRIVER_NAME);
11299 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11301 struct hclge_dev *hdev = ae_dev->priv;
11302 struct hclge_mac *mac = &hdev->hw.mac;
11304 hclge_reset_vf_rate(hdev);
11305 hclge_clear_vf_vlan(hdev);
11306 hclge_misc_affinity_teardown(hdev);
11307 hclge_state_uninit(hdev);
11308 hclge_uninit_mac_table(hdev);
11311 mdiobus_unregister(mac->mdio_bus);
11313 /* Disable the MISC vector (vector0) */
11314 hclge_enable_vector(&hdev->misc_vector, false);
11315 synchronize_irq(hdev->misc_vector.vector_irq);
11317 /* Disable all hw interrupts */
11318 hclge_config_mac_tnl_int(hdev, false);
11319 hclge_config_nic_hw_error(hdev, false);
11320 hclge_config_rocee_ras_interrupt(hdev, false);
11322 hclge_cmd_uninit(hdev);
11323 hclge_misc_irq_uninit(hdev);
11324 hclge_pci_uninit(hdev);
11325 mutex_destroy(&hdev->vport_lock);
11326 hclge_uninit_vport_vlan_table(hdev);
11327 ae_dev->priv = NULL;
11330 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11332 struct hclge_vport *vport = hclge_get_vport(handle);
11333 struct hclge_dev *hdev = vport->back;
11335 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11338 static void hclge_get_channels(struct hnae3_handle *handle,
11339 struct ethtool_channels *ch)
11341 ch->max_combined = hclge_get_max_channels(handle);
11342 ch->other_count = 1;
11344 ch->combined_count = handle->kinfo.rss_size;
11347 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11348 u16 *alloc_tqps, u16 *max_rss_size)
11350 struct hclge_vport *vport = hclge_get_vport(handle);
11351 struct hclge_dev *hdev = vport->back;
11353 *alloc_tqps = vport->alloc_tqps;
11354 *max_rss_size = hdev->pf_rss_size_max;
11357 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11358 bool rxfh_configured)
11360 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11361 struct hclge_vport *vport = hclge_get_vport(handle);
11362 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11363 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11364 struct hclge_dev *hdev = vport->back;
11365 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11366 u16 cur_rss_size = kinfo->rss_size;
11367 u16 cur_tqps = kinfo->num_tqps;
11368 u16 tc_valid[HCLGE_MAX_TC_NUM];
11374 kinfo->req_rss_size = new_tqps_num;
11376 ret = hclge_tm_vport_map_update(hdev);
11378 dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
11382 roundup_size = roundup_pow_of_two(kinfo->rss_size);
11383 roundup_size = ilog2(roundup_size);
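/* tc_size is handed to the RSS TC mode command as a log2 value, so
 * rss_size is rounded up to a power of two first, e.g. rss_size = 12
 * is rounded to 16 and encoded as ilog2(16) = 4.
 */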
11384 /* Set the RSS TC mode according to the new RSS size */
11385 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11388 if (!(hdev->hw_tc_map & BIT(i)))
11392 tc_size[i] = roundup_size;
11393 tc_offset[i] = kinfo->rss_size * i;
11395 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11399 /* RSS indirection table has been configured by the user */
11400 if (rxfh_configured)
11403 /* Reinitialize the RSS indirection table according to the new RSS size */
11404 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11409 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11410 rss_indir[i] = i % kinfo->rss_size;
11412 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11414 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11421 dev_info(&hdev->pdev->dev,
11422 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
11423 cur_rss_size, kinfo->rss_size,
11424 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11429 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11430 u32 *regs_num_64_bit)
11432 struct hclge_desc desc;
11436 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11437 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11439 dev_err(&hdev->pdev->dev,
11440 "Query register number cmd failed, ret = %d.\n", ret);
11444 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
11445 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
11447 total_num = *regs_num_32_bit + *regs_num_64_bit;
11454 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11457 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11458 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11460 struct hclge_desc *desc;
11461 u32 *reg_val = data;
11471 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11472 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11473 HCLGE_32_BIT_REG_RTN_DATANUM);
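/* Each descriptor returns HCLGE_32_BIT_REG_RTN_DATANUM (8) u32 words,
 * but the first one loses HCLGE_32_BIT_DESC_NODATA_LEN (2) words to
 * the reply header, hence the "+ nodata_num" above; later descriptors
 * carry data in all 8 words. E.g. regs_num = 100 needs
 * DIV_ROUND_UP(102, 8) = 13 descriptors.
 */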
11474 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11478 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11479 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11481 dev_err(&hdev->pdev->dev,
11482 "Query 32 bit register cmd failed, ret = %d.\n", ret);
11487 for (i = 0; i < cmd_num; i++) {
11489 desc_data = (__le32 *)(&desc[i].data[0]);
11490 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11492 desc_data = (__le32 *)(&desc[i]);
11493 n = HCLGE_32_BIT_REG_RTN_DATANUM;
11495 for (k = 0; k < n; k++) {
11496 *reg_val++ = le32_to_cpu(*desc_data++);
11508 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11511 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11512 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11514 struct hclge_desc *desc;
11515 u64 *reg_val = data;
11525 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11526 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11527 HCLGE_64_BIT_REG_RTN_DATANUM);
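/* Same scheme as the 32-bit query: the header costs the first
 * descriptor HCLGE_64_BIT_DESC_NODATA_LEN (1) of its
 * HCLGE_64_BIT_REG_RTN_DATANUM (4) u64 words; every following
 * descriptor carries data in all 4.
 */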
11528 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11532 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11533 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11535 dev_err(&hdev->pdev->dev,
11536 "Query 64 bit register cmd failed, ret = %d.\n", ret);
11541 for (i = 0; i < cmd_num; i++) {
11543 desc_data = (__le64 *)(&desc[i].data[0]);
11544 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11546 desc_data = (__le64 *)(&desc[i]);
11547 n = HCLGE_64_BIT_REG_RTN_DATANUM;
11549 for (k = 0; k < n; k++) {
11550 *reg_val++ = le64_to_cpu(*desc_data++);
11562 #define MAX_SEPARATE_NUM 4
11563 #define SEPARATOR_VALUE 0xFDFCFBFA
11564 #define REG_NUM_PER_LINE 4
11565 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
11566 #define REG_SEPARATOR_LINE 1
11567 #define REG_NUM_REMAIN_MASK 3
11568 #define BD_LIST_MAX_NUM 30
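/* The register dump below is laid out in lines of REG_NUM_PER_LINE (4)
 * u32 words; each register group is padded with SEPARATOR_VALUE words
 * up to the next line boundary so that every group starts on a fresh
 * line, e.g. 22 registers get 4 - (22 & REG_NUM_REMAIN_MASK) = 2
 * separator words.
 */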
11570 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11574 /* initialize command BD except the last one */
11575 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11576 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11578 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11581 /* initialize the last command BD */
11582 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11584 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
11587 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11591 u32 entries_per_desc, desc_index, index, offset, i;
11592 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
11595 ret = hclge_query_bd_num_cmd_send(hdev, desc);
11597 dev_err(&hdev->pdev->dev,
11598 "Get dfx bd num fail, status is %d.\n", ret);
11602 entries_per_desc = ARRAY_SIZE(desc[0].data);
11603 for (i = 0; i < type_num; i++) {
11604 offset = hclge_dfx_bd_offset_list[i];
11605 index = offset % entries_per_desc;
11606 desc_index = offset / entries_per_desc;
11607 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
11613 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
11614 struct hclge_desc *desc_src, int bd_num,
11615 enum hclge_opcode_type cmd)
11617 struct hclge_desc *desc = desc_src;
11620 hclge_cmd_setup_basic_desc(desc, cmd, true);
11621 for (i = 0; i < bd_num - 1; i++) {
11622 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11624 hclge_cmd_setup_basic_desc(desc, cmd, true);
11628 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
11630 dev_err(&hdev->pdev->dev,
11631 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
11637 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
11640 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
11641 struct hclge_desc *desc = desc_src;
11644 entries_per_desc = ARRAY_SIZE(desc->data);
11645 reg_num = entries_per_desc * bd_num;
11646 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
11647 for (i = 0; i < reg_num; i++) {
11648 index = i % entries_per_desc;
11649 desc_index = i / entries_per_desc;
11650 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
11652 for (i = 0; i < separator_num; i++)
11653 *reg++ = SEPARATOR_VALUE;
11655 return reg_num + separator_num;
11658 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11660 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11661 int data_len_per_desc, bd_num, i;
11662 int bd_num_list[BD_LIST_MAX_NUM];
11666 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11668 dev_err(&hdev->pdev->dev,
11669 "Get dfx reg bd num fail, status is %d.\n", ret);
11673 data_len_per_desc = sizeof_field(struct hclge_desc, data);
11675 for (i = 0; i < dfx_reg_type_num; i++) {
11676 bd_num = bd_num_list[i];
11677 data_len = data_len_per_desc * bd_num;
11678 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11684 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11686 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11687 int bd_num, bd_num_max, buf_len, i;
11688 int bd_num_list[BD_LIST_MAX_NUM];
11689 struct hclge_desc *desc_src;
11693 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11695 dev_err(&hdev->pdev->dev,
11696 "Get dfx reg bd num fail, status is %d.\n", ret);
11700 bd_num_max = bd_num_list[0];
11701 for (i = 1; i < dfx_reg_type_num; i++)
11702 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11704 buf_len = sizeof(*desc_src) * bd_num_max;
11705 desc_src = kzalloc(buf_len, GFP_KERNEL);
11709 for (i = 0; i < dfx_reg_type_num; i++) {
11710 bd_num = bd_num_list[i];
11711 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11712 hclge_dfx_reg_opcode_list[i]);
11714 dev_err(&hdev->pdev->dev,
11715 "Get dfx reg fail, status is %d.\n", ret);
11719 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11726 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11727 struct hnae3_knic_private_info *kinfo)
11729 #define HCLGE_RING_REG_OFFSET 0x200
11730 #define HCLGE_RING_INT_REG_OFFSET 0x4
11732 int i, j, reg_num, separator_num;
11736 /* fetch per-PF register values from the PF PCIe register space */
11737 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11738 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11739 for (i = 0; i < reg_num; i++)
11740 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11741 for (i = 0; i < separator_num; i++)
11742 *reg++ = SEPARATOR_VALUE;
11743 data_num_sum = reg_num + separator_num;
11745 reg_num = ARRAY_SIZE(common_reg_addr_list);
11746 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11747 for (i = 0; i < reg_num; i++)
11748 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11749 for (i = 0; i < separator_num; i++)
11750 *reg++ = SEPARATOR_VALUE;
11751 data_num_sum += reg_num + separator_num;
11753 reg_num = ARRAY_SIZE(ring_reg_addr_list);
11754 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11755 for (j = 0; j < kinfo->num_tqps; j++) {
11756 for (i = 0; i < reg_num; i++)
11757 *reg++ = hclge_read_dev(&hdev->hw,
11758 ring_reg_addr_list[i] +
11759 HCLGE_RING_REG_OFFSET * j);
11760 for (i = 0; i < separator_num; i++)
11761 *reg++ = SEPARATOR_VALUE;
11763 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11765 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11766 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11767 for (j = 0; j < hdev->num_msi_used - 1; j++) {
11768 for (i = 0; i < reg_num; i++)
11769 *reg++ = hclge_read_dev(&hdev->hw,
11770 tqp_intr_reg_addr_list[i] +
11771 HCLGE_RING_INT_REG_OFFSET * j);
11772 for (i = 0; i < separator_num; i++)
11773 *reg++ = SEPARATOR_VALUE;
11775 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11777 return data_num_sum;
11780 static int hclge_get_regs_len(struct hnae3_handle *handle)
11782 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11783 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11784 struct hclge_vport *vport = hclge_get_vport(handle);
11785 struct hclge_dev *hdev = vport->back;
11786 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11787 int regs_lines_32_bit, regs_lines_64_bit;
11790 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11792 dev_err(&hdev->pdev->dev,
11793 "Get register number failed, ret = %d.\n", ret);
11797 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11799 dev_err(&hdev->pdev->dev,
11800 "Get dfx reg len failed, ret = %d.\n", ret);
11804 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11805 REG_SEPARATOR_LINE;
11806 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11807 REG_SEPARATOR_LINE;
11808 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11809 REG_SEPARATOR_LINE;
11810 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11811 REG_SEPARATOR_LINE;
11812 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11813 REG_SEPARATOR_LINE;
11814 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11815 REG_SEPARATOR_LINE;
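/* Every block above is counted in whole REG_LEN_PER_LINE (16-byte)
 * lines, each including one trailing separator line; the DFX part
 * already reports its length in bytes.
 */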
11817 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11818 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11819 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11822 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11825 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11826 struct hclge_vport *vport = hclge_get_vport(handle);
11827 struct hclge_dev *hdev = vport->back;
11828 u32 regs_num_32_bit, regs_num_64_bit;
11829 int i, reg_num, separator_num, ret;
11832 *version = hdev->fw_version;
11834 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11836 dev_err(&hdev->pdev->dev,
11837 "Get register number failed, ret = %d.\n", ret);
11841 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11843 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11845 dev_err(&hdev->pdev->dev,
11846 "Get 32 bit register failed, ret = %d.\n", ret);
11849 reg_num = regs_num_32_bit;
11851 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11852 for (i = 0; i < separator_num; i++)
11853 *reg++ = SEPARATOR_VALUE;
11855 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11857 dev_err(&hdev->pdev->dev,
11858 "Get 64 bit register failed, ret = %d.\n", ret);
11861 reg_num = regs_num_64_bit * 2;
11863 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11864 for (i = 0; i < separator_num; i++)
11865 *reg++ = SEPARATOR_VALUE;
11867 ret = hclge_get_dfx_reg(hdev, reg);
11869 dev_err(&hdev->pdev->dev,
11870 "Get dfx register failed, ret = %d.\n", ret);
11873 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11875 struct hclge_set_led_state_cmd *req;
11876 struct hclge_desc desc;
11879 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11881 req = (struct hclge_set_led_state_cmd *)desc.data;
11882 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11883 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11885 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11887 dev_err(&hdev->pdev->dev,
11888 "Send set led state cmd error, ret =%d\n", ret);
11893 enum hclge_led_status {
11896 HCLGE_LED_NO_CHANGE = 0xFF,
11899 static int hclge_set_led_id(struct hnae3_handle *handle,
11900 enum ethtool_phys_id_state status)
11902 struct hclge_vport *vport = hclge_get_vport(handle);
11903 struct hclge_dev *hdev = vport->back;
11906 case ETHTOOL_ID_ACTIVE:
11907 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11908 case ETHTOOL_ID_INACTIVE:
11909 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11915 static void hclge_get_link_mode(struct hnae3_handle *handle,
11916 unsigned long *supported,
11917 unsigned long *advertising)
11919 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11920 struct hclge_vport *vport = hclge_get_vport(handle);
11921 struct hclge_dev *hdev = vport->back;
11922 unsigned int idx = 0;
11924 for (; idx < size; idx++) {
11925 supported[idx] = hdev->hw.mac.supported[idx];
11926 advertising[idx] = hdev->hw.mac.advertising[idx];
11930 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11932 struct hclge_vport *vport = hclge_get_vport(handle);
11933 struct hclge_dev *hdev = vport->back;
11935 return hclge_config_gro(hdev, enable);
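/* Re-apply the promiscuous mode derived from netdev_flags OR-ed with
 * the overflow promisc flags recorded when a MAC/VLAN table ran full;
 * the PROMISC_CHANGED bit is only cleared once the hardware accepts
 * the new setting, so the update can be retried on a later sync pass.
 */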
11938 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11940 struct hclge_vport *vport = &hdev->vport[0];
11941 struct hnae3_handle *handle = &vport->nic;
11945 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11946 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11947 vport->last_promisc_flags = vport->overflow_promisc_flags;
11950 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11951 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11952 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11953 tmp_flags & HNAE3_MPE);
11955 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11956 hclge_enable_vlan_filter(handle,
11957 tmp_flags & HNAE3_VLAN_FLTR);
11962 static bool hclge_module_existed(struct hclge_dev *hdev)
11964 struct hclge_desc desc;
11968 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11969 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11971 dev_err(&hdev->pdev->dev,
11972 "failed to get SFP exist state, ret = %d\n", ret);
11976 existed = le32_to_cpu(desc.data[0]);
11978 return existed != 0;
11981 /* One read needs 6 BDs (140 bytes in total).
11982 * Returns the number of bytes actually read; 0 means the read failed.
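 * Assumed BD layout, matching the copy loop below: bd0 carries the
 * offset/length words plus HCLGE_SFP_INFO_BD0_LEN data bytes, and
 * bd1~bd5 carry HCLGE_SFP_INFO_BDX_LEN data bytes each, giving the
 * 140-byte total.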
11984 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11987 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11988 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11994 /* Set up all 6 bds to read the module eeprom info. */
11995 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11996 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11999 /* bd0~bd4 need next flag */
12000 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12001 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12004 /* Set up bd0; this bd contains the offset and read length. */
12005 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12006 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12007 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12008 sfp_info_bd0->read_len = cpu_to_le16(read_len);
12010 ret = hclge_cmd_send(&hdev->hw, desc, i);
12012 dev_err(&hdev->pdev->dev,
12013 "failed to get SFP eeprom info, ret = %d\n", ret);
12017 /* copy sfp info from bd0 to out buffer. */
12018 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12019 memcpy(data, sfp_info_bd0->data, copy_len);
12020 read_len = copy_len;
12022 /* copy sfp info from bd1~bd5 to out buffer if needed. */
12023 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12024 if (read_len >= len)
12027 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12028 memcpy(data + read_len, desc[i].data, copy_len);
12029 read_len += copy_len;
12035 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12038 struct hclge_vport *vport = hclge_get_vport(handle);
12039 struct hclge_dev *hdev = vport->back;
12043 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12044 return -EOPNOTSUPP;
12046 if (!hclge_module_existed(hdev))
12049 while (read_len < len) {
12050 data_len = hclge_get_sfp_eeprom_info(hdev,
12057 read_len += data_len;
12063 static const struct hnae3_ae_ops hclge_ops = {
12064 .init_ae_dev = hclge_init_ae_dev,
12065 .uninit_ae_dev = hclge_uninit_ae_dev,
12066 .flr_prepare = hclge_flr_prepare,
12067 .flr_done = hclge_flr_done,
12068 .init_client_instance = hclge_init_client_instance,
12069 .uninit_client_instance = hclge_uninit_client_instance,
12070 .map_ring_to_vector = hclge_map_ring_to_vector,
12071 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12072 .get_vector = hclge_get_vector,
12073 .put_vector = hclge_put_vector,
12074 .set_promisc_mode = hclge_set_promisc_mode,
12075 .request_update_promisc_mode = hclge_request_update_promisc_mode,
12076 .set_loopback = hclge_set_loopback,
12077 .start = hclge_ae_start,
12078 .stop = hclge_ae_stop,
12079 .client_start = hclge_client_start,
12080 .client_stop = hclge_client_stop,
12081 .get_status = hclge_get_status,
12082 .get_ksettings_an_result = hclge_get_ksettings_an_result,
12083 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12084 .get_media_type = hclge_get_media_type,
12085 .check_port_speed = hclge_check_port_speed,
12086 .get_fec = hclge_get_fec,
12087 .set_fec = hclge_set_fec,
12088 .get_rss_key_size = hclge_get_rss_key_size,
12089 .get_rss = hclge_get_rss,
12090 .set_rss = hclge_set_rss,
12091 .set_rss_tuple = hclge_set_rss_tuple,
12092 .get_rss_tuple = hclge_get_rss_tuple,
12093 .get_tc_size = hclge_get_tc_size,
12094 .get_mac_addr = hclge_get_mac_addr,
12095 .set_mac_addr = hclge_set_mac_addr,
12096 .do_ioctl = hclge_do_ioctl,
12097 .add_uc_addr = hclge_add_uc_addr,
12098 .rm_uc_addr = hclge_rm_uc_addr,
12099 .add_mc_addr = hclge_add_mc_addr,
12100 .rm_mc_addr = hclge_rm_mc_addr,
12101 .set_autoneg = hclge_set_autoneg,
12102 .get_autoneg = hclge_get_autoneg,
12103 .restart_autoneg = hclge_restart_autoneg,
12104 .halt_autoneg = hclge_halt_autoneg,
12105 .get_pauseparam = hclge_get_pauseparam,
12106 .set_pauseparam = hclge_set_pauseparam,
12107 .set_mtu = hclge_set_mtu,
12108 .reset_queue = hclge_reset_tqp,
12109 .get_stats = hclge_get_stats,
12110 .get_mac_stats = hclge_get_mac_stat,
12111 .update_stats = hclge_update_stats,
12112 .get_strings = hclge_get_strings,
12113 .get_sset_count = hclge_get_sset_count,
12114 .get_fw_version = hclge_get_fw_version,
12115 .get_mdix_mode = hclge_get_mdix_mode,
12116 .enable_vlan_filter = hclge_enable_vlan_filter,
12117 .set_vlan_filter = hclge_set_vlan_filter,
12118 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12119 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12120 .reset_event = hclge_reset_event,
12121 .get_reset_level = hclge_get_reset_level,
12122 .set_default_reset_request = hclge_set_def_reset_request,
12123 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12124 .set_channels = hclge_set_channels,
12125 .get_channels = hclge_get_channels,
12126 .get_regs_len = hclge_get_regs_len,
12127 .get_regs = hclge_get_regs,
12128 .set_led_id = hclge_set_led_id,
12129 .get_link_mode = hclge_get_link_mode,
12130 .add_fd_entry = hclge_add_fd_entry,
12131 .del_fd_entry = hclge_del_fd_entry,
12132 .del_all_fd_entries = hclge_del_all_fd_entries,
12133 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12134 .get_fd_rule_info = hclge_get_fd_rule_info,
12135 .get_fd_all_rules = hclge_get_all_rules,
12136 .enable_fd = hclge_enable_fd,
12137 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12138 .dbg_run_cmd = hclge_dbg_run_cmd,
12139 .dbg_read_cmd = hclge_dbg_read_cmd,
12140 .handle_hw_ras_error = hclge_handle_hw_ras_error,
12141 .get_hw_reset_stat = hclge_get_hw_reset_stat,
12142 .ae_dev_resetting = hclge_ae_dev_resetting,
12143 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12144 .set_gro_en = hclge_gro_en,
12145 .get_global_queue_id = hclge_covert_handle_qid_global,
12146 .set_timer_task = hclge_set_timer_task,
12147 .mac_connect_phy = hclge_mac_connect_phy,
12148 .mac_disconnect_phy = hclge_mac_disconnect_phy,
12149 .get_vf_config = hclge_get_vf_config,
12150 .set_vf_link_state = hclge_set_vf_link_state,
12151 .set_vf_spoofchk = hclge_set_vf_spoofchk,
12152 .set_vf_trust = hclge_set_vf_trust,
12153 .set_vf_rate = hclge_set_vf_rate,
12154 .set_vf_mac = hclge_set_vf_mac,
12155 .get_module_eeprom = hclge_get_module_eeprom,
12156 .get_cmdq_stat = hclge_get_cmdq_stat,
12157 .add_cls_flower = hclge_add_cls_flower,
12158 .del_cls_flower = hclge_del_cls_flower,
12159 .cls_flower_active = hclge_is_cls_flower_active,
12160 .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12161 .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12164 static struct hnae3_ae_algo ae_algo = {
12166 .pdev_id_table = ae_algo_pci_tbl,
12169 static int hclge_init(void)
12171 pr_info("%s is initializing\n", HCLGE_NAME);
12173 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12175 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12179 hnae3_register_ae_algo(&ae_algo);
12184 static void hclge_exit(void)
12186 hnae3_unregister_ae_algo(&ae_algo);
12187 destroy_workqueue(hclge_wq);
12189 module_init(hclge_init);
12190 module_exit(hclge_exit);
12192 MODULE_LICENSE("GPL");
12193 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12194 MODULE_DESCRIPTION("HCLGE Driver");
12195 MODULE_VERSION(HCLGE_MOD_VERSION);