// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
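/* Illustrative only: the two helpers above combine to read one u64
 * counter out of struct hclge_mac_stats by its field offset, e.g.:
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * This is exactly how hclge_comm_get_stats() walks the
 * g_mac_stats_string[] table defined below.
 */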
#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500
/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};
static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};
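/* Note: hclge_dfx_bd_offset_list[] and hclge_dfx_reg_opcode_list[] are
 * parallel arrays; entry i of one pairs with entry i of the other. For
 * example, index 0 maps HCLGE_DFX_BIOS_BD_OFFSET to
 * HCLGE_OPC_DFX_BIOS_COMMON_REG.
 */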
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
};
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
};
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
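/* Illustrative arithmetic for the descriptor count above: the formula
 * implies the first descriptor carries 3 registers and each following
 * one carries 4. If the firmware reported reg_num = 35, then
 * (35 - 3) >> 2 = 8 full extra descriptors with no remainder, so
 * *desc_num = 1 + 8 + 0 = 9.
 */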
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);
	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has two queues: TX and RX */
	return kinfo->num_tqps * 2;
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: supported by all mac modes (GE/XGE/LGE/CGE)
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check if PF reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and RoCE vectors,
		 * NIC vectors are queued before RoCE vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}
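/* Vector layout note (illustrative numbers): when RoCE is supported, the
 * PF's MSI-X space is partitioned NIC-first. E.g. num_nic_msi = 32 and
 * num_roce_msi = 16 gives num_msi = 48, with vectors 0..31 serving the
 * NIC and 32..47 handed to the RoCE client.
 */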
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	/* firmware speed codes below are an assumption reconstructed from
	 * the surrounding mapping; the elided case labels were not in this
	 * excerpt.
	 */
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case 8:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	case HCLGE_MAC_SPEED_200G:
		speed_bit = HCLGE_SUPPORT_200G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			mac->supported);
}
static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
	case HCLGE_MAC_SPEED_200G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
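/* FEC ability summary, derived from the switch above: 10G/40G links get
 * BaseR (+ auto); 25G/50G get BaseR, RS and auto in fec_ability with RS
 * advertised in the supported link modes; 100G/200G are RS-only (+ auto);
 * any other speed advertises no FEC at all.
 */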
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to supporting all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
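/* Example: a speed_ability word with the 25G, 10G and 1G bits set
 * resolves to HCLGE_MAC_SPEED_25G, since the checks above walk from the
 * highest supported speed downwards; an empty word falls back to 1G.
 */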
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define SPEED_ABILITY_EXT_SHIFT	8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
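	/* Illustrative layout: param[2] carries the low 32 bits of the MAC
	 * address and param[3] the high 16 bits, so a low word of
	 * 0x44332211 plus a high half-word of 0x6655 reassembles to
	 * 11:22:33:44:55:66 once the bytes are extracted LSB-first below.
	 * The split "<< 31 << 1" is simply equivalent to "<< 32".
	 */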
	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M stores the PF max rss size as a power
	 * of 2 (i.e. log2 of the size) rather than the size itself, which
	 * is more flexible for future changes and expansions. A value of
	 * 0 in this field is not meaningful, so in that case the PF uses
	 * the same max rss size as the VF (the HCLGE_CFG_RSS_SIZE_S field).
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;
}
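/* Example: a pf_rss_size_max field of 7 decodes to 1U << 7 = 128 as the
 * PF max RSS size, while a field of 0 falls back to vf_rss_size_max as
 * read above.
 */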
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length is passed to hardware in units of 4 bytes */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimum number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return 0;
}
static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure a one-to-one mapping between irq and queue by default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}
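/* Worked example for the rss_size clamps above (illustrative numbers):
 * with alloc_tqps = 16, num_tc = 4, pf_rss_size_max = 64 and
 * num_nic_msi = 17, the first min_t() yields min(64, 16 / 4) = 4 and the
 * second yields min(4, (17 - 1) / 4) = 4, so each TC gets 4 RSS queues.
 */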
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
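	/* Illustrative split: with num_tqps = 64 and num_vport = 9
	 * (1 PF + 8 VFs), tqp_per_vport = 64 / 9 = 7 and the main vport
	 * also absorbs the remainder: 7 + 64 % 9 = 8 queue pairs.
	 */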
	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is expressed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
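/* Worked example for the DCB waterlines above (illustrative numbers):
 * with shared_buf - dv_buf_size = 40960 bytes, tc_num = 2 and
 * aligned_mps = 1536, hi_thrd = 40960 * 90 / 100 = 36864, then
 * 36864 / 2 = 18432, which already exceeds 2 * 1536 and is 256-byte
 * aligned, so each TC gets high = 18432 and
 * low = 18432 - 1536 / 2 = 17664 bytes.
 */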
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
2088 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2089 struct hclge_pkt_buf_alloc *buf_alloc)
2091 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2092 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2095 /* let the last to be cleared first */
2096 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2097 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2098 unsigned int mask = BIT((unsigned int)i);
2100 if (hdev->hw_tc_map & mask &&
2101 !(hdev->tm_info.hw_pfc_map & mask)) {
2102 /* Clear the no pfc TC private buffer */
2110 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2111 no_pfc_priv_num == 0)
2115 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
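/* If releasing the non-PFC private buffers above was not enough, the
 * helper below also gives up the private buffers of PFC-enabled TCs,
 * starting from the last TC, until the layout fits.
 */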
2118 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2119 struct hclge_pkt_buf_alloc *buf_alloc)
2121 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2122 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2125 /* clear the TCs starting from the last one */
2126 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2127 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2128 unsigned int mask = BIT((unsigned int)i);
2130 if (hdev->hw_tc_map & mask &&
2131 hdev->tm_info.hw_pfc_map & mask) {
2132 /* Reduce the number of PFC TCs with private buffer */
2140 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2145 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
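/* Last-resort layout: the remaining RX packet buffer is split evenly
 * into per-TC private buffers with no shared buffer at all. The attempt
 * is rejected when the per-TC slice would fall below min_rx_priv
 * (dv_buf_size plus the compensation headroom computed below).
 */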
2148 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2149 struct hclge_pkt_buf_alloc *buf_alloc)
2151 #define COMPENSATE_BUFFER 0x3C00
2152 #define COMPENSATE_HALF_MPS_NUM 5
2153 #define PRIV_WL_GAP 0x1800
2155 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2156 u32 tc_num = hclge_get_tc_num(hdev);
2157 u32 half_mps = hdev->mps >> 1;
2162 rx_priv = rx_priv / tc_num;
2164 if (tc_num <= NEED_RESERVE_TC_NUM)
2165 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2167 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2168 COMPENSATE_HALF_MPS_NUM * half_mps;
2169 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2170 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2172 if (rx_priv < min_rx_priv)
2175 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2176 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2183 if (!(hdev->hw_tc_map & BIT(i)))
2187 priv->buf_size = rx_priv;
2188 priv->wl.high = rx_priv - hdev->dv_buf_size;
2189 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2192 buf_alloc->s_buf.buf_size = 0;
2197 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2198 * @hdev: pointer to struct hclge_dev
2199 * @buf_alloc: pointer to buffer calculation data
2200 * @return: 0: calculation successful, negative: fail
2202 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2203 struct hclge_pkt_buf_alloc *buf_alloc)
2205 /* When DCB is not supported, rx private buffer is not allocated. */
2206 if (!hnae3_dev_dcb_supported(hdev)) {
2207 u32 rx_all = hdev->pkt_buf_size;
2209 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2210 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2216 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2219 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2222 /* try to decrease the buffer size */
2223 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2226 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2229 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2235 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2236 struct hclge_pkt_buf_alloc *buf_alloc)
2238 struct hclge_rx_priv_buff_cmd *req;
2239 struct hclge_desc desc;
2243 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2244 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2246 /* Alloc private buffer TCs */
2247 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2248 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2251 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2253 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2257 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2258 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2260 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2262 dev_err(&hdev->pdev->dev,
2263 "rx private buffer alloc cmd failed %d\n", ret);
2268 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2269 struct hclge_pkt_buf_alloc *buf_alloc)
2271 struct hclge_rx_priv_wl_buf *req;
2272 struct hclge_priv_buf *priv;
2273 struct hclge_desc desc[2];
2277 for (i = 0; i < 2; i++) {
2278 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2280 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2282 /* The first descriptor sets the NEXT bit to 1 */
2284 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2286 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2288 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2289 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2291 priv = &buf_alloc->priv_buf[idx];
2292 req->tc_wl[j].high =
2293 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2294 req->tc_wl[j].high |=
2295 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2297 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2298 req->tc_wl[j].low |=
2299 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2303 /* Send 2 descriptors at one time */
2304 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2306 dev_err(&hdev->pdev->dev,
2307 "rx private waterline config cmd failed %d\n",
2312 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2313 struct hclge_pkt_buf_alloc *buf_alloc)
2315 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2316 struct hclge_rx_com_thrd *req;
2317 struct hclge_desc desc[2];
2318 struct hclge_tc_thrd *tc;
2322 for (i = 0; i < 2; i++) {
2323 hclge_cmd_setup_basic_desc(&desc[i],
2324 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2325 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2327 /* The first descriptor sets the NEXT bit to 1 */
2329 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2331 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2333 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2334 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2336 req->com_thrd[j].high =
2337 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2338 req->com_thrd[j].high |=
2339 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2340 req->com_thrd[j].low =
2341 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2342 req->com_thrd[j].low |=
2343 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2347 /* Send 2 descriptors at one time */
2348 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2350 dev_err(&hdev->pdev->dev,
2351 "common threshold config cmd failed %d\n", ret);
2355 static int hclge_common_wl_config(struct hclge_dev *hdev,
2356 struct hclge_pkt_buf_alloc *buf_alloc)
2358 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2359 struct hclge_rx_com_wl *req;
2360 struct hclge_desc desc;
2363 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2365 req = (struct hclge_rx_com_wl *)desc.data;
2366 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2367 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2369 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2370 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2372 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2374 dev_err(&hdev->pdev->dev,
2375 "common waterline config cmd failed %d\n", ret);
2380 int hclge_buffer_alloc(struct hclge_dev *hdev)
2382 struct hclge_pkt_buf_alloc *pkt_buf;
2385 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2389 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2391 dev_err(&hdev->pdev->dev,
2392 "could not calc tx buffer size for all TCs %d\n", ret);
2396 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2398 dev_err(&hdev->pdev->dev,
2399 "could not alloc tx buffers %d\n", ret);
2403 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2405 dev_err(&hdev->pdev->dev,
2406 "could not calc rx priv buffer size for all TCs %d\n",
2411 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2413 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2418 if (hnae3_dev_dcb_supported(hdev)) {
2419 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2421 dev_err(&hdev->pdev->dev,
2422 "could not configure rx private waterline %d\n",
2427 ret = hclge_common_thrd_config(hdev, pkt_buf);
2429 dev_err(&hdev->pdev->dev,
2430 "could not configure common threshold %d\n",
2436 ret = hclge_common_wl_config(hdev, pkt_buf);
2438 dev_err(&hdev->pdev->dev,
2439 "could not configure common waterline %d\n", ret);
2446 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2448 struct hnae3_handle *roce = &vport->roce;
2449 struct hnae3_handle *nic = &vport->nic;
2450 struct hclge_dev *hdev = vport->back;
2452 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2454 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2457 roce->rinfo.base_vector = hdev->roce_base_vector;
2459 roce->rinfo.netdev = nic->kinfo.netdev;
2460 roce->rinfo.roce_io_base = hdev->hw.io_base;
2461 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2463 roce->pdev = nic->pdev;
2464 roce->ae_algo = nic->ae_algo;
2465 roce->numa_node_mask = nic->numa_node_mask;
2470 static int hclge_init_msi(struct hclge_dev *hdev)
2472 struct pci_dev *pdev = hdev->pdev;
2476 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2478 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2481 "failed(%d) to allocate MSI/MSI-X vectors\n",
2485 if (vectors < hdev->num_msi)
2486 dev_warn(&hdev->pdev->dev,
2487 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2488 hdev->num_msi, vectors);
2490 hdev->num_msi = vectors;
2491 hdev->num_msi_left = vectors;
2493 hdev->base_msi_vector = pdev->irq;
2494 hdev->roce_base_vector = hdev->base_msi_vector +
2497 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2498 sizeof(u16), GFP_KERNEL);
2499 if (!hdev->vector_status) {
2500 pci_free_irq_vectors(pdev);
2504 for (i = 0; i < hdev->num_msi; i++)
2505 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2507 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2508 sizeof(int), GFP_KERNEL);
2509 if (!hdev->vector_irq) {
2510 pci_free_irq_vectors(pdev);
2517 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2519 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2520 duplex = HCLGE_MAC_FULL;
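/* The firmware encodes link speed as a small integer in the speed_dup
 * field (1G = 0, 10G = 1, 25G = 2, 40G = 3, 50G = 4, 100G = 5,
 * 10M = 6, 100M = 7, 200G = 8, per the switch statement below), so the
 * driver's HCLGE_MAC_SPEED_* values are translated before being sent.
 */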
2525 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2528 struct hclge_config_mac_speed_dup_cmd *req;
2529 struct hclge_desc desc;
2532 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2534 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2537 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2540 case HCLGE_MAC_SPEED_10M:
2541 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2542 HCLGE_CFG_SPEED_S, 6);
2544 case HCLGE_MAC_SPEED_100M:
2545 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2546 HCLGE_CFG_SPEED_S, 7);
2548 case HCLGE_MAC_SPEED_1G:
2549 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2550 HCLGE_CFG_SPEED_S, 0);
2552 case HCLGE_MAC_SPEED_10G:
2553 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2554 HCLGE_CFG_SPEED_S, 1);
2556 case HCLGE_MAC_SPEED_25G:
2557 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2558 HCLGE_CFG_SPEED_S, 2);
2560 case HCLGE_MAC_SPEED_40G:
2561 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2562 HCLGE_CFG_SPEED_S, 3);
2564 case HCLGE_MAC_SPEED_50G:
2565 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2566 HCLGE_CFG_SPEED_S, 4);
2568 case HCLGE_MAC_SPEED_100G:
2569 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570 HCLGE_CFG_SPEED_S, 5);
2572 case HCLGE_MAC_SPEED_200G:
2573 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574 HCLGE_CFG_SPEED_S, 8);
2577 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2581 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2584 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2586 dev_err(&hdev->pdev->dev,
2587 "mac speed/duplex config cmd failed %d.\n", ret);
2594 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2596 struct hclge_mac *mac = &hdev->hw.mac;
2599 duplex = hclge_check_speed_dup(duplex, speed);
2600 if (!mac->support_autoneg && mac->speed == speed &&
2601 mac->duplex == duplex)
2604 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2608 hdev->hw.mac.speed = speed;
2609 hdev->hw.mac.duplex = duplex;
2614 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2617 struct hclge_vport *vport = hclge_get_vport(handle);
2618 struct hclge_dev *hdev = vport->back;
2620 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2623 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2625 struct hclge_config_auto_neg_cmd *req;
2626 struct hclge_desc desc;
2630 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2632 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2634 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2635 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2637 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2639 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2645 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2647 struct hclge_vport *vport = hclge_get_vport(handle);
2648 struct hclge_dev *hdev = vport->back;
2650 if (!hdev->hw.mac.support_autoneg) {
2652 dev_err(&hdev->pdev->dev,
2653 "autoneg is not supported by current port\n");
2660 return hclge_set_autoneg_en(hdev, enable);
2663 static int hclge_get_autoneg(struct hnae3_handle *handle)
2665 struct hclge_vport *vport = hclge_get_vport(handle);
2666 struct hclge_dev *hdev = vport->back;
2667 struct phy_device *phydev = hdev->hw.mac.phydev;
2670 return phydev->autoneg;
2672 return hdev->hw.mac.autoneg;
2675 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2677 struct hclge_vport *vport = hclge_get_vport(handle);
2678 struct hclge_dev *hdev = vport->back;
2681 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2683 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2686 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2689 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2691 struct hclge_vport *vport = hclge_get_vport(handle);
2692 struct hclge_dev *hdev = vport->back;
2694 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2695 return hclge_set_autoneg_en(hdev, !halt);
2700 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2702 struct hclge_config_fec_cmd *req;
2703 struct hclge_desc desc;
2706 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2708 req = (struct hclge_config_fec_cmd *)desc.data;
2709 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2710 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2711 if (fec_mode & BIT(HNAE3_FEC_RS))
2712 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2713 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2714 if (fec_mode & BIT(HNAE3_FEC_BASER))
2715 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2716 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2718 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2720 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2725 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2727 struct hclge_vport *vport = hclge_get_vport(handle);
2728 struct hclge_dev *hdev = vport->back;
2729 struct hclge_mac *mac = &hdev->hw.mac;
2732 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2733 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2737 ret = hclge_set_fec_hw(hdev, fec_mode);
2741 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2745 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2748 struct hclge_vport *vport = hclge_get_vport(handle);
2749 struct hclge_dev *hdev = vport->back;
2750 struct hclge_mac *mac = &hdev->hw.mac;
2753 *fec_ability = mac->fec_ability;
2755 *fec_mode = mac->fec_mode;
2758 static int hclge_mac_init(struct hclge_dev *hdev)
2760 struct hclge_mac *mac = &hdev->hw.mac;
2763 hdev->support_sfp_query = true;
2764 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2765 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2766 hdev->hw.mac.duplex);
2770 if (hdev->hw.mac.support_autoneg) {
2771 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2778 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2779 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2784 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2786 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2790 ret = hclge_set_default_loopback(hdev);
2794 ret = hclge_buffer_alloc(hdev);
2796 dev_err(&hdev->pdev->dev,
2797 "allocate buffer fail, ret=%d\n", ret);
2802 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2804 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2805 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2806 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2807 hclge_wq, &hdev->service_task, 0);
2810 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2812 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2813 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2814 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2815 hclge_wq, &hdev->service_task, 0);
2818 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2820 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2821 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2822 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2823 hclge_wq, &hdev->service_task,
2827 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2829 struct hclge_link_status_cmd *req;
2830 struct hclge_desc desc;
2833 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2834 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2836 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2841 req = (struct hclge_link_status_cmd *)desc.data;
2842 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2843 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
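/* Link is reported as down while the device is in the DOWN state, or
 * while an attached PHY exists but is not up and running; only then is
 * the MAC link status register consulted.
 */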
2848 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2850 struct phy_device *phydev = hdev->hw.mac.phydev;
2852 *link_status = HCLGE_LINK_STATUS_DOWN;
2854 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2857 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2860 return hclge_get_mac_link_status(hdev, link_status);
2863 static void hclge_update_link_status(struct hclge_dev *hdev)
2865 struct hnae3_client *rclient = hdev->roce_client;
2866 struct hnae3_client *client = hdev->nic_client;
2867 struct hnae3_handle *rhandle;
2868 struct hnae3_handle *handle;
2876 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2879 ret = hclge_get_mac_phy_link(hdev, &state);
2881 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2885 if (state != hdev->hw.mac.link) {
2886 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2887 handle = &hdev->vport[i].nic;
2888 client->ops->link_status_change(handle, state);
2889 hclge_config_mac_tnl_int(hdev, state);
2890 rhandle = &hdev->vport[i].roce;
2891 if (rclient && rclient->ops->link_status_change)
2892 rclient->ops->link_status_change(rhandle,
2895 hdev->hw.mac.link = state;
2898 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2901 static void hclge_update_port_capability(struct hclge_dev *hdev,
2902 struct hclge_mac *mac)
2904 if (hnae3_dev_fec_supported(hdev))
2905 /* update fec ability by speed */
2906 hclge_convert_setting_fec(mac);
2908 /* firmware cannot identify the backplane type; the media type
2909 * read from the configuration can help deal with it
2911 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2912 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2913 mac->module_type = HNAE3_MODULE_TYPE_KR;
2914 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2915 mac->module_type = HNAE3_MODULE_TYPE_TP;
2917 if (mac->support_autoneg) {
2918 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2919 linkmode_copy(mac->advertising, mac->supported);
2921 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2923 linkmode_zero(mac->advertising);
2927 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2929 struct hclge_sfp_info_cmd *resp;
2930 struct hclge_desc desc;
2933 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2934 resp = (struct hclge_sfp_info_cmd *)desc.data;
2935 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2936 if (ret == -EOPNOTSUPP) {
2937 dev_warn(&hdev->pdev->dev,
2938 "IMP do not support get SFP speed %d\n", ret);
2941 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2945 *speed = le32_to_cpu(resp->speed);
2950 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2952 struct hclge_sfp_info_cmd *resp;
2953 struct hclge_desc desc;
2956 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2957 resp = (struct hclge_sfp_info_cmd *)desc.data;
2959 resp->query_type = QUERY_ACTIVE_SPEED;
2961 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2962 if (ret == -EOPNOTSUPP) {
2963 dev_warn(&hdev->pdev->dev,
2964 "IMP does not support get SFP info %d\n", ret);
2967 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2971 /* In some cases, the mac speed obtained from the IMP may be 0;
2972 * it should not be assigned to mac->speed.
2974 if (!le32_to_cpu(resp->speed))
2977 mac->speed = le32_to_cpu(resp->speed);
2978 /* if resp->speed_ability is 0, the firmware is an old version,
2979 * so do not update these params
2981 if (resp->speed_ability) {
2982 mac->module_type = le32_to_cpu(resp->module_type);
2983 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2984 mac->autoneg = resp->autoneg;
2985 mac->support_autoneg = resp->autoneg_ability;
2986 mac->speed_type = QUERY_ACTIVE_SPEED;
2987 if (!resp->active_fec)
2990 mac->fec_mode = BIT(resp->active_fec);
2992 mac->speed_type = QUERY_SFP_SPEED;
2998 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
2999 struct ethtool_link_ksettings *cmd)
3001 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3002 struct hclge_vport *vport = hclge_get_vport(handle);
3003 struct hclge_phy_link_ksetting_0_cmd *req0;
3004 struct hclge_phy_link_ksetting_1_cmd *req1;
3005 u32 supported, advertising, lp_advertising;
3006 struct hclge_dev *hdev = vport->back;
3009 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3011 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3012 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3015 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3017 dev_err(&hdev->pdev->dev,
3018 "failed to get phy link ksetting, ret = %d.\n", ret);
3022 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3023 cmd->base.autoneg = req0->autoneg;
3024 cmd->base.speed = le32_to_cpu(req0->speed);
3025 cmd->base.duplex = req0->duplex;
3026 cmd->base.port = req0->port;
3027 cmd->base.transceiver = req0->transceiver;
3028 cmd->base.phy_address = req0->phy_address;
3029 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3030 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3031 supported = le32_to_cpu(req0->supported);
3032 advertising = le32_to_cpu(req0->advertising);
3033 lp_advertising = le32_to_cpu(req0->lp_advertising);
3034 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3036 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3038 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3041 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3042 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3043 cmd->base.master_slave_state = req1->master_slave_state;
3049 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3050 const struct ethtool_link_ksettings *cmd)
3052 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3053 struct hclge_vport *vport = hclge_get_vport(handle);
3054 struct hclge_phy_link_ksetting_0_cmd *req0;
3055 struct hclge_phy_link_ksetting_1_cmd *req1;
3056 struct hclge_dev *hdev = vport->back;
3060 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3061 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3062 (cmd->base.duplex != DUPLEX_HALF &&
3063 cmd->base.duplex != DUPLEX_FULL)))
3066 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3068 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3069 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3072 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3073 req0->autoneg = cmd->base.autoneg;
3074 req0->speed = cpu_to_le32(cmd->base.speed);
3075 req0->duplex = cmd->base.duplex;
3076 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3077 cmd->link_modes.advertising);
3078 req0->advertising = cpu_to_le32(advertising);
3079 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3081 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3082 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3084 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3086 dev_err(&hdev->pdev->dev,
3087 "failed to set phy link ksettings, ret = %d.\n", ret);
3091 hdev->hw.mac.autoneg = cmd->base.autoneg;
3092 hdev->hw.mac.speed = cmd->base.speed;
3093 hdev->hw.mac.duplex = cmd->base.duplex;
3094 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3099 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3101 struct ethtool_link_ksettings cmd;
3104 if (!hnae3_dev_phy_imp_supported(hdev))
3107 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3111 hdev->hw.mac.autoneg = cmd.base.autoneg;
3112 hdev->hw.mac.speed = cmd.base.speed;
3113 hdev->hw.mac.duplex = cmd.base.duplex;
3118 static int hclge_tp_port_init(struct hclge_dev *hdev)
3120 struct ethtool_link_ksettings cmd;
3122 if (!hnae3_dev_phy_imp_supported(hdev))
3125 cmd.base.autoneg = hdev->hw.mac.autoneg;
3126 cmd.base.speed = hdev->hw.mac.speed;
3127 cmd.base.duplex = hdev->hw.mac.duplex;
3128 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3130 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3133 static int hclge_update_port_info(struct hclge_dev *hdev)
3135 struct hclge_mac *mac = &hdev->hw.mac;
3136 int speed = HCLGE_MAC_SPEED_UNKNOWN;
3139 /* get the port info from SFP cmd if not copper port */
3140 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3141 return hclge_update_tp_port_info(hdev);
3143 /* if IMP does not support getting SFP/qSFP info, return directly */
3144 if (!hdev->support_sfp_query)
3147 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3148 ret = hclge_get_sfp_info(hdev, mac);
3150 ret = hclge_get_sfp_speed(hdev, &speed);
3152 if (ret == -EOPNOTSUPP) {
3153 hdev->support_sfp_query = false;
3159 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3160 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3161 hclge_update_port_capability(hdev, mac);
3164 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3167 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3168 return 0; /* do nothing if no SFP */
3170 /* must configure full duplex for SFP */
3171 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3175 static int hclge_get_status(struct hnae3_handle *handle)
3177 struct hclge_vport *vport = hclge_get_vport(handle);
3178 struct hclge_dev *hdev = vport->back;
3180 hclge_update_link_status(hdev);
3182 return hdev->hw.mac.link;
3185 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3187 if (!pci_num_vf(hdev->pdev)) {
3188 dev_err(&hdev->pdev->dev,
3189 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3193 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3194 dev_err(&hdev->pdev->dev,
3195 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3196 vf, pci_num_vf(hdev->pdev));
3200 /* VFs start from 1 in the vport array */
3201 vf += HCLGE_VF_VPORT_START_NUM;
3202 return &hdev->vport[vf];
3205 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3206 struct ifla_vf_info *ivf)
3208 struct hclge_vport *vport = hclge_get_vport(handle);
3209 struct hclge_dev *hdev = vport->back;
3211 vport = hclge_get_vf_vport(hdev, vf);
3216 ivf->linkstate = vport->vf_info.link_state;
3217 ivf->spoofchk = vport->vf_info.spoofchk;
3218 ivf->trusted = vport->vf_info.trusted;
3219 ivf->min_tx_rate = 0;
3220 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3221 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3222 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3223 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3224 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3229 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3232 struct hclge_vport *vport = hclge_get_vport(handle);
3233 struct hclge_dev *hdev = vport->back;
3235 vport = hclge_get_vf_vport(hdev, vf);
3239 vport->vf_info.link_state = link_state;
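/* Vector 0 event sources are checked in strict priority order below:
 * IMP reset, then global reset, then MSI-X (hardware error), then
 * mailbox; only the highest-priority pending event is returned for a
 * given interrupt.
 */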
3244 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3246 u32 cmdq_src_reg, msix_src_reg;
3248 /* fetch the events from their corresponding regs */
3249 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3250 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3252 /* Assumption: If by any chance reset and mailbox events are reported
3253 * together then we will only process reset event in this go and will
3254 * defer the processing of the mailbox events. Since we would not have
3255 * cleared the RX CMDQ event this time, we would receive another
3256 * interrupt from the H/W just for the mailbox.
3258 * check for vector0 reset event sources
3260 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3261 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3262 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3263 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3264 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3265 hdev->rst_stats.imp_rst_cnt++;
3266 return HCLGE_VECTOR0_EVENT_RST;
3269 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3270 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3271 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3272 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3273 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3274 hdev->rst_stats.global_rst_cnt++;
3275 return HCLGE_VECTOR0_EVENT_RST;
3278 /* check for vector0 msix event source */
3279 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3280 *clearval = msix_src_reg;
3281 return HCLGE_VECTOR0_EVENT_ERR;
3284 /* check for vector0 mailbox(=CMDQ RX) event source */
3285 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3286 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3287 *clearval = cmdq_src_reg;
3288 return HCLGE_VECTOR0_EVENT_MBX;
3291 /* print other vector0 event source */
3292 dev_info(&hdev->pdev->dev,
3293 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3294 cmdq_src_reg, msix_src_reg);
3295 *clearval = msix_src_reg;
3297 return HCLGE_VECTOR0_EVENT_OTHER;
3300 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3303 switch (event_type) {
3304 case HCLGE_VECTOR0_EVENT_RST:
3305 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3307 case HCLGE_VECTOR0_EVENT_MBX:
3308 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3315 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3317 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3318 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3319 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3320 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3321 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3324 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3326 writel(enable ? 1 : 0, vector->addr);
3329 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3331 struct hclge_dev *hdev = data;
3335 hclge_enable_vector(&hdev->misc_vector, false);
3336 event_cause = hclge_check_event_cause(hdev, &clearval);
3338 /* vector 0 interrupt is shared with reset and mailbox source events. */
3339 switch (event_cause) {
3340 case HCLGE_VECTOR0_EVENT_ERR:
3341 /* we do not know what type of reset is required now. This could
3342 * only be decided after we fetch the type of errors which
3343 * caused this event. Therefore, we will do the following for now:
3344 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3345 * have deferred the type of reset to be used.
3346 * 2. Schedule the reset service task.
3347 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3348 * will fetch the correct type of reset. This would be done
3349 * by first decoding the types of errors.
3351 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3353 case HCLGE_VECTOR0_EVENT_RST:
3354 hclge_reset_task_schedule(hdev);
3356 case HCLGE_VECTOR0_EVENT_MBX:
3357 /* If we are here then,
3358 * 1. Either we are not handling any mbx task and we are not
3361 * 2. We could be handling a mbx task but nothing more is
3363 * In both cases, we should schedule mbx task as there are more
3364 * mbx messages reported by this interrupt.
3366 hclge_mbx_task_schedule(hdev);
3369 dev_warn(&hdev->pdev->dev,
3370 "received unknown or unhandled event of vector0\n");
3374 hclge_clear_event_cause(hdev, event_cause, clearval);
3376 /* Enable the interrupt if it is not caused by reset. And when
3377 * clearval equals 0, the interrupt status may have been
3378 * cleared by hardware before the driver reads the status register.
3379 * In this case, the vector0 interrupt should also be enabled.
3382 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3383 hclge_enable_vector(&hdev->misc_vector, true);
3389 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3391 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3392 dev_warn(&hdev->pdev->dev,
3393 "vector(vector_id %d) has been freed.\n", vector_id);
3397 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3398 hdev->num_msi_left += 1;
3399 hdev->num_msi_used -= 1;
3402 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3404 struct hclge_misc_vector *vector = &hdev->misc_vector;
3406 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3408 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3409 hdev->vector_status[0] = 0;
3411 hdev->num_msi_left -= 1;
3412 hdev->num_msi_used += 1;
3415 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3416 const cpumask_t *mask)
3418 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3421 cpumask_copy(&hdev->affinity_mask, mask);
3424 static void hclge_irq_affinity_release(struct kref *ref)
3428 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3430 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3431 &hdev->affinity_mask);
3433 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3434 hdev->affinity_notify.release = hclge_irq_affinity_release;
3435 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3436 &hdev->affinity_notify);
3439 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3441 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3442 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3445 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3449 hclge_get_misc_vector(hdev);
3451 /* this would be explicitly freed in the end */
3452 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3453 HCLGE_NAME, pci_name(hdev->pdev));
3454 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3455 0, hdev->misc_vector.name, hdev);
3457 hclge_free_vector(hdev, 0);
3458 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3459 hdev->misc_vector.vector_irq);
3465 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3467 free_irq(hdev->misc_vector.vector_irq, hdev);
3468 hclge_free_vector(hdev, 0);
3471 int hclge_notify_client(struct hclge_dev *hdev,
3472 enum hnae3_reset_notify_type type)
3474 struct hnae3_client *client = hdev->nic_client;
3477 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3480 if (!client->ops->reset_notify)
3483 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3484 struct hnae3_handle *handle = &hdev->vport[i].nic;
3487 ret = client->ops->reset_notify(handle, type);
3489 dev_err(&hdev->pdev->dev,
3490 "notify nic client failed %d(%d)\n", type, ret);
3498 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3499 enum hnae3_reset_notify_type type)
3501 struct hnae3_client *client = hdev->roce_client;
3505 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3508 if (!client->ops->reset_notify)
3511 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3512 struct hnae3_handle *handle = &hdev->vport[i].roce;
3514 ret = client->ops->reset_notify(handle, type);
3516 dev_err(&hdev->pdev->dev,
3517 "notify roce client failed %d(%d)",
3526 static int hclge_reset_wait(struct hclge_dev *hdev)
3528 #define HCLGE_RESET_WATI_MS 100
3529 #define HCLGE_RESET_WAIT_CNT 350
3531 u32 val, reg, reg_bit;
3534 switch (hdev->reset_type) {
3535 case HNAE3_IMP_RESET:
3536 reg = HCLGE_GLOBAL_RESET_REG;
3537 reg_bit = HCLGE_IMP_RESET_BIT;
3539 case HNAE3_GLOBAL_RESET:
3540 reg = HCLGE_GLOBAL_RESET_REG;
3541 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3543 case HNAE3_FUNC_RESET:
3544 reg = HCLGE_FUN_RST_ING;
3545 reg_bit = HCLGE_FUN_RST_ING_B;
3548 dev_err(&hdev->pdev->dev,
3549 "Wait for unsupported reset type: %d\n",
3554 val = hclge_read_dev(&hdev->hw, reg);
3555 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3556 msleep(HCLGE_RESET_WATI_MS);
3557 val = hclge_read_dev(&hdev->hw, reg);
3561 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3562 dev_warn(&hdev->pdev->dev,
3563 "Wait for reset timeout: %d\n", hdev->reset_type);
3570 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3572 struct hclge_vf_rst_cmd *req;
3573 struct hclge_desc desc;
3575 req = (struct hclge_vf_rst_cmd *)desc.data;
3576 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3577 req->dest_vfid = func_id;
3582 return hclge_cmd_send(&hdev->hw, &desc, 1);
3585 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3589 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3590 struct hclge_vport *vport = &hdev->vport[i];
3593 /* Send cmd to set/clear VF's FUNC_RST_ING */
3594 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3596 dev_err(&hdev->pdev->dev,
3597 "set vf(%u) rst failed %d!\n",
3598 vport->vport_id, ret);
3602 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3605 /* Inform VF to process the reset.
3606 * hclge_inform_reset_assert_to_vf may fail if VF
3607 * driver is not loaded.
3609 ret = hclge_inform_reset_assert_to_vf(vport);
3611 dev_warn(&hdev->pdev->dev,
3612 "inform reset to vf(%u) failed %d!\n",
3613 vport->vport_id, ret);
3619 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3621 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3622 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3623 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3626 hclge_mbx_handler(hdev);
3628 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3631 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3633 struct hclge_pf_rst_sync_cmd *req;
3634 struct hclge_desc desc;
3638 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3639 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3642 /* the VF needs to bring its netdev down via mailbox during PF or FLR reset */
3643 hclge_mailbox_service_task(hdev);
3645 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3646 /* for compatibility with old firmware, wait
3647 * 100 ms for the VF to stop IO
3649 if (ret == -EOPNOTSUPP) {
3650 msleep(HCLGE_RESET_SYNC_TIME);
3653 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3656 } else if (req->all_vf_ready) {
3659 msleep(HCLGE_PF_RESET_SYNC_TIME);
3660 hclge_cmd_reuse_desc(&desc, true);
3661 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3663 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3666 void hclge_report_hw_error(struct hclge_dev *hdev,
3667 enum hnae3_hw_error_type type)
3669 struct hnae3_client *client = hdev->nic_client;
3672 if (!client || !client->ops->process_hw_error ||
3673 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3676 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3677 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3680 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3684 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3685 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3686 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3687 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3688 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3691 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3692 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3693 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3694 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3698 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3700 struct hclge_desc desc;
3701 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3704 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3705 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3706 req->fun_reset_vfid = func_id;
3708 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3710 dev_err(&hdev->pdev->dev,
3711 "send function reset cmd fail, status =%d\n", ret);
3716 static void hclge_do_reset(struct hclge_dev *hdev)
3718 struct hnae3_handle *handle = &hdev->vport[0].nic;
3719 struct pci_dev *pdev = hdev->pdev;
3722 if (hclge_get_hw_reset_stat(handle)) {
3723 dev_info(&pdev->dev, "hardware reset not finished\n");
3724 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3725 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3726 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3730 switch (hdev->reset_type) {
3731 case HNAE3_GLOBAL_RESET:
3732 dev_info(&pdev->dev, "global reset requested\n");
3733 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3734 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3735 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3737 case HNAE3_FUNC_RESET:
3738 dev_info(&pdev->dev, "PF reset requested\n");
3739 /* schedule again to check later */
3740 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3741 hclge_reset_task_schedule(hdev);
3744 dev_warn(&pdev->dev,
3745 "unsupported reset type: %d\n", hdev->reset_type);
3750 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3751 unsigned long *addr)
3753 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3754 struct hclge_dev *hdev = ae_dev->priv;
3756 /* first, resolve any unknown reset type to the known type(s) */
3757 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3758 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3759 HCLGE_MISC_VECTOR_INT_STS);
3760 /* we will intentionally ignore any errors from this function
3761 * as we will end up in *some* reset request in any case
3763 if (hclge_handle_hw_msix_error(hdev, addr))
3764 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3767 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3768 /* We deferred the clearing of the error event which caused the
3769 * interrupt since it was not possible to do that in
3770 * interrupt context (and this is the reason we introduced the
3771 * new UNKNOWN reset type). Now that the errors have been
3772 * handled and cleared in hardware, we can safely enable
3773 * interrupts. This is an exception to the norm.
3775 hclge_enable_vector(&hdev->misc_vector, true);
3778 /* return the highest priority reset level amongst all */
3779 if (test_bit(HNAE3_IMP_RESET, addr)) {
3780 rst_level = HNAE3_IMP_RESET;
3781 clear_bit(HNAE3_IMP_RESET, addr);
3782 clear_bit(HNAE3_GLOBAL_RESET, addr);
3783 clear_bit(HNAE3_FUNC_RESET, addr);
3784 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3785 rst_level = HNAE3_GLOBAL_RESET;
3786 clear_bit(HNAE3_GLOBAL_RESET, addr);
3787 clear_bit(HNAE3_FUNC_RESET, addr);
3788 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3789 rst_level = HNAE3_FUNC_RESET;
3790 clear_bit(HNAE3_FUNC_RESET, addr);
3791 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3792 rst_level = HNAE3_FLR_RESET;
3793 clear_bit(HNAE3_FLR_RESET, addr);
3796 if (hdev->reset_type != HNAE3_NONE_RESET &&
3797 rst_level < hdev->reset_type)
3798 return HNAE3_NONE_RESET;
3803 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3807 switch (hdev->reset_type) {
3808 case HNAE3_IMP_RESET:
3809 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3811 case HNAE3_GLOBAL_RESET:
3812 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3821 /* For revision 0x20, the reset interrupt source
3822 * can only be cleared after the hardware reset is done
3824 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3825 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3828 hclge_enable_vector(&hdev->misc_vector, true);
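/* The "handshake" below signals software readiness to the firmware by
 * toggling the HCLGE_NIC_SW_RST_RDY bit in the NIC CSQ depth register:
 * it is enabled once reset preparation is done and cleared again after
 * re-initialization completes.
 */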
3831 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3835 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3837 reg_val |= HCLGE_NIC_SW_RST_RDY;
3839 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3841 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3844 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3848 ret = hclge_set_all_vf_rst(hdev, true);
3852 hclge_func_reset_sync_vf(hdev);
3857 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3862 switch (hdev->reset_type) {
3863 case HNAE3_FUNC_RESET:
3864 ret = hclge_func_reset_notify_vf(hdev);
3868 ret = hclge_func_reset_cmd(hdev, 0);
3870 dev_err(&hdev->pdev->dev,
3871 "asserting function reset fail %d!\n", ret);
3875 /* After performing PF reset, it is not necessary to do the
3876 * mailbox handling or send any command to firmware, because
3877 * any mailbox handling or command to firmware is only valid
3878 * after hclge_cmd_init is called.
3880 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3881 hdev->rst_stats.pf_rst_cnt++;
3883 case HNAE3_FLR_RESET:
3884 ret = hclge_func_reset_notify_vf(hdev);
3888 case HNAE3_IMP_RESET:
3889 hclge_handle_imp_error(hdev);
3890 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3891 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3892 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3898 /* inform hardware that preparatory work is done */
3899 msleep(HCLGE_RESET_SYNC_TIME);
3900 hclge_reset_handshake(hdev, true);
3901 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3906 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3908 #define MAX_RESET_FAIL_CNT 5
3910 if (hdev->reset_pending) {
3911 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3912 hdev->reset_pending);
3914 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3915 HCLGE_RESET_INT_M) {
3916 dev_info(&hdev->pdev->dev,
3917 "reset failed because new reset interrupt\n");
3918 hclge_clear_reset_cause(hdev);
3920 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3921 hdev->rst_stats.reset_fail_cnt++;
3922 set_bit(hdev->reset_type, &hdev->reset_pending);
3923 dev_info(&hdev->pdev->dev,
3924 "re-schedule reset task(%u)\n",
3925 hdev->rst_stats.reset_fail_cnt);
3929 hclge_clear_reset_cause(hdev);
3931 /* recover the handshake status when reset fails */
3932 hclge_reset_handshake(hdev, true);
3934 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3936 hclge_dbg_dump_rst_info(hdev);
3938 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3943 static int hclge_set_rst_done(struct hclge_dev *hdev)
3945 struct hclge_pf_rst_done_cmd *req;
3946 struct hclge_desc desc;
3949 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3950 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3951 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3953 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3954 /* To be compatible with the old firmware, which does not support
3955 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3958 if (ret == -EOPNOTSUPP) {
3959 dev_warn(&hdev->pdev->dev,
3960 "current firmware does not support command(0x%x)!\n",
3961 HCLGE_OPC_PF_RST_DONE);
3964 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3971 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3975 switch (hdev->reset_type) {
3976 case HNAE3_FUNC_RESET:
3977 case HNAE3_FLR_RESET:
3978 ret = hclge_set_all_vf_rst(hdev, false);
3980 case HNAE3_GLOBAL_RESET:
3981 case HNAE3_IMP_RESET:
3982 ret = hclge_set_rst_done(hdev);
3988 /* clear the handshake status after re-initialization is done */
3989 hclge_reset_handshake(hdev, false);
3994 static int hclge_reset_stack(struct hclge_dev *hdev)
3998 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4002 ret = hclge_reset_ae_dev(hdev->ae_dev);
4006 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4009 static int hclge_reset_prepare(struct hclge_dev *hdev)
4013 hdev->rst_stats.reset_cnt++;
4014 /* perform reset of the stack & ae device for a client */
4015 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4020 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4025 return hclge_reset_prepare_wait(hdev);
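/* Rebuild runs once the hardware reset has completed: re-initialize the
 * stack and ae device, clear the reset cause, bring the NIC and RoCE
 * clients back up, and finally honor any higher-priority reset still
 * recorded in default_reset_request.
 */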
4028 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4030 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4031 enum hnae3_reset_type reset_level;
4034 hdev->rst_stats.hw_reset_done_cnt++;
4036 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4041 ret = hclge_reset_stack(hdev);
4046 hclge_clear_reset_cause(hdev);
4048 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4049 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4053 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4056 ret = hclge_reset_prepare_up(hdev);
4061 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4066 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4070 hdev->last_reset_time = jiffies;
4071 hdev->rst_stats.reset_fail_cnt = 0;
4072 hdev->rst_stats.reset_done_cnt++;
4073 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4075 /* if default_reset_request has a higher level reset request,
4076 * it should be handled as soon as possible, since some errors
4077 * need this kind of reset to be fixed.
4079 reset_level = hclge_get_reset_level(ae_dev,
4080 &hdev->default_reset_request);
4081 if (reset_level != HNAE3_NONE_RESET)
4082 set_bit(reset_level, &hdev->reset_request);
4087 static void hclge_reset(struct hclge_dev *hdev)
4089 if (hclge_reset_prepare(hdev))
4092 if (hclge_reset_wait(hdev))
4095 if (hclge_reset_rebuild(hdev))
4101 if (hclge_reset_err_handle(hdev))
4102 hclge_reset_task_schedule(hdev);
4105 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4107 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4108 struct hclge_dev *hdev = ae_dev->priv;
4110 /* We might end up getting called broadly because of the 2 cases below:
4111 * 1. A recoverable error was conveyed through APEI and the only way to
4112 * bring back normalcy is to reset.
4113 * 2. A new reset request from the stack due to timeout
4115 * For the first case, the error event might not have an ae handle available.
4116 * Check if this is a new reset request and we are not here just because the
4117 * last reset attempt did not succeed and the watchdog hit us again. We will
4118 * know this if the last reset request did not occur very recently (watchdog
4119 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ).
4120 * In case of a new request we reset the "reset level" to PF reset.
4121 * And if it is a repeat reset request of the most recent one then we
4122 * want to make sure we throttle the reset request. Therefore, we will
4123 * not allow it again before 3*HZ has elapsed.
4126 handle = &hdev->vport[0].nic;
4128 if (time_before(jiffies, (hdev->last_reset_time +
4129 HCLGE_RESET_INTERVAL))) {
4130 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4132 } else if (hdev->default_reset_request) {
4134 hclge_get_reset_level(ae_dev,
4135 &hdev->default_reset_request);
4136 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4137 hdev->reset_level = HNAE3_FUNC_RESET;
4140 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4143 /* request reset & schedule reset task */
4144 set_bit(hdev->reset_level, &hdev->reset_request);
4145 hclge_reset_task_schedule(hdev);
4147 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4148 hdev->reset_level++;
4151 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4152 enum hnae3_reset_type rst_type)
4154 struct hclge_dev *hdev = ae_dev->priv;
4156 set_bit(rst_type, &hdev->default_reset_request);
4159 static void hclge_reset_timer(struct timer_list *t)
4161 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4163 /* if default_reset_request has no value, it means that this reset
4164 * request has already been handled, so just return here
4166 if (!hdev->default_reset_request)
4169 dev_info(&hdev->pdev->dev,
4170 "triggering reset in reset timer\n");
4171 hclge_reset_event(hdev->pdev, NULL);
4174 static void hclge_reset_subtask(struct hclge_dev *hdev)
4176 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4178 /* check if there is any ongoing reset in the hardware. This status can
4179 * be checked from reset_pending. If there is, then we need to wait for
4180 * the hardware to complete the reset.
4181 * a. If we are able to figure out in reasonable time that the hardware
4182 * has fully reset, then we can proceed with the driver and client
4184 * b. else, we can come back later to check this status so re-sched
4187 hdev->last_reset_time = jiffies;
4188 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4189 if (hdev->reset_type != HNAE3_NONE_RESET)
4192 /* check if we got any *new* reset requests to be honored */
4193 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4194 if (hdev->reset_type != HNAE3_NONE_RESET)
4195 hclge_do_reset(hdev);
4197 hdev->reset_type = HNAE3_NONE_RESET;
4200 static void hclge_reset_service_task(struct hclge_dev *hdev)
4202 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4205 down(&hdev->reset_sem);
4206 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4208 hclge_reset_subtask(hdev);
4210 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4211 up(&hdev->reset_sem);
4214 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4218 /* start from vport 1 since the PF is always alive */
4219 for (i = 1; i < hdev->num_alloc_vport; i++) {
4220 struct hclge_vport *vport = &hdev->vport[i];
4222 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4223 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4225 /* If the VF is not alive, set mps to the default value */
4226 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4227 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4231 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4233 unsigned long delta = round_jiffies_relative(HZ);
4235 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4238 /* Always handle the link updating to make sure link state is
4239 * updated when it is triggered by mbx.
4241 hclge_update_link_status(hdev);
4242 hclge_sync_mac_table(hdev);
4243 hclge_sync_promisc_mode(hdev);
4245 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4246 delta = jiffies - hdev->last_serv_processed;
4248 if (delta < round_jiffies_relative(HZ)) {
4249 delta = round_jiffies_relative(HZ) - delta;
4254 hdev->serv_processed_cnt++;
4255 hclge_update_vport_alive(hdev);
4257 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4258 hdev->last_serv_processed = jiffies;
4262 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4263 hclge_update_stats_for_all(hdev);
4265 hclge_update_port_info(hdev);
4266 hclge_sync_vlan_filter(hdev);
4268 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4269 hclge_rfs_filter_expire(hdev);
4271 hdev->last_serv_processed = jiffies;
4274 hclge_task_schedule(hdev, delta);
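/* The reset, mailbox and periodic subtasks all share the single delayed
 * work item queued on hclge_wq; the state bits tested at the top of
 * each subtask decide which of them actually runs on a given
 * invocation.
 */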
4277 static void hclge_service_task(struct work_struct *work)
4279 struct hclge_dev *hdev =
4280 container_of(work, struct hclge_dev, service_task.work);
4282 hclge_reset_service_task(hdev);
4283 hclge_mailbox_service_task(hdev);
4284 hclge_periodic_service_task(hdev);
4286 /* Handle reset and mbx again in case the periodic task delays the
4287 * handling by calling hclge_task_schedule() in
4288 * hclge_periodic_service_task().
4290 hclge_reset_service_task(hdev);
4291 hclge_mailbox_service_task(hdev);
4294 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4296 /* VF handle has no client */
4297 if (!handle->client)
4298 return container_of(handle, struct hclge_vport, nic);
4299 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4300 return container_of(handle, struct hclge_vport, roce);
4302 return container_of(handle, struct hclge_vport, nic);
4305 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4306 struct hnae3_vector_info *vector_info)
4308 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4310 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4312 /* need an extended offset to configure vectors >= 64 */
4313 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4314 vector_info->io_addr = hdev->hw.io_base +
4315 HCLGE_VECTOR_REG_BASE +
4316 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4318 vector_info->io_addr = hdev->hw.io_base +
4319 HCLGE_VECTOR_EXT_REG_BASE +
4320 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4321 HCLGE_VECTOR_REG_OFFSET_H +
4322 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4323 HCLGE_VECTOR_REG_OFFSET;
4325 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4326 hdev->vector_irq[idx] = vector_info->vector;
4329 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4330 struct hnae3_vector_info *vector_info)
4332 struct hclge_vport *vport = hclge_get_vport(handle);
4333 struct hnae3_vector_info *vector = vector_info;
4334 struct hclge_dev *hdev = vport->back;
4339 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4340 vector_num = min(hdev->num_msi_left, vector_num);
4342 for (j = 0; j < vector_num; j++) {
4343 while (++i < hdev->num_nic_msi) {
4344 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4345 hclge_get_vector_info(hdev, i, vector);
4353 hdev->num_msi_left -= alloc;
4354 hdev->num_msi_used += alloc;
4359 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4363 for (i = 0; i < hdev->num_msi; i++)
4364 if (vector == hdev->vector_irq[i])
4370 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4372 struct hclge_vport *vport = hclge_get_vport(handle);
4373 struct hclge_dev *hdev = vport->back;
4376 vector_id = hclge_get_vector_index(hdev, vector);
4377 if (vector_id < 0) {
4378 dev_err(&hdev->pdev->dev,
4379 "Get vector index fail. vector = %d\n", vector);
4383 hclge_free_vector(hdev, vector_id);
4388 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4390 return HCLGE_RSS_KEY_SIZE;
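/*
 * The RSS hash key does not fit in a single descriptor, so it is
 * written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes; every command
 * repeats the hash algorithm and carries the offset of its key chunk.
 */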
4393 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4394 const u8 hfunc, const u8 *key)
4396 struct hclge_rss_config_cmd *req;
4397 unsigned int key_offset = 0;
4398 struct hclge_desc desc;
4403 key_counts = HCLGE_RSS_KEY_SIZE;
4404 req = (struct hclge_rss_config_cmd *)desc.data;
4406 while (key_counts) {
4407 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4410 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4411 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4413 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4414 memcpy(req->hash_key,
4415 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4417 key_counts -= key_size;
4419 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4421 dev_err(&hdev->pdev->dev,
4422 "Configure RSS config fail, status = %d\n",
4430 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4432 struct hclge_rss_indirection_table_cmd *req;
4433 struct hclge_desc desc;
4434 int rss_cfg_tbl_num;
4442 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4443 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4444 HCLGE_RSS_CFG_TBL_SIZE;
4446 for (i = 0; i < rss_cfg_tbl_num; i++) {
4447 hclge_cmd_setup_basic_desc
4448 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4450 req->start_table_index =
4451 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4452 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4453 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4454 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4455 req->rss_qid_l[j] = qid & 0xff;
4456 rss_msb_oft =
4457 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4458 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4459 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4460 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4462 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4464 dev_err(&hdev->pdev->dev,
4465 "Configure rss indir table fail,status = %d\n",
4473 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4474 u16 *tc_size, u16 *tc_offset)
4476 struct hclge_rss_tc_mode_cmd *req;
4477 struct hclge_desc desc;
4481 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4482 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4484 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4487 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4488 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4489 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4490 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4491 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4492 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4493 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4495 req->rss_tc_mode[i] = cpu_to_le16(mode);
4498 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4500 dev_err(&hdev->pdev->dev,
4501 "Configure rss tc mode fail, status = %d\n", ret);
4506 static void hclge_get_rss_type(struct hclge_vport *vport)
4508 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4509 vport->rss_tuple_sets.ipv4_udp_en ||
4510 vport->rss_tuple_sets.ipv4_sctp_en ||
4511 vport->rss_tuple_sets.ipv6_tcp_en ||
4512 vport->rss_tuple_sets.ipv6_udp_en ||
4513 vport->rss_tuple_sets.ipv6_sctp_en)
4514 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4515 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4516 vport->rss_tuple_sets.ipv6_fragment_en)
4517 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4519 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4522 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4524 struct hclge_rss_input_tuple_cmd *req;
4525 struct hclge_desc desc;
4528 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4530 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4532 /* Get the tuple cfg from the PF */
4533 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4534 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4535 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4536 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4537 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4538 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4539 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4540 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4541 hclge_get_rss_type(&hdev->vport[0]);
4542 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4544 dev_err(&hdev->pdev->dev,
4545 "Configure rss input fail, status = %d\n", ret);
4549 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4552 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4553 struct hclge_vport *vport = hclge_get_vport(handle);
4556 /* Get hash algorithm */
4558 switch (vport->rss_algo) {
4559 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4560 *hfunc = ETH_RSS_HASH_TOP;
4562 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4563 *hfunc = ETH_RSS_HASH_XOR;
4566 *hfunc = ETH_RSS_HASH_UNKNOWN;
4571 /* Get the RSS Key required by the user */
4573 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4575 /* Get indirect table */
4577 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4578 indir[i] = vport->rss_indirection_tbl[i];
4583 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4584 const u8 *key, const u8 hfunc)
4586 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4587 struct hclge_vport *vport = hclge_get_vport(handle);
4588 struct hclge_dev *hdev = vport->back;
4592 /* Set the RSS hash key if specified by the user */
4595 case ETH_RSS_HASH_TOP:
4596 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4598 case ETH_RSS_HASH_XOR:
4599 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4601 case ETH_RSS_HASH_NO_CHANGE:
4602 hash_algo = vport->rss_algo;
4608 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4612 /* Update the shadow RSS key with the user specified key */
4613 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4614 vport->rss_algo = hash_algo;
4617 /* Update the shadow RSS table with user specified qids */
4618 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4619 vport->rss_indirection_tbl[i] = indir[i];
4621 /* Update the hardware */
4622 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
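/*
 * Translate the ethtool RXH_* hash fields of an rxnfc request into
 * the hardware tuple bits: L4 source/destination port, IP
 * source/destination address, and, for SCTP flows, the verification
 * tag.
 */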
4625 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4627 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4629 if (nfc->data & RXH_L4_B_2_3)
4630 hash_sets |= HCLGE_D_PORT_BIT;
4632 hash_sets &= ~HCLGE_D_PORT_BIT;
4634 if (nfc->data & RXH_IP_SRC)
4635 hash_sets |= HCLGE_S_IP_BIT;
4637 hash_sets &= ~HCLGE_S_IP_BIT;
4639 if (nfc->data & RXH_IP_DST)
4640 hash_sets |= HCLGE_D_IP_BIT;
4642 hash_sets &= ~HCLGE_D_IP_BIT;
4644 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4645 hash_sets |= HCLGE_V_TAG_BIT;
4650 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4651 struct ethtool_rxnfc *nfc,
4652 struct hclge_rss_input_tuple_cmd *req)
4654 struct hclge_dev *hdev = vport->back;
4657 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4658 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4659 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4660 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4661 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4662 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4663 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4664 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4666 tuple_sets = hclge_get_rss_hash_bits(nfc);
4667 switch (nfc->flow_type) {
4669 req->ipv4_tcp_en = tuple_sets;
4672 req->ipv6_tcp_en = tuple_sets;
4675 req->ipv4_udp_en = tuple_sets;
4678 req->ipv6_udp_en = tuple_sets;
4681 req->ipv4_sctp_en = tuple_sets;
4684 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4685 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4688 req->ipv6_sctp_en = tuple_sets;
4691 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4694 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4703 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4704 struct ethtool_rxnfc *nfc)
4706 struct hclge_vport *vport = hclge_get_vport(handle);
4707 struct hclge_dev *hdev = vport->back;
4708 struct hclge_rss_input_tuple_cmd *req;
4709 struct hclge_desc desc;
4712 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4713 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4716 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4717 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4719 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4721 dev_err(&hdev->pdev->dev,
4722 "failed to init rss tuple cmd, ret = %d\n", ret);
4726 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4728 dev_err(&hdev->pdev->dev,
4729 "Set rss tuple fail, status = %d\n", ret);
4733 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4734 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4735 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4736 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4737 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4738 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4739 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4740 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4741 hclge_get_rss_type(vport);
4745 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4748 switch (flow_type) {
4750 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4753 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4756 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4759 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4762 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4765 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4769 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4778 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4782 if (tuple_sets & HCLGE_D_PORT_BIT)
4783 tuple_data |= RXH_L4_B_2_3;
4784 if (tuple_sets & HCLGE_S_PORT_BIT)
4785 tuple_data |= RXH_L4_B_0_1;
4786 if (tuple_sets & HCLGE_D_IP_BIT)
4787 tuple_data |= RXH_IP_DST;
4788 if (tuple_sets & HCLGE_S_IP_BIT)
4789 tuple_data |= RXH_IP_SRC;
4794 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4795 struct ethtool_rxnfc *nfc)
4797 struct hclge_vport *vport = hclge_get_vport(handle);
4803 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4804 if (ret || !tuple_sets)
4807 nfc->data = hclge_convert_rss_tuple(tuple_sets);
4812 static int hclge_get_tc_size(struct hnae3_handle *handle)
4814 struct hclge_vport *vport = hclge_get_vport(handle);
4815 struct hclge_dev *hdev = vport->back;
4817 return hdev->pf_rss_size_max;
4820 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4822 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4823 struct hclge_vport *vport = hdev->vport;
4824 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4825 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4826 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4827 struct hnae3_tc_info *tc_info;
4832 tc_info = &vport->nic.kinfo.tc_info;
4833 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4834 rss_size = tc_info->tqp_count[i];
4837 if (!(hdev->hw_tc_map & BIT(i)))
4840 /* tc_size set to hardware is the log2 of the roundup power of two
4841 * of rss_size; the actual queue size is limited by the indirection
4842 * table.
4843 */
4844 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4846 dev_err(&hdev->pdev->dev,
4847 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4852 roundup_size = roundup_pow_of_two(rss_size);
4853 roundup_size = ilog2(roundup_size);
4856 tc_size[i] = roundup_size;
4857 tc_offset[i] = tc_info->tqp_offset[i];
4860 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
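/*
 * Full RSS hardware initialization for the PF: program the
 * indirection table, the hash key and algorithm, the input tuple
 * selection, and finally the per-TC RSS mode.
 */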
4863 int hclge_rss_init_hw(struct hclge_dev *hdev)
4865 struct hclge_vport *vport = hdev->vport;
4866 u16 *rss_indir = vport[0].rss_indirection_tbl;
4867 u8 *key = vport[0].rss_hash_key;
4868 u8 hfunc = vport[0].rss_algo;
4871 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4875 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4879 ret = hclge_set_rss_input_tuple(hdev);
4883 return hclge_init_rss_tc_mode(hdev);
4886 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4888 struct hclge_vport *vport = hdev->vport;
4891 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4892 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4893 vport[j].rss_indirection_tbl[i] =
4894 i % vport[j].alloc_rss_size;
4898 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4900 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4901 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4902 struct hclge_vport *vport = hdev->vport;
4904 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4905 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4907 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4910 vport[i].rss_tuple_sets.ipv4_tcp_en =
4911 HCLGE_RSS_INPUT_TUPLE_OTHER;
4912 vport[i].rss_tuple_sets.ipv4_udp_en =
4913 HCLGE_RSS_INPUT_TUPLE_OTHER;
4914 vport[i].rss_tuple_sets.ipv4_sctp_en =
4915 HCLGE_RSS_INPUT_TUPLE_SCTP;
4916 vport[i].rss_tuple_sets.ipv4_fragment_en =
4917 HCLGE_RSS_INPUT_TUPLE_OTHER;
4918 vport[i].rss_tuple_sets.ipv6_tcp_en =
4919 HCLGE_RSS_INPUT_TUPLE_OTHER;
4920 vport[i].rss_tuple_sets.ipv6_udp_en =
4921 HCLGE_RSS_INPUT_TUPLE_OTHER;
4922 vport[i].rss_tuple_sets.ipv6_sctp_en =
4923 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4924 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4925 HCLGE_RSS_INPUT_TUPLE_SCTP;
4926 vport[i].rss_tuple_sets.ipv6_fragment_en =
4927 HCLGE_RSS_INPUT_TUPLE_OTHER;
4929 vport[i].rss_algo = rss_algo;
4931 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4932 sizeof(*rss_ind_tbl), GFP_KERNEL);
4936 vport[i].rss_indirection_tbl = rss_ind_tbl;
4937 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4938 HCLGE_RSS_KEY_SIZE);
4941 hclge_rss_indir_init_cfg(hdev);
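/*
 * Map (en == true) or unmap (en == false) a chain of TX/RX rings to a
 * vector. Each descriptor carries at most
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so a long chain is
 * flushed to hardware and a fresh descriptor is set up whenever the
 * current one fills up.
 */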
4946 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4947 int vector_id, bool en,
4948 struct hnae3_ring_chain_node *ring_chain)
4950 struct hclge_dev *hdev = vport->back;
4951 struct hnae3_ring_chain_node *node;
4952 struct hclge_desc desc;
4953 struct hclge_ctrl_vector_chain_cmd *req =
4954 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4955 enum hclge_cmd_status status;
4956 enum hclge_opcode_type op;
4957 u16 tqp_type_and_id;
4960 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4961 hclge_cmd_setup_basic_desc(&desc, op, false);
4962 req->int_vector_id_l = hnae3_get_field(vector_id,
4963 HCLGE_VECTOR_ID_L_M,
4964 HCLGE_VECTOR_ID_L_S);
4965 req->int_vector_id_h = hnae3_get_field(vector_id,
4966 HCLGE_VECTOR_ID_H_M,
4967 HCLGE_VECTOR_ID_H_S);
4970 for (node = ring_chain; node; node = node->next) {
4971 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4972 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4973 HCLGE_INT_TYPE_S,
4974 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4975 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4976 HCLGE_TQP_ID_S, node->tqp_index);
4977 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4978 HCLGE_INT_GL_IDX_S,
4979 hnae3_get_field(node->int_gl_idx,
4980 HNAE3_RING_GL_IDX_M,
4981 HNAE3_RING_GL_IDX_S));
4982 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4983 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4984 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4985 req->vfid = vport->vport_id;
4987 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4989 dev_err(&hdev->pdev->dev,
4990 "Map TQP fail, status is %d.\n",
4996 hclge_cmd_setup_basic_desc(&desc,
4999 req->int_vector_id_l =
5000 hnae3_get_field(vector_id,
5001 HCLGE_VECTOR_ID_L_M,
5002 HCLGE_VECTOR_ID_L_S);
5003 req->int_vector_id_h =
5004 hnae3_get_field(vector_id,
5005 HCLGE_VECTOR_ID_H_M,
5006 HCLGE_VECTOR_ID_H_S);
5011 req->int_cause_num = i;
5012 req->vfid = vport->vport_id;
5013 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5015 dev_err(&hdev->pdev->dev,
5016 "Map TQP fail, status is %d.\n", status);
5024 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5025 struct hnae3_ring_chain_node *ring_chain)
5027 struct hclge_vport *vport = hclge_get_vport(handle);
5028 struct hclge_dev *hdev = vport->back;
5031 vector_id = hclge_get_vector_index(hdev, vector);
5032 if (vector_id < 0) {
5033 dev_err(&hdev->pdev->dev,
5034 "failed to get vector index. vector=%d\n", vector);
5038 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5041 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5042 struct hnae3_ring_chain_node *ring_chain)
5044 struct hclge_vport *vport = hclge_get_vport(handle);
5045 struct hclge_dev *hdev = vport->back;
5048 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5051 vector_id = hclge_get_vector_index(hdev, vector);
5052 if (vector_id < 0) {
5053 dev_err(&handle->pdev->dev,
5054 "Get vector index fail. ret =%d\n", vector_id);
5058 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5060 dev_err(&handle->pdev->dev,
5061 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5067 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5068 bool en_uc, bool en_mc, bool en_bc)
5070 struct hclge_vport *vport = &hdev->vport[vf_id];
5071 struct hnae3_handle *handle = &vport->nic;
5072 struct hclge_promisc_cfg_cmd *req;
5073 struct hclge_desc desc;
5074 bool uc_tx_en = en_uc;
5078 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5080 req = (struct hclge_promisc_cfg_cmd *)desc.data;
5083 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5086 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5087 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5088 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5089 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5090 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5091 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5092 req->extend_promisc = promisc_cfg;
5094 /* to be compatible with DEVICE_VERSION_V1/2 */
5096 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5097 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5098 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5099 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5100 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5101 req->promisc = promisc_cfg;
5103 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5105 dev_err(&hdev->pdev->dev,
5106 "failed to set vport %u promisc mode, ret = %d.\n",
5112 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5113 bool en_mc_pmc, bool en_bc_pmc)
5115 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5116 en_uc_pmc, en_mc_pmc, en_bc_pmc);
5119 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5122 struct hclge_vport *vport = hclge_get_vport(handle);
5123 struct hclge_dev *hdev = vport->back;
5124 bool en_bc_pmc = true;
5126 /* For devices whose version is below V2, if broadcast promisc is enabled,
5127 * the vlan filter is always bypassed. So broadcast promisc should be
5128 * disabled until the user enables promisc mode
5129 */
5130 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5131 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5133 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5137 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5139 struct hclge_vport *vport = hclge_get_vport(handle);
5140 struct hclge_dev *hdev = vport->back;
5142 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
5145 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5147 struct hclge_get_fd_mode_cmd *req;
5148 struct hclge_desc desc;
5151 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5153 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5155 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5157 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5161 *fd_mode = req->mode;
5166 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5167 u32 *stage1_entry_num,
5168 u32 *stage2_entry_num,
5169 u16 *stage1_counter_num,
5170 u16 *stage2_counter_num)
5172 struct hclge_get_fd_allocation_cmd *req;
5173 struct hclge_desc desc;
5176 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5178 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5180 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5182 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5187 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5188 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5189 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5190 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5195 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5196 enum HCLGE_FD_STAGE stage_num)
5198 struct hclge_set_fd_key_config_cmd *req;
5199 struct hclge_fd_key_cfg *stage;
5200 struct hclge_desc desc;
5203 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5205 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5206 stage = &hdev->fd_cfg.key_cfg[stage_num];
5207 req->stage = stage_num;
5208 req->key_select = stage->key_sel;
5209 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5210 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5211 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5212 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5213 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5214 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5216 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5218 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
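/*
 * Flow director initialization: query the FD mode to size the TCAM
 * key, enable the stage-1 inner 5-tuple fields (plus the MAC tuples
 * when the 400-bit key is available), select the meta data fields,
 * and query the entry/counter allocation before committing the
 * stage-1 key configuration.
 */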
5223 static int hclge_init_fd_config(struct hclge_dev *hdev)
5225 #define LOW_2_WORDS 0x03
5226 struct hclge_fd_key_cfg *key_cfg;
5229 if (!hnae3_dev_fd_supported(hdev))
5232 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5236 switch (hdev->fd_cfg.fd_mode) {
5237 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5238 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5240 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5241 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5244 dev_err(&hdev->pdev->dev,
5245 "Unsupported flow director mode %u\n",
5246 hdev->fd_cfg.fd_mode);
5250 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5251 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5252 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5253 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5254 key_cfg->outer_sipv6_word_en = 0;
5255 key_cfg->outer_dipv6_word_en = 0;
5257 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5258 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5259 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5260 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5262 /* With the max 400-bit key, MAC tuples for ETHER_FLOW are also supported */
5263 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5264 key_cfg->tuple_active |=
5265 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5267 /* roce_type is used to filter RoCE frames
5268 * dst_vport is used to specify the rule
5269 */
5270 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5272 ret = hclge_get_fd_allocation(hdev,
5273 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5274 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5275 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5276 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5280 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5283 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5284 int loc, u8 *key, bool is_add)
5286 struct hclge_fd_tcam_config_1_cmd *req1;
5287 struct hclge_fd_tcam_config_2_cmd *req2;
5288 struct hclge_fd_tcam_config_3_cmd *req3;
5289 struct hclge_desc desc[3];
5292 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5293 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5294 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5295 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5296 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5298 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5299 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5300 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5302 req1->stage = stage;
5303 req1->xy_sel = sel_x ? 1 : 0;
5304 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5305 req1->index = cpu_to_le32(loc);
5306 req1->entry_vld = sel_x ? is_add : 0;
5309 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5310 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5311 sizeof(req2->tcam_data));
5312 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5313 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5316 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5318 dev_err(&hdev->pdev->dev,
5319 "config tcam key fail, ret=%d\n",
5325 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5326 struct hclge_fd_ad_data *action)
5328 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5329 struct hclge_fd_ad_config_cmd *req;
5330 struct hclge_desc desc;
5334 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5336 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5337 req->index = cpu_to_le32(loc);
5340 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5341 action->write_rule_id_to_bd);
5342 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5344 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5345 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5346 action->override_tc);
5347 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5348 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5351 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5352 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5353 action->forward_to_direct_queue);
5354 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5356 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5357 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5358 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5359 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5360 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5361 action->counter_id);
5363 req->ad_data = cpu_to_le64(ad_data);
5364 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5366 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
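/*
 * Convert one tuple of an FD rule into its TCAM x/y key pair. The
 * calc_x()/calc_y() helpers encode each value/mask pair into the
 * TCAM's ternary match representation; tuples flagged in
 * rule->unused_tuple are skipped.
 */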
5371 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5372 struct hclge_fd_rule *rule)
5374 u16 tmp_x_s, tmp_y_s;
5375 u32 tmp_x_l, tmp_y_l;
5378 if (rule->unused_tuple & tuple_bit)
5381 switch (tuple_bit) {
5382 case BIT(INNER_DST_MAC):
5383 for (i = 0; i < ETH_ALEN; i++) {
5384 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5385 rule->tuples_mask.dst_mac[i]);
5386 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5387 rule->tuples_mask.dst_mac[i]);
5391 case BIT(INNER_SRC_MAC):
5392 for (i = 0; i < ETH_ALEN; i++) {
5393 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5394 rule->tuples_mask.src_mac[i]);
5395 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5396 rule->tuples_mask.src_mac[i]);
5400 case BIT(INNER_VLAN_TAG_FST):
5401 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5402 rule->tuples_mask.vlan_tag1);
5403 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5404 rule->tuples_mask.vlan_tag1);
5405 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5406 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5409 case BIT(INNER_ETH_TYPE):
5410 calc_x(tmp_x_s, rule->tuples.ether_proto,
5411 rule->tuples_mask.ether_proto);
5412 calc_y(tmp_y_s, rule->tuples.ether_proto,
5413 rule->tuples_mask.ether_proto);
5414 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5415 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5418 case BIT(INNER_IP_TOS):
5419 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5420 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5423 case BIT(INNER_IP_PROTO):
5424 calc_x(*key_x, rule->tuples.ip_proto,
5425 rule->tuples_mask.ip_proto);
5426 calc_y(*key_y, rule->tuples.ip_proto,
5427 rule->tuples_mask.ip_proto);
5430 case BIT(INNER_SRC_IP):
5431 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5432 rule->tuples_mask.src_ip[IPV4_INDEX]);
5433 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5434 rule->tuples_mask.src_ip[IPV4_INDEX]);
5435 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5436 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5439 case BIT(INNER_DST_IP):
5440 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5441 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5442 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5443 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5444 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5445 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5448 case BIT(INNER_SRC_PORT):
5449 calc_x(tmp_x_s, rule->tuples.src_port,
5450 rule->tuples_mask.src_port);
5451 calc_y(tmp_y_s, rule->tuples.src_port,
5452 rule->tuples_mask.src_port);
5453 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5454 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5457 case BIT(INNER_DST_PORT):
5458 calc_x(tmp_x_s, rule->tuples.dst_port,
5459 rule->tuples_mask.dst_port);
5460 calc_y(tmp_y_s, rule->tuples.dst_port,
5461 rule->tuples_mask.dst_port);
5462 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5463 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5471 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5472 u8 vf_id, u8 network_port_id)
5474 u32 port_number = 0;
5476 if (port_type == HOST_PORT) {
5477 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5479 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5481 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5483 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5484 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5485 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5491 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5492 __le32 *key_x, __le32 *key_y,
5493 struct hclge_fd_rule *rule)
5495 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5496 u8 cur_pos = 0, tuple_size, shift_bits;
5499 for (i = 0; i < MAX_META_DATA; i++) {
5500 tuple_size = meta_data_key_info[i].key_length;
5501 tuple_bit = key_cfg->meta_data_active & BIT(i);
5503 switch (tuple_bit) {
5504 case BIT(ROCE_TYPE):
5505 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5506 cur_pos += tuple_size;
5508 case BIT(DST_VPORT):
5509 port_number = hclge_get_port_number(HOST_PORT, 0,
5511 hnae3_set_field(meta_data,
5512 GENMASK(cur_pos + tuple_size, cur_pos),
5513 cur_pos, port_number);
5514 cur_pos += tuple_size;
5521 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5522 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5523 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5525 *key_x = cpu_to_le32(tmp_x << shift_bits);
5526 *key_y = cpu_to_le32(tmp_y << shift_bits);
5529 /* A complete key consists of a meta data key and a tuple key.
5530 * The meta data key is stored in the MSB region, the tuple key in
5531 * the LSB region, and unused bits are filled with 0.
5532 */
5533 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5534 struct hclge_fd_rule *rule)
5536 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5537 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5538 u8 *cur_key_x, *cur_key_y;
5539 u8 meta_data_region;
5544 memset(key_x, 0, sizeof(key_x));
5545 memset(key_y, 0, sizeof(key_y));
5549 for (i = 0; i < MAX_TUPLE; i++) {
5553 tuple_size = tuple_key_info[i].key_length / 8;
5554 check_tuple = key_cfg->tuple_active & BIT(i);
5556 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5559 cur_key_x += tuple_size;
5560 cur_key_y += tuple_size;
5564 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5565 MAX_META_DATA_LENGTH / 8;
5567 hclge_fd_convert_meta_data(key_cfg,
5568 (__le32 *)(key_x + meta_data_region),
5569 (__le32 *)(key_y + meta_data_region),
5572 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5575 dev_err(&hdev->pdev->dev,
5576 "fd key_y config fail, loc=%u, ret=%d\n",
5577 rule->location, ret);
5581 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5584 dev_err(&hdev->pdev->dev,
5585 "fd key_x config fail, loc=%u, ret=%d\n",
5586 rule->location, ret);
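/*
 * Translate the rule's action into the AD (action data) entry: drop
 * the packet, override the TC (using the queue offset and log2 queue
 * count of the target TC), or forward to a specific queue; the rule
 * location is also written back to the BD (write_rule_id_to_bd).
 */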
5590 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5591 struct hclge_fd_rule *rule)
5593 struct hclge_vport *vport = hdev->vport;
5594 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5595 struct hclge_fd_ad_data ad_data;
5597 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5598 ad_data.ad_id = rule->location;
5600 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5601 ad_data.drop_packet = true;
5602 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5603 ad_data.override_tc = true;
5605 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5607 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5609 ad_data.forward_to_direct_queue = true;
5610 ad_data.queue_id = rule->queue_id;
5613 ad_data.use_counter = false;
5614 ad_data.counter_id = 0;
5616 ad_data.use_next_stage = false;
5617 ad_data.next_input_key = 0;
5619 ad_data.write_rule_id_to_bd = true;
5620 ad_data.rule_id = rule->location;
5622 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
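/*
 * The hclge_fd_check_*_tuple() helpers below validate one ethtool
 * flow spec layout each and accumulate the fields the user left
 * zeroed into *unused_tuple, so those fields can be skipped when the
 * TCAM key is built.
 */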
5625 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5628 if (!spec || !unused_tuple)
5631 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5634 *unused_tuple |= BIT(INNER_SRC_IP);
5637 *unused_tuple |= BIT(INNER_DST_IP);
5640 *unused_tuple |= BIT(INNER_SRC_PORT);
5643 *unused_tuple |= BIT(INNER_DST_PORT);
5646 *unused_tuple |= BIT(INNER_IP_TOS);
5651 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5654 if (!spec || !unused_tuple)
5657 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5658 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5661 *unused_tuple |= BIT(INNER_SRC_IP);
5664 *unused_tuple |= BIT(INNER_DST_IP);
5667 *unused_tuple |= BIT(INNER_IP_TOS);
5670 *unused_tuple |= BIT(INNER_IP_PROTO);
5672 if (spec->l4_4_bytes)
5675 if (spec->ip_ver != ETH_RX_NFC_IP4)
5681 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5684 if (!spec || !unused_tuple)
5687 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5690 /* check whether the src/dst ip addresses are used */
5691 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5692 *unused_tuple |= BIT(INNER_SRC_IP);
5694 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5695 *unused_tuple |= BIT(INNER_DST_IP);
5698 *unused_tuple |= BIT(INNER_SRC_PORT);
5701 *unused_tuple |= BIT(INNER_DST_PORT);
5709 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5712 if (!spec || !unused_tuple)
5715 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5716 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5718 /* check whether the src/dst ip addresses are used */
5719 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5720 *unused_tuple |= BIT(INNER_SRC_IP);
5722 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5723 *unused_tuple |= BIT(INNER_DST_IP);
5725 if (!spec->l4_proto)
5726 *unused_tuple |= BIT(INNER_IP_PROTO);
5731 if (spec->l4_4_bytes)
5737 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5739 if (!spec || !unused_tuple)
5742 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5743 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5744 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5746 if (is_zero_ether_addr(spec->h_source))
5747 *unused_tuple |= BIT(INNER_SRC_MAC);
5749 if (is_zero_ether_addr(spec->h_dest))
5750 *unused_tuple |= BIT(INNER_DST_MAC);
5753 *unused_tuple |= BIT(INNER_ETH_TYPE);
5758 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5759 struct ethtool_rx_flow_spec *fs,
5762 if (fs->flow_type & FLOW_EXT) {
5763 if (fs->h_ext.vlan_etype) {
5764 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5768 if (!fs->h_ext.vlan_tci)
5769 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5771 if (fs->m_ext.vlan_tci &&
5772 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5773 dev_err(&hdev->pdev->dev,
5774 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5775 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5779 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5782 if (fs->flow_type & FLOW_MAC_EXT) {
5783 if (hdev->fd_cfg.fd_mode !=
5784 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5785 dev_err(&hdev->pdev->dev,
5786 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5790 if (is_zero_ether_addr(fs->h_ext.h_dest))
5791 *unused_tuple |= BIT(INNER_DST_MAC);
5793 *unused_tuple &= ~BIT(INNER_DST_MAC);
5799 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5800 struct ethtool_rx_flow_spec *fs,
5806 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5807 dev_err(&hdev->pdev->dev,
5808 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5810 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5814 if ((fs->flow_type & FLOW_EXT) &&
5815 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5816 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5820 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5821 switch (flow_type) {
5825 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5829 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5835 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5838 case IPV6_USER_FLOW:
5839 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5843 if (hdev->fd_cfg.fd_mode !=
5844 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5845 dev_err(&hdev->pdev->dev,
5846 "ETHER_FLOW is not supported in current fd mode!\n");
5850 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5854 dev_err(&hdev->pdev->dev,
5855 "unsupported protocol type, protocol type = %#x\n",
5861 dev_err(&hdev->pdev->dev,
5862 "failed to check flow union tuple, ret = %d\n",
5867 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5870 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5872 struct hclge_fd_rule *rule = NULL;
5873 struct hlist_node *node2;
5875 spin_lock_bh(&hdev->fd_rule_lock);
5876 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5877 if (rule->location >= location)
5881 spin_unlock_bh(&hdev->fd_rule_lock);
5883 return rule && rule->location == location;
5886 /* the caller must hold fd_rule_lock when calling this function */
5887 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5888 struct hclge_fd_rule *new_rule,
5892 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5893 struct hlist_node *node2;
5895 if (is_add && !new_rule)
5898 hlist_for_each_entry_safe(rule, node2,
5899 &hdev->fd_rule_list, rule_node) {
5900 if (rule->location >= location)
5905 if (rule && rule->location == location) {
5906 hlist_del(&rule->rule_node);
5908 hdev->hclge_fd_rule_num--;
5911 if (!hdev->hclge_fd_rule_num)
5912 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5913 clear_bit(location, hdev->fd_bmap);
5917 } else if (!is_add) {
5918 dev_err(&hdev->pdev->dev,
5919 "delete fail, rule %u is inexistent\n",
5924 INIT_HLIST_NODE(&new_rule->rule_node);
5927 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5929 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5931 set_bit(location, hdev->fd_bmap);
5932 hdev->hclge_fd_rule_num++;
5933 hdev->fd_active_type = new_rule->rule_type;
5938 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
5939 struct ethtool_rx_flow_spec *fs,
5940 struct hclge_fd_rule *rule, u8 ip_proto)
5942 rule->tuples.src_ip[IPV4_INDEX] =
5943 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5944 rule->tuples_mask.src_ip[IPV4_INDEX] =
5945 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5947 rule->tuples.dst_ip[IPV4_INDEX] =
5948 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5949 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5950 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5952 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5953 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5955 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5956 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5958 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5959 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5961 rule->tuples.ether_proto = ETH_P_IP;
5962 rule->tuples_mask.ether_proto = 0xFFFF;
5964 rule->tuples.ip_proto = ip_proto;
5965 rule->tuples_mask.ip_proto = 0xFF;
5968 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
5969 struct ethtool_rx_flow_spec *fs,
5970 struct hclge_fd_rule *rule)
5972 rule->tuples.src_ip[IPV4_INDEX] =
5973 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5974 rule->tuples_mask.src_ip[IPV4_INDEX] =
5975 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5977 rule->tuples.dst_ip[IPV4_INDEX] =
5978 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5979 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5980 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5982 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5983 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5985 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5986 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5988 rule->tuples.ether_proto = ETH_P_IP;
5989 rule->tuples_mask.ether_proto = 0xFFFF;
5992 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
5993 struct ethtool_rx_flow_spec *fs,
5994 struct hclge_fd_rule *rule, u8 ip_proto)
5996 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
5998 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6001 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6003 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6006 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6007 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6009 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6010 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6012 rule->tuples.ether_proto = ETH_P_IPV6;
6013 rule->tuples_mask.ether_proto = 0xFFFF;
6015 rule->tuples.ip_proto = ip_proto;
6016 rule->tuples_mask.ip_proto = 0xFF;
6019 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6020 struct ethtool_rx_flow_spec *fs,
6021 struct hclge_fd_rule *rule)
6023 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6025 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6028 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6030 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6033 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6034 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6036 rule->tuples.ether_proto = ETH_P_IPV6;
6037 rule->tuples_mask.ether_proto = 0xFFFF;
6040 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6041 struct ethtool_rx_flow_spec *fs,
6042 struct hclge_fd_rule *rule)
6044 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6045 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6047 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6048 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6050 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6051 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6054 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6055 struct ethtool_rx_flow_spec *fs,
6056 struct hclge_fd_rule *rule)
6058 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6060 switch (flow_type) {
6062 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6065 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6068 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6071 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6074 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6077 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6080 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6082 case IPV6_USER_FLOW:
6083 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6086 hclge_fd_get_ether_tuple(hdev, fs, rule);
6092 if (fs->flow_type & FLOW_EXT) {
6093 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6094 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6097 if (fs->flow_type & FLOW_MAC_EXT) {
6098 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6099 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6105 /* the caller must hold fd_rule_lock when calling this function */
6106 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6107 struct hclge_fd_rule *rule)
6112 dev_err(&hdev->pdev->dev,
6113 "The flow director rule is NULL\n");
6117 /* it never fails here, so there is no need to check the return value */
6118 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
6120 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6124 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6131 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
6135 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6137 struct hclge_vport *vport = hclge_get_vport(handle);
6138 struct hclge_dev *hdev = vport->back;
6140 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
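/*
 * Decode the ethtool ring_cookie of a flow rule: RX_CLS_FLOW_DISC
 * selects the drop action; otherwise the cookie encodes a VF id and a
 * queue id, which are validated against the number of requested VFs
 * and the target vport's TQP count before the forward-to-queue action
 * is chosen.
 */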
6143 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6144 u16 *vport_id, u8 *action, u16 *queue_id)
6146 struct hclge_vport *vport = hdev->vport;
6148 if (ring_cookie == RX_CLS_FLOW_DISC) {
6149 *action = HCLGE_FD_ACTION_DROP_PACKET;
6151 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6152 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6155 if (vf > hdev->num_req_vfs) {
6156 dev_err(&hdev->pdev->dev,
6157 "Error: vf id (%u) > max vf num (%u)\n",
6158 vf, hdev->num_req_vfs);
6162 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6163 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6166 dev_err(&hdev->pdev->dev,
6167 "Error: queue id (%u) > max tqp num (%u)\n",
6172 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6179 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6180 struct ethtool_rxnfc *cmd)
6182 struct hclge_vport *vport = hclge_get_vport(handle);
6183 struct hclge_dev *hdev = vport->back;
6184 u16 dst_vport_id = 0, q_index = 0;
6185 struct ethtool_rx_flow_spec *fs;
6186 struct hclge_fd_rule *rule;
6191 if (!hnae3_dev_fd_supported(hdev)) {
6192 dev_err(&hdev->pdev->dev,
6193 "flow table director is not supported\n");
6198 dev_err(&hdev->pdev->dev,
6199 "please enable flow director first\n");
6203 if (hclge_is_cls_flower_active(handle)) {
6204 dev_err(&hdev->pdev->dev,
6205 "please delete all exist cls flower rules first\n");
6209 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6211 ret = hclge_fd_check_spec(hdev, fs, &unused);
6215 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6220 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6224 ret = hclge_fd_get_tuple(hdev, fs, rule);
6230 rule->flow_type = fs->flow_type;
6231 rule->location = fs->location;
6232 rule->unused_tuple = unused;
6233 rule->vf_id = dst_vport_id;
6234 rule->queue_id = q_index;
6235 rule->action = action;
6236 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6238 /* to avoid rule conflicts, when the user configures rules via
6239 * ethtool, we need to clear all aRFS rules
6240 */
6241 spin_lock_bh(&hdev->fd_rule_lock);
6242 hclge_clear_arfs_rules(handle);
6244 ret = hclge_fd_config_rule(hdev, rule);
6246 spin_unlock_bh(&hdev->fd_rule_lock);
6251 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6252 struct ethtool_rxnfc *cmd)
6254 struct hclge_vport *vport = hclge_get_vport(handle);
6255 struct hclge_dev *hdev = vport->back;
6256 struct ethtool_rx_flow_spec *fs;
6259 if (!hnae3_dev_fd_supported(hdev))
6262 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6264 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6267 if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6268 !hclge_fd_rule_exist(hdev, fs->location)) {
6269 dev_err(&hdev->pdev->dev,
6270 "Delete fail, rule %u is inexistent\n", fs->location);
6274 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6279 spin_lock_bh(&hdev->fd_rule_lock);
6280 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6282 spin_unlock_bh(&hdev->fd_rule_lock);
6287 /* the caller must hold fd_rule_lock when calling this function */
6288 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6291 struct hclge_vport *vport = hclge_get_vport(handle);
6292 struct hclge_dev *hdev = vport->back;
6293 struct hclge_fd_rule *rule;
6294 struct hlist_node *node;
6297 if (!hnae3_dev_fd_supported(hdev))
6300 for_each_set_bit(location, hdev->fd_bmap,
6301 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6302 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6306 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6308 hlist_del(&rule->rule_node);
6311 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6312 hdev->hclge_fd_rule_num = 0;
6313 bitmap_zero(hdev->fd_bmap,
6314 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6318 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6320 struct hclge_vport *vport = hclge_get_vport(handle);
6321 struct hclge_dev *hdev = vport->back;
6322 struct hclge_fd_rule *rule;
6323 struct hlist_node *node;
6326 /* Return ok here, because reset error handling will check this
6327 * return value. If an error is returned here, the reset process will
6328 * fail.
6329 */
6330 if (!hnae3_dev_fd_supported(hdev))
6333 /* if fd is disabled, it should not be restored during reset */
6337 spin_lock_bh(&hdev->fd_rule_lock);
6338 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6339 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6341 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6344 dev_warn(&hdev->pdev->dev,
6345 "Restore rule %u failed, remove it\n",
6347 clear_bit(rule->location, hdev->fd_bmap);
6348 hlist_del(&rule->rule_node);
6350 hdev->hclge_fd_rule_num--;
6354 if (hdev->hclge_fd_rule_num)
6355 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6357 spin_unlock_bh(&hdev->fd_rule_lock);
6362 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6363 struct ethtool_rxnfc *cmd)
6365 struct hclge_vport *vport = hclge_get_vport(handle);
6366 struct hclge_dev *hdev = vport->back;
6368 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6371 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6372 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6377 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6378 struct ethtool_tcpip4_spec *spec,
6379 struct ethtool_tcpip4_spec *spec_mask)
6381 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6382 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6383 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6385 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6386 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6387 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6389 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6390 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6391 0 : cpu_to_be16(rule->tuples_mask.src_port);
6393 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6394 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6395 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6397 spec->tos = rule->tuples.ip_tos;
6398 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6399 0 : rule->tuples_mask.ip_tos;
6402 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6403 struct ethtool_usrip4_spec *spec,
6404 struct ethtool_usrip4_spec *spec_mask)
6406 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6407 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6408 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6410 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6411 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6412 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6414 spec->tos = rule->tuples.ip_tos;
6415 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6416 0 : rule->tuples_mask.ip_tos;
6418 spec->proto = rule->tuples.ip_proto;
6419 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6420 0 : rule->tuples_mask.ip_proto;
6422 spec->ip_ver = ETH_RX_NFC_IP4;
6425 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6426 struct ethtool_tcpip6_spec *spec,
6427 struct ethtool_tcpip6_spec *spec_mask)
6429 cpu_to_be32_array(spec->ip6src,
6430 rule->tuples.src_ip, IPV6_SIZE);
6431 cpu_to_be32_array(spec->ip6dst,
6432 rule->tuples.dst_ip, IPV6_SIZE);
6433 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6434 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6436 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6439 if (rule->unused_tuple & BIT(INNER_DST_IP))
6440 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6442 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6445 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6446 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6447 0 : cpu_to_be16(rule->tuples_mask.src_port);
6449 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6450 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6451 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6454 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6455 struct ethtool_usrip6_spec *spec,
6456 struct ethtool_usrip6_spec *spec_mask)
6458 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6459 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6460 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6461 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6463 cpu_to_be32_array(spec_mask->ip6src,
6464 rule->tuples_mask.src_ip, IPV6_SIZE);
6466 if (rule->unused_tuple & BIT(INNER_DST_IP))
6467 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6469 cpu_to_be32_array(spec_mask->ip6dst,
6470 rule->tuples_mask.dst_ip, IPV6_SIZE);
6472 spec->l4_proto = rule->tuples.ip_proto;
6473 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6474 0 : rule->tuples_mask.ip_proto;
6477 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6478 struct ethhdr *spec,
6479 struct ethhdr *spec_mask)
6481 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6482 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6484 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6485 eth_zero_addr(spec_mask->h_source);
6487 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6489 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6490 eth_zero_addr(spec_mask->h_dest);
6492 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6494 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6495 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6496 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6499 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6500 struct hclge_fd_rule *rule)
6502 if (fs->flow_type & FLOW_EXT) {
6503 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6504 fs->m_ext.vlan_tci =
6505 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6506 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6509 if (fs->flow_type & FLOW_MAC_EXT) {
6510 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6511 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6512 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6514 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6515 rule->tuples_mask.dst_mac);
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
					 &fs->m_u.tcp_ip4_spec);
		break;
	case IP_USER_FLOW:
		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
				      &fs->m_u.usr_ip4_spec);
		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
					 &fs->m_u.tcp_ip6_spec);
		break;
	case IPV6_USER_FLOW:
		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
				      &fs->m_u.usr_ip6_spec);
		break;
	/* The flow type of the fd rule has been checked before it was added
	 * to the rule list. As all other flow types have been handled, it
	 * must be ETHER_FLOW for the default case.
	 */
	default:
		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
					&fs->m_u.ether_spec);
		break;
	}

	hclge_fd_get_ext_info(fs, rule);

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}
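/* Extract the tuple fields used by aRFS from the dissected flow keys.
 * Note that IPv4 addresses are stored in the last word of the IPV6_SIZE
 * tuple arrays, matching the layout the rules are written to hardware in.
 */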
static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32

	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		int i;

		for (i = 0; i < IPV6_SIZE; i++) {
			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
		}
	}
}

/* traverse all rules, check whether an existing rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}
static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}
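/* aRFS entry point, called per flow to steer it to the queue polled by the
 * receiving CPU. On success it returns the rule location, which the stack
 * later hands back to hclge_rfs_filter_expire() for expiry checks.
 */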
static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples = {};
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 tmp_queue_id;
	u16 bit_id;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	/* when there are already fd rules added by the user,
	 * arfs should not work
	 */
	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	/* check whether a flow director filter exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOSPC;
		}

		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOMEM;
		}

		set_bit(bit_id, hdev->fd_bmap);
		rule->location = bit_id;
		rule->arfs.flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		ret = hclge_fd_config_rule(hdev, rule);

		spin_unlock_bh(&hdev->fd_rule_lock);

		if (ret)
			return ret;

		return rule->location;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (rule->queue_id == queue_id)
		return rule->location;

	tmp_queue_id = rule->queue_id;
	rule->queue_id = queue_id;
	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret) {
		rule->queue_id = tmp_queue_id;
		return ret;
	}

	return rule->location;
}
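/* Walk the aRFS rules and drop the ones the stack no longer needs, as
 * reported by rps_may_expire_flow(). Rules are unhooked under fd_rule_lock
 * and the TCAM entries are invalidated after the lock is released.
 */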
static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	HLIST_HEAD(del_list);

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->arfs.flow_id, rule->location)) {
			hlist_del_init(&rule->rule_node);
			hlist_add_head(&rule->rule_node, &del_list);
			hdev->hclge_fd_rule_num--;
			clear_bit(rule->location, hdev->fd_bmap);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				     rule->location, NULL, false);
		kfree(rule);
	}
#endif
}

/* must be called with fd_rule_lock held */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}
static void hclge_get_cls_key_basic(const struct flow_rule *flow,
				    struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(flow, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}
		rule->tuples.ether_proto = ethtype_key;
		rule->tuples_mask.ether_proto = ethtype_mask;
		rule->tuples.ip_proto = match.key->ip_proto;
		rule->tuples_mask.ip_proto = match.mask->ip_proto;
	} else {
		rule->unused_tuple |= BIT(INNER_IP_PROTO);
		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
	}
}

static void hclge_get_cls_key_mac(const struct flow_rule *flow,
				  struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(flow, &match);
		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
		ether_addr_copy(rule->tuples.src_mac, match.key->src);
		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
	} else {
		rule->unused_tuple |= BIT(INNER_DST_MAC);
		rule->unused_tuple |= BIT(INNER_SRC_MAC);
	}
}

static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(flow, &match);
		rule->tuples.vlan_tag1 = match.key->vlan_id |
				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
	} else {
		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}
}
static void hclge_get_cls_key_ip(const struct flow_rule *flow,
				 struct hclge_fd_rule *rule)
{
	u16 addr_type = 0;

	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(flow, &match);
		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(flow, &match);
		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
						be32_to_cpu(match.mask->src);
		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
						be32_to_cpu(match.mask->dst);
	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(flow, &match);
		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  match.mask->src.s6_addr32, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  match.mask->dst.s6_addr32, IPV6_SIZE);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_IP);
		rule->unused_tuple |= BIT(INNER_DST_IP);
	}
}

static void hclge_get_cls_key_port(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(flow, &match);

		rule->tuples.src_port = be16_to_cpu(match.key->src);
		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_PORT);
		rule->unused_tuple |= BIT(INNER_DST_PORT);
	}
}
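/* Reject dissector keys the flow director cannot match on, then collect
 * each supported key (basic, mac, vlan, ip, port) into the rule tuples.
 */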
static int hclge_parse_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower,
				  struct hclge_fd_rule *rule)
{
	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
	struct flow_dissector *dissector = flow->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
			dissector->used_keys);
		return -EOPNOTSUPP;
	}

	hclge_get_cls_key_basic(flow, rule);
	hclge_get_cls_key_mac(flow, rule);
	hclge_get_cls_key_vlan(flow, rule);
	hclge_get_cls_key_ip(flow, rule);
	hclge_get_cls_key_port(flow, rule);

	return 0;
}

static int hclge_check_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower, int tc)
{
	u32 prio = cls_flower->common.prio;

	if (tc < 0 || tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
		return -EINVAL;
	}

	if (prio == 0 ||
	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
		dev_err(&hdev->pdev->dev,
			"prio %u should be in range[1, %u]\n",
			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
		return -EINVAL;
	}

	if (test_bit(prio - 1, hdev->fd_bmap)) {
		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
		return -EINVAL;
	}
	return 0;
}
static int hclge_add_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower,
				int tc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		dev_err(&hdev->pdev->dev,
			"please remove all exist fd rules via ethtool first\n");
		return -EINVAL;
	}

	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check cls flower params, ret = %d\n", ret);
		return ret;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
	if (ret)
		goto err;

	rule->action = HCLGE_FD_ACTION_SELECT_TC;
	rule->cls_flower.tc = tc;
	rule->location = cls_flower->common.prio - 1;
	rule->vf_id = 0;
	rule->cls_flower.cookie = cls_flower->cookie;
	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;

	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(handle);

	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to add cls flower rule, ret = %d\n", ret);
		goto err;
	}

	return 0;
err:
	kfree(rule);
	return ret;
}

static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
						   unsigned long cookie)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->cls_flower.cookie == cookie)
			return rule;
	}

	return NULL;
}
static int hclge_del_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	spin_lock_bh(&hdev->fd_rule_lock);

	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
	if (!rule) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EINVAL;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
				   NULL, false);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to delete cls flower rule %u, ret = %d\n",
			rule->location, ret);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return ret;
	}

	ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to delete cls flower rule %u in list, ret = %d\n",
			rule->location, ret);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return ret;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}

static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool clear;

	hdev->fd_en = enable;
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;

	if (!enable) {
		spin_lock_bh(&hdev->fd_rule_lock);
		hclge_del_all_fd_entries(handle, clear);
		spin_unlock_bh(&hdev->fd_rule_lock);
	} else {
		hclge_restore_fd_entries(handle);
	}
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);

	if (enable) {
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
	}

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
				     u8 switch_param, u8 param_mask)
{
	struct hclge_mac_vlan_switch_cmd *req;
	struct hclge_desc desc;
	u32 func_id;
	int ret;

	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;

	/* read current config parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
				   true);
	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
	req->func_id = cpu_to_le32(func_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"read mac vlan switch parameter fail, ret = %d\n", ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_cmd_reuse_desc(&desc, false);
	req->switch_param = (req->switch_param & param_mask) | switch_param;
	req->param_mask = param_mask;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"set mac vlan switch parameter fail, ret = %d\n", ret);
	return ret;
}
static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
				       int link_ret)
{
#define HCLGE_PHY_LINK_STATUS_NUM  200

	struct phy_device *phydev = hdev->hw.mac.phydev;
	int i = 0;
	int ret;

	do {
		ret = phy_read_status(phydev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"phy update link status fail, ret = %d\n", ret);
			return;
		}

		if (phydev->link == link_ret)
			break;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
}

static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
{
#define HCLGE_MAC_LINK_STATUS_NUM  100

	int link_status;
	int i = 0;
	int ret;

	do {
		ret = hclge_get_mac_link_status(hdev, &link_status);
		if (ret)
			return ret;
		if (link_status == link_ret)
			return 0;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
	return -EBUSY;
}

static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
					  bool is_phy)
{
	int link_ret;

	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

	if (is_phy)
		hclge_phy_link_status_wait(hdev, link_ret);

	return hclge_mac_link_status_wait(hdev, link_ret);
}
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_COMMON_LB_RETRY_MS	10
#define HCLGE_COMMON_LB_RETRY_NUM	100

	struct hclge_common_lb_cmd *req;
	struct hclge_desc desc;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_common_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PHY:
		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported common loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
	} else {
		req->mask = loop_mode_b;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_COMMON_LB_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"common loopback get, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));

	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
		return -EIO;
	}
	return ret;
}
static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
	int ret;

	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
	if (ret)
		return ret;

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"serdes loopback config mac mode timeout\n");

	return ret;
}

static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
				     struct phy_device *phydev)
{
	int ret;

	if (!phydev->suspended) {
		ret = phy_suspend(phydev);
		if (ret)
			return ret;
	}

	ret = phy_resume(phydev);
	if (ret)
		return ret;

	return phy_loopback(phydev, true);
}

static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
				      struct phy_device *phydev)
{
	int ret;

	ret = phy_loopback(phydev, false);
	if (ret)
		return ret;

	return phy_suspend(phydev);
}

static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int ret;

	if (!phydev) {
		if (hnae3_dev_phy_imp_supported(hdev))
			return hclge_set_common_loopback(hdev, en,
							 HNAE3_LOOP_PHY);
		return -ENOTSUPP;
	}

	if (en)
		ret = hclge_enable_phy_loopback(hdev, phydev);
	else
		ret = hclge_disable_phy_loopback(hdev, phydev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set phy loopback fail, ret = %d\n", ret);
		return ret;
	}

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"phy loopback config mac mode timeout\n");

	return ret;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}
static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
	 * the same, the packets are looped back in the SSU. If SSU loopback
	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
	 */
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);

		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
						HCLGE_SWITCH_ALW_LPBK_MASK);
		if (ret)
			return ret;
	}

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_common_loopback(hdev, en, loop_mode);
		break;
	case HNAE3_LOOP_PHY:
		ret = hclge_set_phy_loopback(hdev, en);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_set_default_loopback(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_app_loopback(hdev, false);
	if (ret)
		return ret;

	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
	if (ret)
		return ret;

	return hclge_cfg_common_loopback(hdev, false,
					 HNAE3_LOOP_PARALLEL_SERDES);
}
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}

static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev, 0);
	} else {
		/* Set the DOWN flag here to disable link updating */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclge_flush_link_update(hdev);
	}
}
static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(handle);
	spin_unlock_bh(&hdev->fd_rule_lock);

	/* If it is not PF reset, the firmware will disable the MAC,
	 * so we only need to stop the phy here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		hclge_update_link_status(hdev);
		return;
	}

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclge_reset_tqp(handle, i);

	hclge_config_mac_tnl_int(hdev, false);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}
int hclge_vport_start(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;

	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
		if (vport->vport_id) {
			hclge_restore_mac_table_common(vport);
			hclge_restore_vport_vlan_table(vport);
		} else {
			hclge_restore_hw_table(hdev);
		}
	}

	clear_bit(vport->vport_id, hdev->vport_config_block);

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}
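/* Convert the resp_code returned by firmware for a MAC vlan table add,
 * remove or lookup command into a standard errno value, logging any
 * undefined codes.
 */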
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if (!resp_code || resp_code == 1)
			return 0;
		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
			 resp_code == HCLGE_ADD_MC_OVERFLOW)
			return -ENOSPC;

		dev_err(&hdev->pdev->dev,
			"add mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"remove mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	}

	dev_err(&hdev->pdev->dev,
		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);

	return -EINVAL;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
			ret);
		return ret;
	}

	*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}

static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "failed to alloc umv space, want %u, get %u\n",
			 hdev->wanted_umv_size, allocated_size);

	hdev->max_umv_size = allocated_size;
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);

	return 0;
}

static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->vport_lock);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
	mutex_unlock(&hdev->vport_lock);
}

static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	if (need_lock)
		mutex_lock(&hdev->vport_lock);

	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);

	if (need_lock)
		mutex_unlock(&hdev->vport_lock);

	return is_full;
}

static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
}
static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
						  const u8 *mac_addr)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
				  enum HCLGE_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGE_MAC_TO_ADD:
		if (mac_node->state == HCLGE_MAC_TO_DEL)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGE_MAC_TO_DEL:
		if (mac_node->state == HCLGE_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGE_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * ACTIVE.
	 */
	case HCLGE_MAC_ACTIVE:
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			mac_node->state = HCLGE_MAC_ACTIVE;

		break;
	}
}
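/* Record a requested MAC address state change in the vport's uc/mc list.
 * The actual hardware table update happens later in the service task; see
 * hclge_sync_vport_mac_table() below.
 */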
int hclge_update_mac_list(struct hclge_vport *vport,
			  enum HCLGE_MAC_NODE_STATE state,
			  enum HCLGE_MAC_ADDR_TYPE mac_type,
			  const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclge_find_mac_node(list, addr);
	if (mac_node) {
		hclge_update_mac_node(mac_node, state);
		spin_unlock_bh(&vport->mac_list_lock);
		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
		return 0;
	}

	/* if this address is never added, unnecessary to delete */
	if (state == HCLGE_MAC_TO_DEL) {
		spin_unlock_bh(&vport->mac_list_lock);
		dev_err(&hdev->pdev->dev,
			"failed to delete address %pM from mac list\n",
			addr);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&vport->mac_list_lock);
		return -ENOMEM;
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&vport->mac_list_lock);

	return 0;
}

static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
				     addr);
}
int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr, is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Repeated unicast entries
	 * are not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		mutex_lock(&hdev->vport_lock);
		if (!hclge_is_umv_space_full(vport, false)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			mutex_unlock(&hdev->vport_lock);
			return ret;
		}
		mutex_unlock(&hdev->vport_lock);

		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
				hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret) {
		mutex_lock(&hdev->vport_lock);
		hclge_update_umv_space(vport, true);
		mutex_unlock(&hdev->vport_lock);
	} else if (ret == -ENOENT) {
		ret = 0;
	}

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	/* if the table has already overflowed, do not print each time */
	if (status == -ENOSPC &&
	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (status)
			return status;

		if (hclge_is_all_function_id_zero(desc))
			/* All vfids are zero, so delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all vfids are zero, so just update the vfids */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	} else if (status == -ENOENT) {
		status = 0;
	}

	return status;
}
static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
				      struct list_head *list,
				      int (*sync)(struct hclge_vport *,
						  const unsigned char *))
{
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = sync(vport, mac_node->mac_addr);
		if (!ret) {
			mac_node->state = HCLGE_MAC_ACTIVE;
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}

static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
					struct list_head *list,
					int (*unsync)(struct hclge_vport *,
						      const unsigned char *))
{
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = unsync(vport, mac_node->mac_addr);
		if (!ret || ret == -ENOENT) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}
static bool hclge_sync_from_add_list(struct list_head *add_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	bool all_added = true;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			all_added = false;

		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, a TO_DEL request was received during
		 * the time window of adding the mac address into the mac
		 * table. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so it will be removed next time; else it must be
		 * TO_ADD, meaning this address has not been added to the
		 * mac table, so just remove the mac node.
		 */
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclge_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_DEL;
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}

	return all_added;
}

static void hclge_sync_from_del_list(struct list_head *del_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, a new
			 * TO_ADD request was received during the time window
			 * of configuring the mac address. Since the mac node
			 * state is TO_ADD and the address is still in the
			 * hardware (the delete failed), we just need to
			 * change the mac node state to ACTIVE.
			 */
			new_node->state = HCLGE_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}
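/* Sync path for the uc/mc address lists: entries are first moved to
 * temporary add/del lists under mac_list_lock so that the slow hardware
 * table writes can run outside the spinlock; the overflow promisc flags
 * are refreshed afterwards based on whether every address fit.
 */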
static void hclge_update_overflow_flags(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type,
					bool is_all_added)
{
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
	} else {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
	}
}

static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
				       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;
	bool all_added;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;
			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&vport->mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_uc_addr_common);
		hclge_sync_vport_mac_list(vport, &tmp_add_list,
					  hclge_add_uc_addr_common);
	} else {
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_mc_addr_common);
		hclge_sync_vport_mac_list(vport, &tmp_add_list,
					  hclge_add_mc_addr_common);
	}

	/* if some mac addresses failed to be added or deleted, move them
	 * back to the mac_list and retry next time.
	 */
	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);
	all_added = hclge_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_update_overflow_flags(vport, mac_type, all_added);
}
static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(vport->vport_id, hdev->vport_config_block))
		return false;

	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
		return true;

	return false;
}

static void hclge_sync_mac_table(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (!hclge_need_sync_mac_table(vport))
			continue;

		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
	}
}

static void hclge_build_del_list(struct list_head *list,
				 bool is_del_list,
				 struct list_head *tmp_del_list)
{
	struct hclge_mac_node *mac_cfg, *tmp;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		switch (mac_cfg->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_del(&mac_cfg->node);
			list_add_tail(&mac_cfg->node, tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			if (is_del_list) {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
			break;
		}
	}
}
static void hclge_unsync_del_list(struct hclge_vport *vport,
				  int (*unsync)(struct hclge_vport *vport,
						const unsigned char *addr),
				  bool is_del_list,
				  struct list_head *tmp_del_list)
{
	struct hclge_mac_node *mac_cfg, *tmp;
	int ret;

	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
		ret = unsync(vport, mac_cfg->mac_addr);
		if (!ret || ret == -ENOENT) {
			/* clear all mac addrs from hardware, but keep these
			 * mac addrs in the mac list, and restore them after
			 * the vf reset finishes.
			 */
			if (!is_del_list &&
			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
				mac_cfg->state = HCLGE_MAC_TO_ADD;
			} else {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
		} else if (is_del_list) {
			mac_cfg->state = HCLGE_MAC_TO_DEL;
		}
	}
}

void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	if (mac_type == HCLGE_MAC_ADDR_UC) {
		list = &vport->uc_mac_list;
		unsync = hclge_rm_uc_addr_common;
	} else {
		list = &vport->mc_mac_list;
		unsync = hclge_rm_mc_addr_common;
	}

	INIT_LIST_HEAD(&tmp_del_list);

	if (!is_del_list)
		set_bit(vport->vport_id, hdev->vport_config_block);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_build_del_list(list, is_del_list, &tmp_del_list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);

	spin_unlock_bh(&vport->mac_list_lock);
}
/* remove all mac addresses when uninitializing */
static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	INIT_LIST_HEAD(&tmp_del_list);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			list_del(&mac_node->node);
			kfree(mac_node);
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	if (mac_type == HCLGE_MAC_ADDR_UC)
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_uc_addr_common);
	else
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_mc_addr_common);

	if (!list_empty(&tmp_del_list))
		dev_warn(&hdev->pdev->dev,
			 "uninit %s mac list for vport %u not completely.\n",
			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
			 vport->vport_id);

	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

static void hclge_uninit_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
	}
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status = 0;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%u.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}
static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
				     const u8 *mac_addr)
{
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int i;

	if (is_zero_ether_addr(mac_addr))
		return false;

	memset(&req, 0, sizeof(req));
	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
	req.egress_port = cpu_to_le16(egress_port);
	hclge_prepare_mac_addr(&req, mac_addr, false);

	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
		return true;

	vf_idx += HCLGE_VF_VPORT_START_NUM;
	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
		if (i != vf_idx &&
		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
			return true;

	return false;
}

static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
			    u8 *mac_addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
		dev_info(&hdev->pdev->dev,
			 "Specified MAC(=%pM) is same as before, no change committed!\n",
			 mac_addr);
		return 0;
	}

	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
			mac_addr);
		return -EEXIST;
	}

	ether_addr_copy(vport->vf_info.mac, mac_addr);

	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		dev_info(&hdev->pdev->dev,
			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
			 vf, mac_addr);
		return hclge_inform_reset_assert_to_vf(vport);
	}

	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
		 vf, mac_addr);
	return 0;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
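/* Queue the device MAC address change in the uc mac list: the new address
 * is (re)inserted at the list head and the old one is marked for deletion,
 * so the service task applies the swap asynchronously.
 */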
int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
				       const u8 *old_addr, const u8 *new_addr)
{
	struct list_head *list = &vport->uc_mac_list;
	struct hclge_mac_node *old_node, *new_node;

	new_node = hclge_find_mac_node(list, new_addr);
	if (!new_node) {
		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
		if (!new_node)
			return -ENOMEM;

		new_node->state = HCLGE_MAC_TO_ADD;
		ether_addr_copy(new_node->mac_addr, new_addr);
		list_add(&new_node->node, list);
	} else {
		if (new_node->state == HCLGE_MAC_TO_DEL)
			new_node->state = HCLGE_MAC_ACTIVE;

		/* make sure the new addr is in the list head, avoid dev
		 * addr may be not re-added into mac table for the umv space
		 * limitation after global/imp reset which will clear mac
		 * table by hardware.
		 */
		list_move(&new_node->node, list);
	}

	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
		old_node = hclge_find_mac_node(list, old_addr);
		if (old_node) {
			if (old_node->state == HCLGE_MAC_TO_ADD) {
				list_del(&old_node->node);
				kfree(old_node);
			} else {
				old_node->state = HCLGE_MAC_TO_DEL;
			}
		}
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	return 0;
}
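/* Change the PF's own (device) MAC address. The MAC pause address is updated
 * first, then the address is swapped in the unicast MAC list under
 * mac_list_lock so the set_rx_mode path cannot remove it concurrently.
 */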
static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned char *old_addr = NULL;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"change uc mac err! invalid mac: %pM.\n",
			new_addr);
		return -EINVAL;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to configure mac pause address, ret = %d\n",
			ret);
		return ret;
	}

	if (!is_first)
		old_addr = hdev->hw.mac.mac_addr;

	spin_lock_bh(&vport->mac_list_lock);
	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to change the mac addr:%pM, ret = %d\n",
			new_addr, ret);
		spin_unlock_bh(&vport->mac_list_lock);

		if (!is_first)
			hclge_pause_addr_cfg(hdev, old_addr);

		return ret;
	}
	/* we must update dev addr with spin lock protect, preventing dev addr
	 * being removed by set_rx_mode path.
	 */
	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
	spin_unlock_bh(&vport->mac_list_lock);

	hclge_task_schedule(hdev, 0);

	return 0;
}
static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);

	if (!hnae3_dev_phy_imp_supported(hdev))
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hdev->hw.mac.phy_addr;
		/* this command reads phy id and register at the same time */
		fallthrough;
	case SIOCGMIIREG:
		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
		return 0;

	case SIOCSMIIREG:
		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
	default:
		return -EOPNOTSUPP;
	}
}
static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return hclge_mii_ioctl(hdev, ifr, cmd);

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	/* read current vlan filter parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vlan filter config, ret = %d.\n", ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_cmd_reuse_desc(&desc, false);
	req->vlan_fe = filter_en ?
			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
			ret);

	return ret;
}
#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable, 0);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable, 0);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
					   0);
	}
	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
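/* Build and send the two-descriptor VF VLAN filter command; the target VF is
 * selected by setting its bit in the vf_bitmap spread across the two
 * descriptors.
 */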
static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
					bool is_kill, u16 vlan,
					struct hclge_desc *desc)
{
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
					  bool is_kill, struct hclge_desc *desc)
{
	struct hclge_vlan_filter_vf_cfg_cmd *req;

	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req->resp_code || req->resp_code == 1)
			return 0;

		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			set_bit(vfid, hdev->vf_vlan_full);
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%u.\n",
			req->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req->resp_code)
			return 0;

		/* vf vlan filter is disabled when vf vlan table is full,
		 * then new vlan id will not be added into vf vlan table.
		 * Just return 0 without warning, avoid massive verbose
		 * print logs when unload.
		 */
		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%u.\n",
			req->resp_code);
	}

	return -EIO;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
				    bool is_kill, u16 vlan,
				    __be16 proto)
{
	struct hclge_vport *vport = &hdev->vport[vfid];
	struct hclge_desc desc[2];
	int ret;

	/* if vf vlan table is full, firmware will close vf vlan filter, it
	 * is unable and unnecessary to add new vlan id to vf vlan filter.
	 * If spoof check is enabled, and vf vlan is full, it shouldn't add
	 * a new vlan, because tx packets with these vlan ids will be dropped.
	 */
	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
		if (vport->vf_info.spoofchk && vlan) {
			dev_err(&hdev->pdev->dev,
				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
			return -EPERM;
		}
		return 0;
	}

	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
	if (ret)
		return ret;

	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
}
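/* Program one VLAN id into the PF (port) VLAN filter table. Each command
 * covers a window of HCLGE_VLAN_ID_OFFSET_STEP VLAN ids selected by
 * vlan_offset.
 */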
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
			   HCLGE_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %u vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %u is already in vlan %u\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %u is not in vlan %u\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
		      vcfg->tag_shift_mode_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
		     HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
		      vcfg->strip_tag1_discard_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
		      vcfg->strip_tag2_discard_en ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
		     HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);

		vport->txvlan_cfg.accept_tag1 =
			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20), newer revisions support them,
	 * and these two fields can not be configured by user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;
	vport->txvlan_cfg.tag_shift_mode_en = true;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.strip_tag2_discard_en = true;
	}

	vport->rxvlan_cfg.strip_tag1_discard_en = false;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
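/* Track a VLAN id in the vport's software VLAN list; hd_tbl_status records
 * whether the id has also been written to the hardware filter table.
 */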
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}
static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}
static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	clear_bit(vport->vport_id, hdev->vf_vlan_full);
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}
void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	u16 vlan_proto;
	u16 vlan_id;
	u16 state;
	int ret;

	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
	state = vport->port_base_vlan_cfg.state;

	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
					 vport->vport_id, vlan_id,
					 false);
		return;
	}

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
					       vport->vport_id,
					       vlan->vlan_id, false);
		if (ret)
			break;
		vlan->hd_tbl_status = true;
	}
}
/* For global reset and imp reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, then they
 * can be restored in the service task after reset complete. Furthermore,
 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
 * be restored after reset, so just remove these mac nodes from mac_list.
 */
static void hclge_mac_node_convert_for_reset(struct list_head *list)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_ADD;
		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}
void hclge_restore_mac_table_common(struct hclge_vport *vport)
{
	spin_lock_bh(&vport->mac_list_lock);

	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	spin_unlock_bh(&vport->mac_list_lock);
}
static void hclge_restore_hw_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;

	hclge_restore_mac_table_common(vport);
	hclge_restore_vport_vlan_table(vport);
	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);

	hclge_restore_fd_entries(handle);
}
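/* Enable or disable hardware rx VLAN tag stripping, honoring the current
 * port based VLAN state of the vport.
 */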
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
		vport->rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.strip_tag2_discard_en = true;
	}

	vport->rxvlan_cfg.strip_tag1_discard_en = false;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						false);
	}

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}
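/* Apply a new port based VLAN configuration to a vport: reprogram the tx/rx
 * VLAN offload, swap the hardware filter entries and record the new state.
 */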
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
	if (ret)
		return ret;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(old_vlan_info->vlan_proto),
					       vport->vport_id,
					       old_vlan_info->vlan_tag,
					       true);
		if (ret)
			return ret;

		goto update;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

	/* update state only when disable/enable port based VLAN */
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

update:
	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;

	return 0;
}
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_ENABLE;
	} else {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_DISABLE;
		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_MODIFY;
	}
}
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vfid);
	if (!vport)
		return -EINVAL;

	/* qos is a 3 bits value, so can not be bigger than 7 */
	if (vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to update port base vlan for vf %d, ret = %d\n",
			vfid, ret);
		return ret;
	}

	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
	 * VLAN state, so no need to inform it here.
	 */
	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
						  vport->vport_id, state,
						  vlan, qos,
						  ntohs(proto));

	return 0;
}
static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
{
	struct hclge_vlan_info *vlan_info;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* clear port base vlan for all vf */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		vport = &hdev->vport[vf];
		vlan_info = &vport->port_base_vlan_cfg.vlan_info;

		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
					       vport->vport_id,
					       vlan_info->vlan_tag, true);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"failed to clear vf vlan for vf%d, ret = %d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When device is resetting or reset failed, firmware is unable to
	 * handle mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
		return -EBUSY;
	}

	/* when port base vlan enabled, we use port base vlan as the vlan
	 * filter entry. In this case, we don't update vlan filter table
	 * when the user adds a new vlan or removes an existing vlan, just
	 * update the vport vlan list. The vlan ids in the vlan list will be
	 * written to the vlan filter table once port base vlan is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, is_kill);
		writen_to_tbl = true;
	}

	if (!ret) {
		if (is_kill)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
		else
			hclge_add_vport_vlan_table(vport, vlan_id,
						   writen_to_tbl);
	} else if (is_kill) {
		/* when removing a hw vlan filter failed, record the vlan id,
		 * and try to remove it from hw later, to be consistent with
		 * the stack.
		 */
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
	}
	return ret;
}
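/* Retry VLAN deletions that failed while the device was resetting; the work
 * done per invocation is bounded by HCLGE_MAX_SYNC_COUNT.
 */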
static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT	60

	int i, ret, sync_cnt = 0;
	u16 vlan_id;

	/* start from vport 1 for PF is always alive */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
					 VLAN_N_VID);
		while (vlan_id != VLAN_N_VID) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id, vlan_id,
						       true);
			if (ret && ret != -EINVAL)
				return;

			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
			hclge_rm_vport_vlan_table(vport, vlan_id, false);

			sync_cnt++;
			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
				return;

			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
						 VLAN_N_VID);
		}
	}
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret;

	/* HW supports 2 layers of vlan */
	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than or equal to VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
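/* Request or deassert the per-queue (TQP) reset in firmware for the given
 * global queue id.
 */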
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);
	if (enable)
		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}
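/* Reset a single TQP: disable it, request the reset, poll for completion and
 * finally deassert the soft reset.
 */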
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;

		/* Wait for tqp hw reset */
		usleep_range(1000, 1200);
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	if (queue_id >= handle->kinfo.num_tqps) {
		dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
			 queue_id);
		return;
	}

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;

		/* Wait for tqp hw reset */
		usleep_range(1000, 1200);
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
10149 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10151 struct hclge_vport *vport = hclge_get_vport(handle);
10152 struct hclge_dev *hdev = vport->back;
10154 return hdev->fw_version;
10157 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10159 struct phy_device *phydev = hdev->hw.mac.phydev;
10164 phy_set_asym_pause(phydev, rx_en, tx_en);
10167 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10171 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10174 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10176 dev_err(&hdev->pdev->dev,
10177 "configure pauseparam error, ret = %d.\n", ret);
10182 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10184 struct phy_device *phydev = hdev->hw.mac.phydev;
10185 u16 remote_advertising = 0;
10186 u16 local_advertising;
10187 u32 rx_pause, tx_pause;
10190 if (!phydev->link || !phydev->autoneg)
10193 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10196 remote_advertising = LPA_PAUSE_CAP;
10198 if (phydev->asym_pause)
10199 remote_advertising |= LPA_PAUSE_ASYM;
10201 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10202 remote_advertising);
10203 tx_pause = flowctl & FLOW_CTRL_TX;
10204 rx_pause = flowctl & FLOW_CTRL_RX;
10206 if (phydev->duplex == HCLGE_MAC_HALF) {
10211 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10214 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10215 u32 *rx_en, u32 *tx_en)
10217 struct hclge_vport *vport = hclge_get_vport(handle);
10218 struct hclge_dev *hdev = vport->back;
10219 u8 media_type = hdev->hw.mac.media_type;
10221 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10222 hclge_get_autoneg(handle) : 0;
10224 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10230 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10233 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10236 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10245 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10246 u32 rx_en, u32 tx_en)
10248 if (rx_en && tx_en)
10249 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10250 else if (rx_en && !tx_en)
10251 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10252 else if (!rx_en && tx_en)
10253 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10255 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10257 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10260 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10261 u32 rx_en, u32 tx_en)
10263 struct hclge_vport *vport = hclge_get_vport(handle);
10264 struct hclge_dev *hdev = vport->back;
10265 struct phy_device *phydev = hdev->hw.mac.phydev;
10268 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10269 fc_autoneg = hclge_get_autoneg(handle);
10270 if (auto_neg != fc_autoneg) {
10271 dev_info(&hdev->pdev->dev,
10272 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10273 return -EOPNOTSUPP;
10277 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10278 dev_info(&hdev->pdev->dev,
10279 "Priority flow control enabled. Cannot set link flow control.\n");
10280 return -EOPNOTSUPP;
10283 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10285 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10287 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10288 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10291 return phy_start_aneg(phydev);
10293 return -EOPNOTSUPP;
10296 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10297 u8 *auto_neg, u32 *speed, u8 *duplex)
10299 struct hclge_vport *vport = hclge_get_vport(handle);
10300 struct hclge_dev *hdev = vport->back;
10303 *speed = hdev->hw.mac.speed;
10305 *duplex = hdev->hw.mac.duplex;
10307 *auto_neg = hdev->hw.mac.autoneg;
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* When the nic is down, the service task is not running and does not
	 * update the port information every second. Query the port information
	 * before returning the media type, to ensure it is current.
	 */
	hclge_update_port_info(hdev);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}
10329 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10330 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10332 struct hclge_vport *vport = hclge_get_vport(handle);
10333 struct hclge_dev *hdev = vport->back;
10334 struct phy_device *phydev = hdev->hw.mac.phydev;
10335 int mdix_ctrl, mdix, is_resolved;
10336 unsigned int retval;
10339 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10340 *tp_mdix = ETH_TP_MDI_INVALID;
10344 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10346 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10347 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10348 HCLGE_PHY_MDIX_CTRL_S);
10350 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10351 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10352 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10354 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10356 switch (mdix_ctrl) {
10358 *tp_mdix_ctrl = ETH_TP_MDI;
10361 *tp_mdix_ctrl = ETH_TP_MDI_X;
10364 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10367 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10372 *tp_mdix = ETH_TP_MDI_INVALID;
10374 *tp_mdix = ETH_TP_MDI_X;
10376 *tp_mdix = ETH_TP_MDI;
10379 static void hclge_info_show(struct hclge_dev *hdev)
10381 struct device *dev = &hdev->pdev->dev;
10383 dev_info(dev, "PF info begin:\n");
10385 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10386 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10387 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10388 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10389 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
10390 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10391 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10392 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10393 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10394 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10395 dev_info(dev, "This is %s PF\n",
10396 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10397 dev_info(dev, "DCB %s\n",
10398 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10399 dev_info(dev, "MQPRIO %s\n",
10400 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10402 dev_info(dev, "PF info end.\n");
10405 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10406 struct hclge_vport *vport)
10408 struct hnae3_client *client = vport->nic.client;
10409 struct hclge_dev *hdev = ae_dev->priv;
10410 int rst_cnt = hdev->rst_stats.reset_cnt;
10413 ret = client->ops->init_instance(&vport->nic);
10417 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10418 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10419 rst_cnt != hdev->rst_stats.reset_cnt) {
10424 /* Enable nic hw error interrupts */
10425 ret = hclge_config_nic_hw_error(hdev, true);
10427 dev_err(&ae_dev->pdev->dev,
10428 "fail(%d) to enable hw error interrupts\n", ret);
10432 hnae3_set_client_init_flag(client, ae_dev, 1);
10434 if (netif_msg_drv(&hdev->vport->nic))
10435 hclge_info_show(hdev);
10440 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10441 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10442 msleep(HCLGE_WAIT_RESET_DONE);
10444 client->ops->uninit_instance(&vport->nic, 0);
10449 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10450 struct hclge_vport *vport)
10452 struct hclge_dev *hdev = ae_dev->priv;
10453 struct hnae3_client *client;
10457 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10461 client = hdev->roce_client;
10462 ret = hclge_init_roce_base_info(vport);
10466 rst_cnt = hdev->rst_stats.reset_cnt;
10467 ret = client->ops->init_instance(&vport->roce);
10471 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10472 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10473 rst_cnt != hdev->rst_stats.reset_cnt) {
10475 goto init_roce_err;
10478 /* Enable roce ras interrupts */
10479 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10481 dev_err(&ae_dev->pdev->dev,
10482 "fail(%d) to enable roce ras interrupts\n", ret);
10483 goto init_roce_err;
10486 hnae3_set_client_init_flag(client, ae_dev, 1);
10491 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10492 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10493 msleep(HCLGE_WAIT_RESET_DONE);
10495 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10500 static int hclge_init_client_instance(struct hnae3_client *client,
10501 struct hnae3_ae_dev *ae_dev)
10503 struct hclge_dev *hdev = ae_dev->priv;
10504 struct hclge_vport *vport;
10507 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10508 vport = &hdev->vport[i];
10510 switch (client->type) {
10511 case HNAE3_CLIENT_KNIC:
10512 hdev->nic_client = client;
10513 vport->nic.client = client;
10514 ret = hclge_init_nic_client_instance(ae_dev, vport);
10518 ret = hclge_init_roce_client_instance(ae_dev, vport);
10523 case HNAE3_CLIENT_ROCE:
10524 if (hnae3_dev_roce_supported(hdev)) {
10525 hdev->roce_client = client;
10526 vport->roce.client = client;
10529 ret = hclge_init_roce_client_instance(ae_dev, vport);
10542 hdev->nic_client = NULL;
10543 vport->nic.client = NULL;
10546 hdev->roce_client = NULL;
10547 vport->roce.client = NULL;
10551 static void hclge_uninit_client_instance(struct hnae3_client *client,
10552 struct hnae3_ae_dev *ae_dev)
10554 struct hclge_dev *hdev = ae_dev->priv;
10555 struct hclge_vport *vport;
10558 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10559 vport = &hdev->vport[i];
10560 if (hdev->roce_client) {
10561 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10562 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10563 msleep(HCLGE_WAIT_RESET_DONE);
10565 hdev->roce_client->ops->uninit_instance(&vport->roce,
10567 hdev->roce_client = NULL;
10568 vport->roce.client = NULL;
10570 if (client->type == HNAE3_CLIENT_ROCE)
10572 if (hdev->nic_client && client->ops->uninit_instance) {
10573 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10574 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10575 msleep(HCLGE_WAIT_RESET_DONE);
10577 client->ops->uninit_instance(&vport->nic, 0);
10578 hdev->nic_client = NULL;
10579 vport->nic.client = NULL;
static int hclge_dev_mem_map(struct hclge_dev *hdev)
{
#define HCLGE_MEM_BAR		4

	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw = &hdev->hw;

	/* for a device that does not have device memory, return directly */
	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
		return 0;

	hw->mem_base = devm_ioremap_wc(&pdev->dev,
				       pci_resource_start(pdev, HCLGE_MEM_BAR),
				       pci_resource_len(pdev, HCLGE_MEM_BAR));
	if (!hw->mem_base) {
		dev_err(&pdev->dev, "failed to map device memory\n");
		return -EFAULT;
	}

	return 0;
}
10606 static int hclge_pci_init(struct hclge_dev *hdev)
10608 struct pci_dev *pdev = hdev->pdev;
10609 struct hclge_hw *hw;
10612 ret = pci_enable_device(pdev);
10614 dev_err(&pdev->dev, "failed to enable PCI device\n");
10618 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10620 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10622 dev_err(&pdev->dev,
10623 "can't set consistent PCI DMA");
10624 goto err_disable_device;
10626 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10629 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10631 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10632 goto err_disable_device;
10635 pci_set_master(pdev);
10637 hw->io_base = pcim_iomap(pdev, 2, 0);
10638 if (!hw->io_base) {
10639 dev_err(&pdev->dev, "Can't map configuration register space\n");
10641 goto err_clr_master;
10644 ret = hclge_dev_mem_map(hdev);
10646 goto err_unmap_io_base;
10648 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10653 pcim_iounmap(pdev, hdev->hw.io_base);
10655 pci_clear_master(pdev);
10656 pci_release_regions(pdev);
10657 err_disable_device:
10658 pci_disable_device(pdev);
10663 static void hclge_pci_uninit(struct hclge_dev *hdev)
10665 struct pci_dev *pdev = hdev->pdev;
10667 if (hdev->hw.mem_base)
10668 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10670 pcim_iounmap(pdev, hdev->hw.io_base);
10671 pci_free_irq_vectors(pdev);
10672 pci_clear_master(pdev);
10673 pci_release_mem_regions(pdev);
10674 pci_disable_device(pdev);
10677 static void hclge_state_init(struct hclge_dev *hdev)
10679 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10680 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10681 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10682 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10683 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10684 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10685 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10688 static void hclge_state_uninit(struct hclge_dev *hdev)
10690 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10691 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10693 if (hdev->reset_timer.function)
10694 del_timer_sync(&hdev->reset_timer);
10695 if (hdev->service_task.work.func)
10696 cancel_delayed_work_sync(&hdev->service_task);
10699 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10701 #define HCLGE_FLR_RETRY_WAIT_MS 500
10702 #define HCLGE_FLR_RETRY_CNT 5
10704 struct hclge_dev *hdev = ae_dev->priv;
10709 down(&hdev->reset_sem);
10710 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10711 hdev->reset_type = HNAE3_FLR_RESET;
10712 ret = hclge_reset_prepare(hdev);
10713 if (ret || hdev->reset_pending) {
10714 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10716 if (hdev->reset_pending ||
10717 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10718 dev_err(&hdev->pdev->dev,
10719 "reset_pending:0x%lx, retry_cnt:%d\n",
10720 hdev->reset_pending, retry_cnt);
10721 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10722 up(&hdev->reset_sem);
10723 msleep(HCLGE_FLR_RETRY_WAIT_MS);
10728 /* disable misc vector before FLR done */
10729 hclge_enable_vector(&hdev->misc_vector, false);
10730 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10731 hdev->rst_stats.flr_rst_cnt++;
10734 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10736 struct hclge_dev *hdev = ae_dev->priv;
10739 hclge_enable_vector(&hdev->misc_vector, true);
10741 ret = hclge_reset_rebuild(hdev);
10743 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10745 hdev->reset_type = HNAE3_NONE_RESET;
10746 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10747 up(&hdev->reset_sem);
10750 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10754 for (i = 0; i < hdev->num_alloc_vport; i++) {
10755 struct hclge_vport *vport = &hdev->vport[i];
10758 /* Send cmd to clear VF's FUNC_RST_ING */
10759 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10761 dev_warn(&hdev->pdev->dev,
10762 "clear vf(%u) rst failed %d!\n",
10763 vport->vport_id, ret);
10767 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10769 struct pci_dev *pdev = ae_dev->pdev;
10770 struct hclge_dev *hdev;
10773 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10778 hdev->ae_dev = ae_dev;
10779 hdev->reset_type = HNAE3_NONE_RESET;
10780 hdev->reset_level = HNAE3_FUNC_RESET;
10781 ae_dev->priv = hdev;
	/* HW supports 2 layers of vlan */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10786 mutex_init(&hdev->vport_lock);
10787 spin_lock_init(&hdev->fd_rule_lock);
10788 sema_init(&hdev->reset_sem, 1);
10790 ret = hclge_pci_init(hdev);
10794 /* Firmware command queue initialize */
10795 ret = hclge_cmd_queue_init(hdev);
10797 goto err_pci_uninit;
10799 /* Firmware command initialize */
10800 ret = hclge_cmd_init(hdev);
10802 goto err_cmd_uninit;
10804 ret = hclge_get_cap(hdev);
10806 goto err_cmd_uninit;
10808 ret = hclge_query_dev_specs(hdev);
10810 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10812 goto err_cmd_uninit;
10815 ret = hclge_configure(hdev);
10817 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10818 goto err_cmd_uninit;
10821 ret = hclge_init_msi(hdev);
10823 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10824 goto err_cmd_uninit;
10827 ret = hclge_misc_irq_init(hdev);
10829 goto err_msi_uninit;
10831 ret = hclge_alloc_tqps(hdev);
10833 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10834 goto err_msi_irq_uninit;
10837 ret = hclge_alloc_vport(hdev);
10839 goto err_msi_irq_uninit;
10841 ret = hclge_map_tqp(hdev);
10843 goto err_msi_irq_uninit;
10845 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
10846 !hnae3_dev_phy_imp_supported(hdev)) {
10847 ret = hclge_mac_mdio_config(hdev);
10849 goto err_msi_irq_uninit;
10852 ret = hclge_init_umv_space(hdev);
10854 goto err_mdiobus_unreg;
10856 ret = hclge_mac_init(hdev);
10858 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10859 goto err_mdiobus_unreg;
10862 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10864 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10865 goto err_mdiobus_unreg;
10868 ret = hclge_config_gro(hdev, true);
10870 goto err_mdiobus_unreg;
10872 ret = hclge_init_vlan_config(hdev);
10874 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10875 goto err_mdiobus_unreg;
10878 ret = hclge_tm_schd_init(hdev);
10880 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10881 goto err_mdiobus_unreg;
10884 ret = hclge_rss_init_cfg(hdev);
10886 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
10887 goto err_mdiobus_unreg;
10890 ret = hclge_rss_init_hw(hdev);
10892 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10893 goto err_mdiobus_unreg;
10896 ret = init_mgr_tbl(hdev);
10898 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10899 goto err_mdiobus_unreg;
10902 ret = hclge_init_fd_config(hdev);
10904 dev_err(&pdev->dev,
10905 "fd table init fail, ret=%d\n", ret);
10906 goto err_mdiobus_unreg;
10909 INIT_KFIFO(hdev->mac_tnl_log);
10911 hclge_dcb_ops_set(hdev);
10913 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10914 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10916 /* Setup affinity after service timer setup because add_timer_on
10917 * is called in affinity notify.
10919 hclge_misc_affinity_setup(hdev);
10921 hclge_clear_all_event_cause(hdev);
10922 hclge_clear_resetting_state(hdev);
	/* Log and clear the hw errors that already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* request delayed reset for the error recovery because an immediate
	 * global reset on a PF would affect the pending initialization of
	 * other PFs
	 */
	if (ae_dev->hw_err_reset_req) {
10931 enum hnae3_reset_type reset_level;
10933 reset_level = hclge_get_reset_level(ae_dev,
10934 &ae_dev->hw_err_reset_req);
10935 hclge_set_def_reset_request(ae_dev, reset_level);
10936 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10939 /* Enable MISC vector(vector0) */
10940 hclge_enable_vector(&hdev->misc_vector, true);
10942 hclge_state_init(hdev);
10943 hdev->last_reset_time = jiffies;
10945 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10946 HCLGE_DRIVER_NAME);
10948 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10953 if (hdev->hw.mac.phydev)
10954 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10955 err_msi_irq_uninit:
10956 hclge_misc_irq_uninit(hdev);
10958 pci_free_irq_vectors(pdev);
10960 hclge_cmd_uninit(hdev);
10962 pcim_iounmap(pdev, hdev->hw.io_base);
10963 pci_clear_master(pdev);
10964 pci_release_regions(pdev);
10965 pci_disable_device(pdev);
10967 mutex_destroy(&hdev->vport_lock);
10971 static void hclge_stats_clear(struct hclge_dev *hdev)
10973 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10976 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10978 return hclge_config_switch_param(hdev, vf, enable,
10979 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10982 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10984 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10985 HCLGE_FILTER_FE_NIC_INGRESS_B,
10989 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10993 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10995 dev_err(&hdev->pdev->dev,
10996 "Set vf %d mac spoof check %s failed, ret=%d\n",
10997 vf, enable ? "on" : "off", ret);
11001 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11003 dev_err(&hdev->pdev->dev,
11004 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11005 vf, enable ? "on" : "off", ret);
11010 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11013 struct hclge_vport *vport = hclge_get_vport(handle);
11014 struct hclge_dev *hdev = vport->back;
11015 u32 new_spoofchk = enable ? 1 : 0;
11018 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11019 return -EOPNOTSUPP;
11021 vport = hclge_get_vf_vport(hdev, vf);
11025 if (vport->vf_info.spoofchk == new_spoofchk)
11028 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11029 dev_warn(&hdev->pdev->dev,
11030 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11032 else if (enable && hclge_is_umv_space_full(vport, true))
11033 dev_warn(&hdev->pdev->dev,
11034 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11037 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11041 vport->vf_info.spoofchk = new_spoofchk;
11045 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11047 struct hclge_vport *vport = hdev->vport;
11051 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11054 /* resume the vf spoof check state after reset */
11055 for (i = 0; i < hdev->num_alloc_vport; i++) {
11056 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11057 vport->vf_info.spoofchk);
11067 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11069 struct hclge_vport *vport = hclge_get_vport(handle);
11070 struct hclge_dev *hdev = vport->back;
11071 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
11072 u32 new_trusted = enable ? 1 : 0;
11076 vport = hclge_get_vf_vport(hdev, vf);
11080 if (vport->vf_info.trusted == new_trusted)
11083 /* Disable promisc mode for VF if it is not trusted any more. */
11084 if (!enable && vport->vf_info.promisc_enable) {
11085 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
11086 ret = hclge_set_vport_promisc_mode(vport, false, false,
11090 vport->vf_info.promisc_enable = 0;
11091 hclge_inform_vf_promisc_info(vport);
11094 vport->vf_info.trusted = new_trusted;
11099 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11104 /* reset vf rate to default value */
11105 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11106 struct hclge_vport *vport = &hdev->vport[vf];
11108 vport->vf_info.max_tx_rate = 0;
11109 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11111 dev_err(&hdev->pdev->dev,
11112 "vf%d failed to reset to default, ret=%d\n",
11113 vf - HCLGE_VF_VPORT_START_NUM, ret);
11117 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11118 int min_tx_rate, int max_tx_rate)
11120 if (min_tx_rate != 0 ||
11121 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11122 dev_err(&hdev->pdev->dev,
11123 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11124 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11131 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11132 int min_tx_rate, int max_tx_rate, bool force)
11134 struct hclge_vport *vport = hclge_get_vport(handle);
11135 struct hclge_dev *hdev = vport->back;
11138 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11142 vport = hclge_get_vf_vport(hdev, vf);
11146 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11149 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11153 vport->vf_info.max_tx_rate = max_tx_rate;
static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* resume the vf max_tx_rate after reset */
	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
		vport = hclge_get_vf_vport(hdev, vf);
		if (!vport)
			return -EINVAL;

		/* zero means max rate, after reset, firmware already set it to
		 * max rate, so just continue.
		 */
		if (!vport->vf_info.max_tx_rate)
			continue;

		ret = hclge_set_vf_rate(handle, vf, 0,
					vport->vf_info.max_tx_rate, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%d failed to resume tx_rate:%u, ret=%d\n",
				vf, vport->vf_info.max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}

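/* Re-initialize the hardware in dependency order after a reset: command
 * queue first, then TQP mapping, MAC, TSO/GRO, VLAN, TM, RSS, manager and
 * flow director tables, and finally RAS interrupts and per-VF state.
 */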
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	/* NOTE: pf reset doesn't need to clear or restore the pf and vf
	 * table entries, so the tables in memory should not be cleaned here.
	 */
	if (hdev->reset_type == HNAE3_IMP_RESET ||
	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
		hclge_reset_umv_space(hdev);
	}

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_tp_port_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
			ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to reinit manager table, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Log and clear the hw errors that have already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);
	ret = hclge_reset_vport_spoofchk(hdev);
	if (ret)
		return ret;

	ret = hclge_resume_vf_rate(hdev);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_reset_vf_rate(hdev);
	hclge_clear_vf_vlan(hdev);
	hclge_misc_affinity_teardown(hdev);
	hclge_state_uninit(hdev);
	hclge_uninit_mac_table(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_vlan_table(hdev);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->pf_rss_size_max;
}

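/* Handle "ethtool -L <dev> combined <n>": update the vport TQP mapping for
 * the requested queue count, recompute the per-TC RSS mode, and rebuild the
 * RSS indirection table unless the user has configured one explicitly.
 */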
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has already been configured by the user */
	if (rxfh_configured)
		goto out;

	/* Reinitialize the RSS indirection table according to the new RSS size */
	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
			    GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);

	return ret;
}

static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

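/* Each query descriptor carries HCLGE_32_BIT_REG_RTN_DATANUM u32 entries,
 * but the first descriptor loses HCLGE_32_BIT_DESC_NODATA_LEN of them to
 * the command header, hence the rounding when computing cmd_num below.
 */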
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num, cmd_num;
	int i, k, n, ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len, cmd_num;
	int i, k, n, ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3
#define BD_LIST_MAX_NUM		30

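/* Query how many buffer descriptors each DFX register type needs; the
 * per-type counts are extracted from the reply using
 * hclge_dfx_bd_offset_list.
 */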
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	int i;

	/* initialize command BD except the last one */
	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	/* initialize the last command BD */
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
}

static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list, u32 type_num)
{
	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}

static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}

static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg_num + separator_num;
}

static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int data_len_per_desc, bd_num, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	u32 data_len;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	data_len_per_desc = sizeof_field(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		data_len = data_len_per_desc * bd_num;
		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
	}

	return ret;
}

static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	struct hclge_desc *desc_src;
	u32 *reg = data;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src)
		return -ENOMEM;

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			break;
		}

		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	kfree(desc_src);
	return ret;
}

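/* Dump the directly readable PF register blocks (cmdq, common, per-ring and
 * per-vector interrupt registers), padding each block with SEPARATOR_VALUE
 * up to a REG_NUM_PER_LINE boundary. Returns the number of u32s written.
 */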
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num, separator_num;
	int data_num_sum;
	u32 *reg = data;

	/* fetch per-PF register values from the PF PCIe register space */
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum = reg_num + separator_num;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum += reg_num + separator_num;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

	return data_num_sum;
}

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
	int regs_lines_32_bit, regs_lines_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}

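/* ethtool_ops->get_regs ("ethtool -d <dev>"): the buffer layout here must
 * match the length computed by hclge_get_regs_len() above, i.e. PF register
 * blocks, then the 32-bit and 64-bit firmware-queried registers, then the
 * DFX registers.
 */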
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, reg_num, separator_num, ret;
	u32 *reg = data;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_32_bit;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_64_bit * 2;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

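/* ethtool_ops->set_phys_id ("ethtool -p <dev>") toggles the locate LED. */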
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}

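/* Reconcile promiscuous mode and the VLAN filter with the last requested
 * flags; expected to be invoked from the driver's periodic service task.
 */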
static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;
	u8 tmp_flags;
	int ret;

	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
		vport->last_promisc_flags = vport->overflow_promisc_flags;
	}

	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
					     tmp_flags & HNAE3_MPE);
		if (!ret) {
			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
			hclge_enable_vlan_filter(handle,
						 tmp_flags & HNAE3_VLAN_FLTR);
		}
	}
}

static bool hclge_module_existed(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u32 existed;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP exist state, ret = %d\n", ret);
		return false;
	}

	existed = le32_to_cpu(desc.data[0]);

	return existed != 0;
}

/* One read needs 6 BDs (140 bytes in total).
 * Returns the number of bytes actually read; 0 means the read failed.
 */
static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
				     u32 len, u8 *data)
{
	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
	u16 read_len;
	u16 copy_len;
	int ret;
	int i;

	/* setup all 6 bds to read module eeprom info. */
	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
					   true);

		/* bd0~bd4 need the next flag */
		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	/* setup bd0, this bd contains offset and read length. */
	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = cpu_to_le16(read_len);

	ret = hclge_cmd_send(&hdev->hw, desc, i);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP eeprom info, ret = %d\n", ret);
		return 0;
	}

	/* copy sfp info from bd0 to out buffer. */
	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	/* copy sfp info from bd1~bd5 to out buffer if needed. */
	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			break;

		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return read_len;
}

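/* ethtool_ops->get_module_eeprom ("ethtool -m <dev>"): loop, reading up to
 * HCLGE_SFP_INFO_MAX_LEN bytes per firmware command, until the requested
 * length has been copied out.
 */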
static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.dbg_read_cmd = hclge_dbg_read_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
	.add_cls_flower = hclge_add_cls_flower,
	.del_cls_flower = hclge_del_cls_flower,
	.cls_flower_active = hclge_is_cls_flower_active,
	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}

module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);