1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
24 #include "hclge_err.h"
27 #define HCLGE_NAME "hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
31 #define HCLGE_BUF_SIZE_UNIT 256U
32 #define HCLGE_BUF_MUL_BY 2
33 #define HCLGE_BUF_DIV_BY 2
34 #define NEED_RESERVE_TC_NUM 2
35 #define BUF_MAX_PERCENT 100
36 #define BUF_RESERVE_PERCENT 90
38 #define HCLGE_RESET_MAX_FAIL_CNT 5
39 #define HCLGE_RESET_SYNC_TIME 100
40 #define HCLGE_PF_RESET_SYNC_TIME 20
41 #define HCLGE_PF_RESET_SYNC_CNT 1500
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET 1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET 2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET 3
47 #define HCLGE_DFX_IGU_BD_OFFSET 4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET 5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET 6
50 #define HCLGE_DFX_NCSI_BD_OFFSET 7
51 #define HCLGE_DFX_RTC_BD_OFFSET 8
52 #define HCLGE_DFX_PPP_BD_OFFSET 9
53 #define HCLGE_DFX_RCB_BD_OFFSET 10
54 #define HCLGE_DFX_TQP_BD_OFFSET 11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET 12
57 #define HCLGE_LINK_STATUS_MS 10
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 static void hclge_sync_fd_table(struct hclge_dev *hdev);
75 static struct hnae3_ae_algo ae_algo;
77 static struct workqueue_struct *hclge_wq;
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 /* required last entry */
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 HCLGE_CMDQ_TX_ADDR_H_REG,
96 HCLGE_CMDQ_TX_DEPTH_REG,
97 HCLGE_CMDQ_TX_TAIL_REG,
98 HCLGE_CMDQ_TX_HEAD_REG,
99 HCLGE_CMDQ_RX_ADDR_L_REG,
100 HCLGE_CMDQ_RX_ADDR_H_REG,
101 HCLGE_CMDQ_RX_DEPTH_REG,
102 HCLGE_CMDQ_RX_TAIL_REG,
103 HCLGE_CMDQ_RX_HEAD_REG,
104 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 HCLGE_CMDQ_INTR_STS_REG,
106 HCLGE_CMDQ_INTR_EN_REG,
107 HCLGE_CMDQ_INTR_GEN_REG};
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 HCLGE_VECTOR0_OTER_EN_REG,
111 HCLGE_MISC_RESET_STS_REG,
112 HCLGE_MISC_VECTOR_INT_STS,
113 HCLGE_GLOBAL_RESET_REG,
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 HCLGE_RING_RX_ADDR_H_REG,
119 HCLGE_RING_RX_BD_NUM_REG,
120 HCLGE_RING_RX_BD_LENGTH_REG,
121 HCLGE_RING_RX_MERGE_EN_REG,
122 HCLGE_RING_RX_TAIL_REG,
123 HCLGE_RING_RX_HEAD_REG,
124 HCLGE_RING_RX_FBD_NUM_REG,
125 HCLGE_RING_RX_OFFSET_REG,
126 HCLGE_RING_RX_FBD_OFFSET_REG,
127 HCLGE_RING_RX_STASH_REG,
128 HCLGE_RING_RX_BD_ERR_REG,
129 HCLGE_RING_TX_ADDR_L_REG,
130 HCLGE_RING_TX_ADDR_H_REG,
131 HCLGE_RING_TX_BD_NUM_REG,
132 HCLGE_RING_TX_PRIORITY_REG,
133 HCLGE_RING_TX_TC_REG,
134 HCLGE_RING_TX_MERGE_EN_REG,
135 HCLGE_RING_TX_TAIL_REG,
136 HCLGE_RING_TX_HEAD_REG,
137 HCLGE_RING_TX_FBD_NUM_REG,
138 HCLGE_RING_TX_OFFSET_REG,
139 HCLGE_RING_TX_EBD_NUM_REG,
140 HCLGE_RING_TX_EBD_OFFSET_REG,
141 HCLGE_RING_TX_BD_ERR_REG,
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 HCLGE_TQP_INTR_GL0_REG,
146 HCLGE_TQP_INTR_GL1_REG,
147 HCLGE_TQP_INTR_GL2_REG,
148 HCLGE_TQP_INTR_RL_REG};
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
152 "Serdes serial Loopback test",
153 "Serdes parallel Loopback test",
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 {"mac_tx_mac_pause_num",
159 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 {"mac_rx_mac_pause_num",
161 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 {"mac_tx_control_pkt_num",
163 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 {"mac_rx_control_pkt_num",
165 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 {"mac_tx_pfc_pkt_num",
167 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 {"mac_tx_pfc_pri0_pkt_num",
169 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 {"mac_tx_pfc_pri1_pkt_num",
171 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 {"mac_tx_pfc_pri2_pkt_num",
173 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 {"mac_tx_pfc_pri3_pkt_num",
175 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 {"mac_tx_pfc_pri4_pkt_num",
177 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 {"mac_tx_pfc_pri5_pkt_num",
179 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 {"mac_tx_pfc_pri6_pkt_num",
181 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 {"mac_tx_pfc_pri7_pkt_num",
183 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 {"mac_rx_pfc_pkt_num",
185 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 {"mac_rx_pfc_pri0_pkt_num",
187 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 {"mac_rx_pfc_pri1_pkt_num",
189 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 {"mac_rx_pfc_pri2_pkt_num",
191 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 {"mac_rx_pfc_pri3_pkt_num",
193 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 {"mac_rx_pfc_pri4_pkt_num",
195 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 {"mac_rx_pfc_pri5_pkt_num",
197 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 {"mac_rx_pfc_pri6_pkt_num",
199 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 {"mac_rx_pfc_pri7_pkt_num",
201 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 {"mac_tx_total_pkt_num",
203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 {"mac_tx_total_oct_num",
205 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 {"mac_tx_good_pkt_num",
207 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 {"mac_tx_bad_pkt_num",
209 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 {"mac_tx_good_oct_num",
211 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 {"mac_tx_bad_oct_num",
213 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 {"mac_tx_uni_pkt_num",
215 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 {"mac_tx_multi_pkt_num",
217 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 {"mac_tx_broad_pkt_num",
219 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 {"mac_tx_undersize_pkt_num",
221 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 {"mac_tx_oversize_pkt_num",
223 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 {"mac_tx_64_oct_pkt_num",
225 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 {"mac_tx_65_127_oct_pkt_num",
227 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 {"mac_tx_128_255_oct_pkt_num",
229 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 {"mac_tx_256_511_oct_pkt_num",
231 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 {"mac_tx_512_1023_oct_pkt_num",
233 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 {"mac_tx_1024_1518_oct_pkt_num",
235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 {"mac_tx_1519_2047_oct_pkt_num",
237 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 {"mac_tx_2048_4095_oct_pkt_num",
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 {"mac_tx_4096_8191_oct_pkt_num",
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 {"mac_tx_8192_9216_oct_pkt_num",
243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 {"mac_tx_9217_12287_oct_pkt_num",
245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 {"mac_tx_12288_16383_oct_pkt_num",
247 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 {"mac_tx_1519_max_good_pkt_num",
249 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 {"mac_tx_1519_max_bad_pkt_num",
251 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 {"mac_rx_total_pkt_num",
253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 {"mac_rx_total_oct_num",
255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 {"mac_rx_good_pkt_num",
257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 {"mac_rx_bad_pkt_num",
259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 {"mac_rx_good_oct_num",
261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 {"mac_rx_bad_oct_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 {"mac_rx_uni_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 {"mac_rx_multi_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 {"mac_rx_broad_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 {"mac_rx_undersize_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 {"mac_rx_oversize_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 {"mac_rx_64_oct_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 {"mac_rx_65_127_oct_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 {"mac_rx_128_255_oct_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 {"mac_rx_256_511_oct_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 {"mac_rx_512_1023_oct_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 {"mac_rx_1024_1518_oct_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 {"mac_rx_1519_2047_oct_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 {"mac_rx_2048_4095_oct_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 {"mac_rx_4096_8191_oct_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 {"mac_rx_8192_9216_oct_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 {"mac_rx_9217_12287_oct_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 {"mac_rx_12288_16383_oct_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 {"mac_rx_1519_max_good_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 {"mac_rx_1519_max_bad_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
303 {"mac_tx_fragment_pkt_num",
304 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 {"mac_tx_undermin_pkt_num",
306 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 {"mac_tx_jabber_pkt_num",
308 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 {"mac_tx_err_all_pkt_num",
310 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 {"mac_tx_from_app_good_pkt_num",
312 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 {"mac_tx_from_app_bad_pkt_num",
314 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 {"mac_rx_fragment_pkt_num",
316 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 {"mac_rx_undermin_pkt_num",
318 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 {"mac_rx_jabber_pkt_num",
320 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 {"mac_rx_fcs_err_pkt_num",
322 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 {"mac_rx_send_app_good_pkt_num",
324 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 {"mac_rx_send_app_bad_pkt_num",
326 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
331 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 .ethter_type = cpu_to_le16(ETH_P_LLDP),
333 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 .i_port_bitmap = 0x1,
338 static const u8 hclge_hash_key[] = {
339 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 HCLGE_DFX_BIOS_BD_OFFSET,
348 HCLGE_DFX_SSU_0_BD_OFFSET,
349 HCLGE_DFX_SSU_1_BD_OFFSET,
350 HCLGE_DFX_IGU_BD_OFFSET,
351 HCLGE_DFX_RPU_0_BD_OFFSET,
352 HCLGE_DFX_RPU_1_BD_OFFSET,
353 HCLGE_DFX_NCSI_BD_OFFSET,
354 HCLGE_DFX_RTC_BD_OFFSET,
355 HCLGE_DFX_PPP_BD_OFFSET,
356 HCLGE_DFX_RCB_BD_OFFSET,
357 HCLGE_DFX_TQP_BD_OFFSET,
358 HCLGE_DFX_SSU_2_BD_OFFSET
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 HCLGE_OPC_DFX_SSU_REG_0,
364 HCLGE_OPC_DFX_SSU_REG_1,
365 HCLGE_OPC_DFX_IGU_EGU_REG,
366 HCLGE_OPC_DFX_RPU_REG_0,
367 HCLGE_OPC_DFX_RPU_REG_1,
368 HCLGE_OPC_DFX_NCSI_REG,
369 HCLGE_OPC_DFX_RTC_REG,
370 HCLGE_OPC_DFX_PPP_REG,
371 HCLGE_OPC_DFX_RCB_REG,
372 HCLGE_OPC_DFX_TQP_REG,
373 HCLGE_OPC_DFX_SSU_REG_2
376 static const struct key_info meta_data_key_info[] = {
377 { PACKET_TYPE_ID, 6},
387 static const struct key_info tuple_key_info[] = {
388 { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
389 { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
391 { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
392 { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
393 { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
394 { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
395 { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
396 { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
397 { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
398 { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
399 { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
400 { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
402 { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
403 { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
404 { INNER_DST_MAC, 48, KEY_OPT_MAC,
405 offsetof(struct hclge_fd_rule, tuples.dst_mac),
406 offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
407 { INNER_SRC_MAC, 48, KEY_OPT_MAC,
408 offsetof(struct hclge_fd_rule, tuples.src_mac),
409 offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
410 { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
411 offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
412 offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
413 { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
414 { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
415 offsetof(struct hclge_fd_rule, tuples.ether_proto),
416 offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
417 { INNER_L2_RSV, 16, KEY_OPT_LE16,
418 offsetof(struct hclge_fd_rule, tuples.l2_user_def),
419 offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
420 { INNER_IP_TOS, 8, KEY_OPT_U8,
421 offsetof(struct hclge_fd_rule, tuples.ip_tos),
422 offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
423 { INNER_IP_PROTO, 8, KEY_OPT_U8,
424 offsetof(struct hclge_fd_rule, tuples.ip_proto),
425 offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
426 { INNER_SRC_IP, 32, KEY_OPT_IP,
427 offsetof(struct hclge_fd_rule, tuples.src_ip),
428 offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
429 { INNER_DST_IP, 32, KEY_OPT_IP,
430 offsetof(struct hclge_fd_rule, tuples.dst_ip),
431 offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
432 { INNER_L3_RSV, 16, KEY_OPT_LE16,
433 offsetof(struct hclge_fd_rule, tuples.l3_user_def),
434 offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
435 { INNER_SRC_PORT, 16, KEY_OPT_LE16,
436 offsetof(struct hclge_fd_rule, tuples.src_port),
437 offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
438 { INNER_DST_PORT, 16, KEY_OPT_LE16,
439 offsetof(struct hclge_fd_rule, tuples.dst_port),
440 offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
441 { INNER_L4_RSV, 32, KEY_OPT_LE32,
442 offsetof(struct hclge_fd_rule, tuples.l4_user_def),
443 offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
446 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
448 #define HCLGE_MAC_CMD_NUM 21
450 u64 *data = (u64 *)(&hdev->mac_stats);
451 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
456 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
457 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
459 dev_err(&hdev->pdev->dev,
460 "Get MAC pkt stats fail, status = %d.\n", ret);
465 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
466 /* for special opcode 0032, only the first desc has the head */
467 if (unlikely(i == 0)) {
468 desc_data = (__le64 *)(&desc[i].data[0]);
469 n = HCLGE_RD_FIRST_STATS_NUM;
471 desc_data = (__le64 *)(&desc[i]);
472 n = HCLGE_RD_OTHER_STATS_NUM;
475 for (k = 0; k < n; k++) {
476 *data += le64_to_cpu(*desc_data);
485 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
487 u64 *data = (u64 *)(&hdev->mac_stats);
488 struct hclge_desc *desc;
493 /* This may be called inside atomic sections,
494 * so GFP_ATOMIC is more suitalbe here
496 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
500 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
501 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
507 for (i = 0; i < desc_num; i++) {
508 /* for special opcode 0034, only the first desc has the head */
510 desc_data = (__le64 *)(&desc[i].data[0]);
511 n = HCLGE_RD_FIRST_STATS_NUM;
513 desc_data = (__le64 *)(&desc[i]);
514 n = HCLGE_RD_OTHER_STATS_NUM;
517 for (k = 0; k < n; k++) {
518 *data += le64_to_cpu(*desc_data);
529 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
531 struct hclge_desc desc;
536 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
537 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
541 desc_data = (__le32 *)(&desc.data[0]);
542 reg_num = le32_to_cpu(*desc_data);
544 *desc_num = 1 + ((reg_num - 3) >> 2) +
545 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
550 static int hclge_mac_update_stats(struct hclge_dev *hdev)
555 ret = hclge_mac_query_reg_num(hdev, &desc_num);
556 /* The firmware supports the new statistics acquisition method */
558 ret = hclge_mac_update_stats_complete(hdev, desc_num);
559 else if (ret == -EOPNOTSUPP)
560 ret = hclge_mac_update_stats_defective(hdev);
562 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
567 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
569 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
570 struct hclge_vport *vport = hclge_get_vport(handle);
571 struct hclge_dev *hdev = vport->back;
572 struct hnae3_queue *queue;
573 struct hclge_desc desc[1];
574 struct hclge_tqp *tqp;
577 for (i = 0; i < kinfo->num_tqps; i++) {
578 queue = handle->kinfo.tqp[i];
579 tqp = container_of(queue, struct hclge_tqp, q);
580 /* command : HCLGE_OPC_QUERY_IGU_STAT */
581 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
584 desc[0].data[0] = cpu_to_le32(tqp->index);
585 ret = hclge_cmd_send(&hdev->hw, desc, 1);
587 dev_err(&hdev->pdev->dev,
588 "Query tqp stat fail, status = %d,queue = %d\n",
592 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
593 le32_to_cpu(desc[0].data[1]);
596 for (i = 0; i < kinfo->num_tqps; i++) {
597 queue = handle->kinfo.tqp[i];
598 tqp = container_of(queue, struct hclge_tqp, q);
599 /* command : HCLGE_OPC_QUERY_IGU_STAT */
600 hclge_cmd_setup_basic_desc(&desc[0],
601 HCLGE_OPC_QUERY_TX_STATS,
604 desc[0].data[0] = cpu_to_le32(tqp->index);
605 ret = hclge_cmd_send(&hdev->hw, desc, 1);
607 dev_err(&hdev->pdev->dev,
608 "Query tqp stat fail, status = %d,queue = %d\n",
612 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
613 le32_to_cpu(desc[0].data[1]);
619 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
621 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 struct hclge_tqp *tqp;
626 for (i = 0; i < kinfo->num_tqps; i++) {
627 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
628 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
631 for (i = 0; i < kinfo->num_tqps; i++) {
632 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
633 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
639 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
641 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
643 /* each tqp has TX & RX two queues */
644 return kinfo->num_tqps * (2);
647 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
649 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
653 for (i = 0; i < kinfo->num_tqps; i++) {
654 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
655 struct hclge_tqp, q);
656 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
658 buff = buff + ETH_GSTRING_LEN;
661 for (i = 0; i < kinfo->num_tqps; i++) {
662 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
663 struct hclge_tqp, q);
664 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
666 buff = buff + ETH_GSTRING_LEN;
672 static u64 *hclge_comm_get_stats(const void *comm_stats,
673 const struct hclge_comm_stats_str strs[],
679 for (i = 0; i < size; i++)
680 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
685 static u8 *hclge_comm_get_strings(u32 stringset,
686 const struct hclge_comm_stats_str strs[],
689 char *buff = (char *)data;
692 if (stringset != ETH_SS_STATS)
695 for (i = 0; i < size; i++) {
696 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
697 buff = buff + ETH_GSTRING_LEN;
703 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
705 struct hnae3_handle *handle;
708 handle = &hdev->vport[0].nic;
709 if (handle->client) {
710 status = hclge_tqps_update_stats(handle);
712 dev_err(&hdev->pdev->dev,
713 "Update TQPS stats fail, status = %d.\n",
718 status = hclge_mac_update_stats(hdev);
720 dev_err(&hdev->pdev->dev,
721 "Update MAC stats fail, status = %d.\n", status);
724 static void hclge_update_stats(struct hnae3_handle *handle,
725 struct net_device_stats *net_stats)
727 struct hclge_vport *vport = hclge_get_vport(handle);
728 struct hclge_dev *hdev = vport->back;
731 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
734 status = hclge_mac_update_stats(hdev);
736 dev_err(&hdev->pdev->dev,
737 "Update MAC stats fail, status = %d.\n",
740 status = hclge_tqps_update_stats(handle);
742 dev_err(&hdev->pdev->dev,
743 "Update TQPS stats fail, status = %d.\n",
746 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
749 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
751 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
752 HNAE3_SUPPORT_PHY_LOOPBACK |\
753 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
754 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
756 struct hclge_vport *vport = hclge_get_vport(handle);
757 struct hclge_dev *hdev = vport->back;
760 /* Loopback test support rules:
761 * mac: only GE mode support
762 * serdes: all mac mode will support include GE/XGE/LGE/CGE
763 * phy: only support when phy device exist on board
765 if (stringset == ETH_SS_TEST) {
766 /* clear loopback bit flags at first */
767 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
768 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
769 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
770 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
771 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
773 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
777 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
778 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
780 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
781 hdev->hw.mac.phydev->drv->set_loopback) ||
782 hnae3_dev_phy_imp_supported(hdev)) {
784 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
786 } else if (stringset == ETH_SS_STATS) {
787 count = ARRAY_SIZE(g_mac_stats_string) +
788 hclge_tqps_get_sset_count(handle, stringset);
794 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
797 u8 *p = (char *)data;
800 if (stringset == ETH_SS_STATS) {
801 size = ARRAY_SIZE(g_mac_stats_string);
802 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
804 p = hclge_tqps_get_strings(handle, p);
805 } else if (stringset == ETH_SS_TEST) {
806 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
807 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
809 p += ETH_GSTRING_LEN;
811 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
812 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
814 p += ETH_GSTRING_LEN;
816 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
818 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
820 p += ETH_GSTRING_LEN;
822 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
823 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
825 p += ETH_GSTRING_LEN;
830 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
832 struct hclge_vport *vport = hclge_get_vport(handle);
833 struct hclge_dev *hdev = vport->back;
836 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
837 ARRAY_SIZE(g_mac_stats_string), data);
838 p = hclge_tqps_get_stats(handle, p);
841 static void hclge_get_mac_stat(struct hnae3_handle *handle,
842 struct hns3_mac_stats *mac_stats)
844 struct hclge_vport *vport = hclge_get_vport(handle);
845 struct hclge_dev *hdev = vport->back;
847 hclge_update_stats(handle, NULL);
849 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
850 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
853 static int hclge_parse_func_status(struct hclge_dev *hdev,
854 struct hclge_func_status_cmd *status)
856 #define HCLGE_MAC_ID_MASK 0xF
858 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
861 /* Set the pf to main pf */
862 if (status->pf_state & HCLGE_PF_STATE_MAIN)
863 hdev->flag |= HCLGE_FLAG_MAIN;
865 hdev->flag &= ~HCLGE_FLAG_MAIN;
867 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
871 static int hclge_query_function_status(struct hclge_dev *hdev)
873 #define HCLGE_QUERY_MAX_CNT 5
875 struct hclge_func_status_cmd *req;
876 struct hclge_desc desc;
880 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
881 req = (struct hclge_func_status_cmd *)desc.data;
884 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
886 dev_err(&hdev->pdev->dev,
887 "query function status failed %d.\n", ret);
891 /* Check pf reset is done */
894 usleep_range(1000, 2000);
895 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
897 return hclge_parse_func_status(hdev, req);
900 static int hclge_query_pf_resource(struct hclge_dev *hdev)
902 struct hclge_pf_res_cmd *req;
903 struct hclge_desc desc;
906 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
907 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
909 dev_err(&hdev->pdev->dev,
910 "query pf resource failed %d.\n", ret);
914 req = (struct hclge_pf_res_cmd *)desc.data;
915 hdev->num_tqps = le16_to_cpu(req->tqp_num) +
916 le16_to_cpu(req->ext_tqp_num);
917 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
919 if (req->tx_buf_size)
921 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
923 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
925 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
927 if (req->dv_buf_size)
929 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
931 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
933 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
935 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
936 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
937 dev_err(&hdev->pdev->dev,
938 "only %u msi resources available, not enough for pf(min:2).\n",
943 if (hnae3_dev_roce_supported(hdev)) {
945 le16_to_cpu(req->pf_intr_vector_number_roce);
947 /* PF should have NIC vectors and Roce vectors,
948 * NIC vectors are queued before Roce vectors.
950 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
952 hdev->num_msi = hdev->num_nic_msi;
958 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
962 *speed = HCLGE_MAC_SPEED_10M;
965 *speed = HCLGE_MAC_SPEED_100M;
968 *speed = HCLGE_MAC_SPEED_1G;
971 *speed = HCLGE_MAC_SPEED_10G;
974 *speed = HCLGE_MAC_SPEED_25G;
977 *speed = HCLGE_MAC_SPEED_40G;
980 *speed = HCLGE_MAC_SPEED_50G;
983 *speed = HCLGE_MAC_SPEED_100G;
986 *speed = HCLGE_MAC_SPEED_200G;
995 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
997 struct hclge_vport *vport = hclge_get_vport(handle);
998 struct hclge_dev *hdev = vport->back;
999 u32 speed_ability = hdev->hw.mac.speed_ability;
1003 case HCLGE_MAC_SPEED_10M:
1004 speed_bit = HCLGE_SUPPORT_10M_BIT;
1006 case HCLGE_MAC_SPEED_100M:
1007 speed_bit = HCLGE_SUPPORT_100M_BIT;
1009 case HCLGE_MAC_SPEED_1G:
1010 speed_bit = HCLGE_SUPPORT_1G_BIT;
1012 case HCLGE_MAC_SPEED_10G:
1013 speed_bit = HCLGE_SUPPORT_10G_BIT;
1015 case HCLGE_MAC_SPEED_25G:
1016 speed_bit = HCLGE_SUPPORT_25G_BIT;
1018 case HCLGE_MAC_SPEED_40G:
1019 speed_bit = HCLGE_SUPPORT_40G_BIT;
1021 case HCLGE_MAC_SPEED_50G:
1022 speed_bit = HCLGE_SUPPORT_50G_BIT;
1024 case HCLGE_MAC_SPEED_100G:
1025 speed_bit = HCLGE_SUPPORT_100G_BIT;
1027 case HCLGE_MAC_SPEED_200G:
1028 speed_bit = HCLGE_SUPPORT_200G_BIT;
1034 if (speed_bit & speed_ability)
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1042 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1045 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1048 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1051 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1054 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1057 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1064 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1067 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1070 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1073 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1076 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1079 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1081 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1087 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1090 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1093 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1096 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1099 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1102 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1109 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1112 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1115 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1118 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1121 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1124 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1127 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1134 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1137 switch (mac->speed) {
1138 case HCLGE_MAC_SPEED_10G:
1139 case HCLGE_MAC_SPEED_40G:
1140 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1143 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1145 case HCLGE_MAC_SPEED_25G:
1146 case HCLGE_MAC_SPEED_50G:
1147 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1150 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 BIT(HNAE3_FEC_AUTO);
1153 case HCLGE_MAC_SPEED_100G:
1154 case HCLGE_MAC_SPEED_200G:
1155 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1159 mac->fec_ability = 0;
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1167 struct hclge_mac *mac = &hdev->hw.mac;
1169 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1173 hclge_convert_setting_sr(mac, speed_ability);
1174 hclge_convert_setting_lr(mac, speed_ability);
1175 hclge_convert_setting_cr(mac, speed_ability);
1176 if (hnae3_dev_fec_supported(hdev))
1177 hclge_convert_setting_fec(mac);
1179 if (hnae3_dev_pause_supported(hdev))
1180 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1182 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1189 struct hclge_mac *mac = &hdev->hw.mac;
1191 hclge_convert_setting_kr(mac, speed_ability);
1192 if (hnae3_dev_fec_supported(hdev))
1193 hclge_convert_setting_fec(mac);
1195 if (hnae3_dev_pause_supported(hdev))
1196 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1198 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1205 unsigned long *supported = hdev->hw.mac.supported;
1207 /* default to support all speed for GE port */
1209 speed_ability = HCLGE_SUPPORT_GE;
1211 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1215 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1218 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1222 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1227 if (hnae3_dev_pause_supported(hdev)) {
1228 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1232 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1238 u8 media_type = hdev->hw.mac.media_type;
1240 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 hclge_parse_copper_link_mode(hdev, speed_ability);
1244 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 hclge_parse_backplane_link_mode(hdev, speed_ability);
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1250 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 return HCLGE_MAC_SPEED_200G;
1253 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 return HCLGE_MAC_SPEED_100G;
1256 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 return HCLGE_MAC_SPEED_50G;
1259 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 return HCLGE_MAC_SPEED_40G;
1262 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 return HCLGE_MAC_SPEED_25G;
1265 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 return HCLGE_MAC_SPEED_10G;
1268 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 return HCLGE_MAC_SPEED_1G;
1271 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 return HCLGE_MAC_SPEED_100M;
1274 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 return HCLGE_MAC_SPEED_10M;
1277 return HCLGE_MAC_SPEED_1G;
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1282 #define HCLGE_TX_SPARE_SIZE_UNIT 4096
1283 #define SPEED_ABILITY_EXT_SHIFT 8
1285 struct hclge_cfg_param_cmd *req;
1286 u64 mac_addr_tmp_high;
1287 u16 speed_ability_ext;
1291 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1293 /* get the configuration */
1294 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1295 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1296 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1297 HCLGE_CFG_TQP_DESC_N_M,
1298 HCLGE_CFG_TQP_DESC_N_S);
1300 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1301 HCLGE_CFG_PHY_ADDR_M,
1302 HCLGE_CFG_PHY_ADDR_S);
1303 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1304 HCLGE_CFG_MEDIA_TP_M,
1305 HCLGE_CFG_MEDIA_TP_S);
1306 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307 HCLGE_CFG_RX_BUF_LEN_M,
1308 HCLGE_CFG_RX_BUF_LEN_S);
1309 /* get mac_address */
1310 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1311 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1312 HCLGE_CFG_MAC_ADDR_H_M,
1313 HCLGE_CFG_MAC_ADDR_H_S);
1315 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1317 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1318 HCLGE_CFG_DEFAULT_SPEED_M,
1319 HCLGE_CFG_DEFAULT_SPEED_S);
1320 cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1321 HCLGE_CFG_RSS_SIZE_M,
1322 HCLGE_CFG_RSS_SIZE_S);
1324 for (i = 0; i < ETH_ALEN; i++)
1325 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1327 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1328 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1330 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1331 HCLGE_CFG_SPEED_ABILITY_M,
1332 HCLGE_CFG_SPEED_ABILITY_S);
1333 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1334 HCLGE_CFG_SPEED_ABILITY_EXT_M,
1335 HCLGE_CFG_SPEED_ABILITY_EXT_S);
1336 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1338 cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1339 HCLGE_CFG_VLAN_FLTR_CAP_M,
1340 HCLGE_CFG_VLAN_FLTR_CAP_S);
1342 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1343 HCLGE_CFG_UMV_TBL_SPACE_M,
1344 HCLGE_CFG_UMV_TBL_SPACE_S);
1345 if (!cfg->umv_space)
1346 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1348 cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1349 HCLGE_CFG_PF_RSS_SIZE_M,
1350 HCLGE_CFG_PF_RSS_SIZE_S);
1352 /* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a
1353 * power of 2, instead of reading out directly. This would
1354 * be more flexible for future changes and expansions.
1355 * When VF max rss size field is HCLGE_CFG_RSS_SIZE_S,
1356 * it does not make sense if PF's field is 0. In this case, PF and VF
1357 * has the same max rss size filed: HCLGE_CFG_RSS_SIZE_S.
1359 cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1360 1U << cfg->pf_rss_size_max :
1361 cfg->vf_rss_size_max;
1363 /* The unit of the tx spare buffer size queried from configuration
1364 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
1367 cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1368 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1369 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1370 cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1373 /* hclge_get_cfg: query the static parameter from flash
1374 * @hdev: pointer to struct hclge_dev
1375 * @hcfg: the config structure to be getted
1377 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1379 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1380 struct hclge_cfg_param_cmd *req;
1384 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1387 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1388 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1390 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1391 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1392 /* Len should be united by 4 bytes when send to hardware */
1393 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1394 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1395 req->offset = cpu_to_le32(offset);
1398 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1400 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1404 hclge_parse_cfg(hcfg, desc);
1409 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1411 #define HCLGE_MAX_NON_TSO_BD_NUM 8U
1413 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1415 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1416 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1417 ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1418 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1419 ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1420 ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1421 ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1424 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1425 struct hclge_desc *desc)
1427 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1428 struct hclge_dev_specs_0_cmd *req0;
1429 struct hclge_dev_specs_1_cmd *req1;
1431 req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1432 req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1434 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1435 ae_dev->dev_specs.rss_ind_tbl_size =
1436 le16_to_cpu(req0->rss_ind_tbl_size);
1437 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1438 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1439 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1440 ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1441 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1442 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1445 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1447 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1449 if (!dev_specs->max_non_tso_bd_num)
1450 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1451 if (!dev_specs->rss_ind_tbl_size)
1452 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1453 if (!dev_specs->rss_key_size)
1454 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1455 if (!dev_specs->max_tm_rate)
1456 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1457 if (!dev_specs->max_qset_num)
1458 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1459 if (!dev_specs->max_int_gl)
1460 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1461 if (!dev_specs->max_frm_size)
1462 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1465 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1467 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1471 /* set default specifications as devices lower than version V3 do not
1472 * support querying specifications from firmware.
1474 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1475 hclge_set_default_dev_specs(hdev);
1479 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1480 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1482 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1484 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1486 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1490 hclge_parse_dev_specs(hdev, desc);
1491 hclge_check_dev_specs(hdev);
1496 static int hclge_get_cap(struct hclge_dev *hdev)
1500 ret = hclge_query_function_status(hdev);
1502 dev_err(&hdev->pdev->dev,
1503 "query function status error %d.\n", ret);
1507 /* get pf resource */
1508 return hclge_query_pf_resource(hdev);
1511 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1513 #define HCLGE_MIN_TX_DESC 64
1514 #define HCLGE_MIN_RX_DESC 64
1516 if (!is_kdump_kernel())
1519 dev_info(&hdev->pdev->dev,
1520 "Running kdump kernel. Using minimal resources\n");
1522 /* minimal queue pairs equals to the number of vports */
1523 hdev->num_tqps = hdev->num_req_vfs + 1;
1524 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1525 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1528 static int hclge_configure(struct hclge_dev *hdev)
1530 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1531 struct hclge_cfg cfg;
1535 ret = hclge_get_cfg(hdev, &cfg);
1539 hdev->base_tqp_pid = 0;
1540 hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1541 hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1542 hdev->rx_buf_len = cfg.rx_buf_len;
1543 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1544 hdev->hw.mac.media_type = cfg.media_type;
1545 hdev->hw.mac.phy_addr = cfg.phy_addr;
1546 hdev->num_tx_desc = cfg.tqp_desc_num;
1547 hdev->num_rx_desc = cfg.tqp_desc_num;
1548 hdev->tm_info.num_pg = 1;
1549 hdev->tc_max = cfg.tc_num;
1550 hdev->tm_info.hw_pfc_map = 0;
1551 hdev->wanted_umv_size = cfg.umv_space;
1552 hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1553 hdev->gro_en = true;
1554 if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1555 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1557 if (hnae3_dev_fd_supported(hdev)) {
1559 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1562 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1564 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1565 cfg.default_speed, ret);
1569 hclge_parse_link_mode(hdev, cfg.speed_ability);
1571 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1573 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1574 (hdev->tc_max < 1)) {
1575 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1580 /* Dev does not support DCB */
1581 if (!hnae3_dev_dcb_supported(hdev)) {
1585 hdev->pfc_max = hdev->tc_max;
1588 hdev->tm_info.num_tc = 1;
1590 /* Currently not support uncontiuous tc */
1591 for (i = 0; i < hdev->tm_info.num_tc; i++)
1592 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1594 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1596 hclge_init_kdump_kernel_config(hdev);
1598 /* Set the init affinity based on pci func number */
1599 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1600 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1601 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1602 &hdev->affinity_mask);
1607 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1610 struct hclge_cfg_tso_status_cmd *req;
1611 struct hclge_desc desc;
1613 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1615 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1616 req->tso_mss_min = cpu_to_le16(tso_mss_min);
1617 req->tso_mss_max = cpu_to_le16(tso_mss_max);
1619 return hclge_cmd_send(&hdev->hw, &desc, 1);
1622 static int hclge_config_gro(struct hclge_dev *hdev)
1624 struct hclge_cfg_gro_status_cmd *req;
1625 struct hclge_desc desc;
1628 if (!hnae3_dev_gro_supported(hdev))
1631 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1632 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1634 req->gro_en = hdev->gro_en ? 1 : 0;
1636 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1638 dev_err(&hdev->pdev->dev,
1639 "GRO hardware config cmd failed, ret = %d\n", ret);
1644 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1646 struct hclge_tqp *tqp;
1649 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1650 sizeof(struct hclge_tqp), GFP_KERNEL);
1656 for (i = 0; i < hdev->num_tqps; i++) {
1657 tqp->dev = &hdev->pdev->dev;
1660 tqp->q.ae_algo = &ae_algo;
1661 tqp->q.buf_size = hdev->rx_buf_len;
1662 tqp->q.tx_desc_num = hdev->num_tx_desc;
1663 tqp->q.rx_desc_num = hdev->num_rx_desc;
1665 /* need an extended offset to configure queues >=
1666 * HCLGE_TQP_MAX_SIZE_DEV_V2
1668 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1669 tqp->q.io_base = hdev->hw.io_base +
1670 HCLGE_TQP_REG_OFFSET +
1671 i * HCLGE_TQP_REG_SIZE;
1673 tqp->q.io_base = hdev->hw.io_base +
1674 HCLGE_TQP_REG_OFFSET +
1675 HCLGE_TQP_EXT_REG_OFFSET +
1676 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1685 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1686 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1688 struct hclge_tqp_map_cmd *req;
1689 struct hclge_desc desc;
1692 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1694 req = (struct hclge_tqp_map_cmd *)desc.data;
1695 req->tqp_id = cpu_to_le16(tqp_pid);
1696 req->tqp_vf = func_id;
1697 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1699 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1700 req->tqp_vid = cpu_to_le16(tqp_vid);
1702 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1704 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1709 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1711 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1712 struct hclge_dev *hdev = vport->back;
1715 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1716 alloced < num_tqps; i++) {
1717 if (!hdev->htqp[i].alloced) {
1718 hdev->htqp[i].q.handle = &vport->nic;
1719 hdev->htqp[i].q.tqp_index = alloced;
1720 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1721 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1722 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1723 hdev->htqp[i].alloced = true;
1727 vport->alloc_tqps = alloced;
1728 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1729 vport->alloc_tqps / hdev->tm_info.num_tc);
1731 /* ensure one to one mapping between irq and queue at default */
1732 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1733 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1738 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1739 u16 num_tx_desc, u16 num_rx_desc)
1742 struct hnae3_handle *nic = &vport->nic;
1743 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1744 struct hclge_dev *hdev = vport->back;
1747 kinfo->num_tx_desc = num_tx_desc;
1748 kinfo->num_rx_desc = num_rx_desc;
1750 kinfo->rx_buf_len = hdev->rx_buf_len;
1751 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1753 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1754 sizeof(struct hnae3_queue *), GFP_KERNEL);
1758 ret = hclge_assign_tqp(vport, num_tqps);
1760 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1765 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1766 struct hclge_vport *vport)
1768 struct hnae3_handle *nic = &vport->nic;
1769 struct hnae3_knic_private_info *kinfo;
1772 kinfo = &nic->kinfo;
1773 for (i = 0; i < vport->alloc_tqps; i++) {
1774 struct hclge_tqp *q =
1775 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1779 is_pf = !(vport->vport_id);
1780 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1789 static int hclge_map_tqp(struct hclge_dev *hdev)
1791 struct hclge_vport *vport = hdev->vport;
1794 num_vport = hdev->num_req_vfs + 1;
1795 for (i = 0; i < num_vport; i++) {
1798 ret = hclge_map_tqp_to_vport(hdev, vport);
1808 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1810 struct hnae3_handle *nic = &vport->nic;
1811 struct hclge_dev *hdev = vport->back;
1814 nic->pdev = hdev->pdev;
1815 nic->ae_algo = &ae_algo;
1816 nic->numa_node_mask = hdev->numa_node_mask;
1818 ret = hclge_knic_setup(vport, num_tqps,
1819 hdev->num_tx_desc, hdev->num_rx_desc);
1821 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1826 static int hclge_alloc_vport(struct hclge_dev *hdev)
1828 struct pci_dev *pdev = hdev->pdev;
1829 struct hclge_vport *vport;
1835 /* We need to alloc a vport for main NIC of PF */
1836 num_vport = hdev->num_req_vfs + 1;
1838 if (hdev->num_tqps < num_vport) {
1839 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1840 hdev->num_tqps, num_vport);
1844 /* Alloc the same number of TQPs for every vport */
1845 tqp_per_vport = hdev->num_tqps / num_vport;
1846 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1848 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1853 hdev->vport = vport;
1854 hdev->num_alloc_vport = num_vport;
1856 if (IS_ENABLED(CONFIG_PCI_IOV))
1857 hdev->num_alloc_vfs = hdev->num_req_vfs;
1859 for (i = 0; i < num_vport; i++) {
1861 vport->vport_id = i;
1862 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1863 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1864 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1865 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1866 vport->req_vlan_fltr_en = true;
1867 INIT_LIST_HEAD(&vport->vlan_list);
1868 INIT_LIST_HEAD(&vport->uc_mac_list);
1869 INIT_LIST_HEAD(&vport->mc_mac_list);
1870 spin_lock_init(&vport->mac_list_lock);
1873 ret = hclge_vport_setup(vport, tqp_main_vport);
1875 ret = hclge_vport_setup(vport, tqp_per_vport);
1878 "vport setup failed for vport %d, %d\n",
1889 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1890 struct hclge_pkt_buf_alloc *buf_alloc)
1892 /* TX buffer size is unit by 128 byte */
1893 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1894 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1895 struct hclge_tx_buff_alloc_cmd *req;
1896 struct hclge_desc desc;
1900 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1902 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1903 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1904 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1906 req->tx_pkt_buff[i] =
1907 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1908 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1911 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1913 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1919 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1920 struct hclge_pkt_buf_alloc *buf_alloc)
1922 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1925 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1930 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1935 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1936 if (hdev->hw_tc_map & BIT(i))
1941 /* Get the number of pfc enabled TCs, which have private buffer */
1942 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1943 struct hclge_pkt_buf_alloc *buf_alloc)
1945 struct hclge_priv_buf *priv;
1949 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1950 priv = &buf_alloc->priv_buf[i];
1951 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1959 /* Get the number of pfc disabled TCs, which have private buffer */
1960 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1961 struct hclge_pkt_buf_alloc *buf_alloc)
1963 struct hclge_priv_buf *priv;
1967 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1968 priv = &buf_alloc->priv_buf[i];
1969 if (hdev->hw_tc_map & BIT(i) &&
1970 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1978 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1980 struct hclge_priv_buf *priv;
1984 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1985 priv = &buf_alloc->priv_buf[i];
1987 rx_priv += priv->buf_size;
1992 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1994 u32 i, total_tx_size = 0;
1996 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1997 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1999 return total_tx_size;
2002 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2003 struct hclge_pkt_buf_alloc *buf_alloc,
2006 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2007 u32 tc_num = hclge_get_tc_num(hdev);
2008 u32 shared_buf, aligned_mps;
2012 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2014 if (hnae3_dev_dcb_supported(hdev))
2015 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2018 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2019 + hdev->dv_buf_size;
2021 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2022 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2023 HCLGE_BUF_SIZE_UNIT);
2025 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2026 if (rx_all < rx_priv + shared_std)
2029 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2030 buf_alloc->s_buf.buf_size = shared_buf;
2031 if (hnae3_dev_dcb_supported(hdev)) {
2032 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2033 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2034 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2035 HCLGE_BUF_SIZE_UNIT);
2037 buf_alloc->s_buf.self.high = aligned_mps +
2038 HCLGE_NON_DCB_ADDITIONAL_BUF;
2039 buf_alloc->s_buf.self.low = aligned_mps;
2042 if (hnae3_dev_dcb_supported(hdev)) {
2043 hi_thrd = shared_buf - hdev->dv_buf_size;
2045 if (tc_num <= NEED_RESERVE_TC_NUM)
2046 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2050 hi_thrd = hi_thrd / tc_num;
2052 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2053 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2054 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2056 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2057 lo_thrd = aligned_mps;
2060 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2061 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2062 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2068 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2069 struct hclge_pkt_buf_alloc *buf_alloc)
2073 total_size = hdev->pkt_buf_size;
2075 /* alloc tx buffer for all enabled tc */
2076 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2077 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2079 if (hdev->hw_tc_map & BIT(i)) {
2080 if (total_size < hdev->tx_buf_size)
2083 priv->tx_buf_size = hdev->tx_buf_size;
2085 priv->tx_buf_size = 0;
2088 total_size -= priv->tx_buf_size;
2094 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2095 struct hclge_pkt_buf_alloc *buf_alloc)
2097 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2098 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2101 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2102 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2109 if (!(hdev->hw_tc_map & BIT(i)))
2114 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2115 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2116 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2117 HCLGE_BUF_SIZE_UNIT);
2120 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2124 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2127 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2130 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2131 struct hclge_pkt_buf_alloc *buf_alloc)
2133 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2134 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2137 /* let the last to be cleared first */
2138 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2139 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2140 unsigned int mask = BIT((unsigned int)i);
2142 if (hdev->hw_tc_map & mask &&
2143 !(hdev->tm_info.hw_pfc_map & mask)) {
2144 /* Clear the no pfc TC private buffer */
2152 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2153 no_pfc_priv_num == 0)
2157 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2160 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2161 struct hclge_pkt_buf_alloc *buf_alloc)
2163 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2164 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2167 /* let the last to be cleared first */
2168 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2169 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2170 unsigned int mask = BIT((unsigned int)i);
2172 if (hdev->hw_tc_map & mask &&
2173 hdev->tm_info.hw_pfc_map & mask) {
2174 /* Reduce the number of pfc TC with private buffer */
2182 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2187 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2190 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2191 struct hclge_pkt_buf_alloc *buf_alloc)
2193 #define COMPENSATE_BUFFER 0x3C00
2194 #define COMPENSATE_HALF_MPS_NUM 5
2195 #define PRIV_WL_GAP 0x1800
2197 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2198 u32 tc_num = hclge_get_tc_num(hdev);
2199 u32 half_mps = hdev->mps >> 1;
2204 rx_priv = rx_priv / tc_num;
2206 if (tc_num <= NEED_RESERVE_TC_NUM)
2207 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2209 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2210 COMPENSATE_HALF_MPS_NUM * half_mps;
2211 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2212 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2213 if (rx_priv < min_rx_priv)
2216 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2217 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2224 if (!(hdev->hw_tc_map & BIT(i)))
2228 priv->buf_size = rx_priv;
2229 priv->wl.high = rx_priv - hdev->dv_buf_size;
2230 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2233 buf_alloc->s_buf.buf_size = 0;
2238 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2239 * @hdev: pointer to struct hclge_dev
2240 * @buf_alloc: pointer to buffer calculation data
2241 * @return: 0: calculate successful, negative: fail
2243 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2244 struct hclge_pkt_buf_alloc *buf_alloc)
2246 /* When DCB is not supported, rx private buffer is not allocated. */
2247 if (!hnae3_dev_dcb_supported(hdev)) {
2248 u32 rx_all = hdev->pkt_buf_size;
2250 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2251 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2257 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2260 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2263 /* try to decrease the buffer size */
2264 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2267 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2270 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2276 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2277 struct hclge_pkt_buf_alloc *buf_alloc)
2279 struct hclge_rx_priv_buff_cmd *req;
2280 struct hclge_desc desc;
2284 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2285 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2287 /* Alloc private buffer TCs */
2288 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2289 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2292 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2294 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2298 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2299 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2301 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2303 dev_err(&hdev->pdev->dev,
2304 "rx private buffer alloc cmd failed %d\n", ret);
2309 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2310 struct hclge_pkt_buf_alloc *buf_alloc)
2312 struct hclge_rx_priv_wl_buf *req;
2313 struct hclge_priv_buf *priv;
2314 struct hclge_desc desc[2];
2318 for (i = 0; i < 2; i++) {
2319 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2321 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2323 /* The first descriptor set the NEXT bit to 1 */
2325 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2327 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2329 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2330 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2332 priv = &buf_alloc->priv_buf[idx];
2333 req->tc_wl[j].high =
2334 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2335 req->tc_wl[j].high |=
2336 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2338 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2339 req->tc_wl[j].low |=
2340 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2344 /* Send 2 descriptor at one time */
2345 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2347 dev_err(&hdev->pdev->dev,
2348 "rx private waterline config cmd failed %d\n",
2353 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2354 struct hclge_pkt_buf_alloc *buf_alloc)
2356 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2357 struct hclge_rx_com_thrd *req;
2358 struct hclge_desc desc[2];
2359 struct hclge_tc_thrd *tc;
2363 for (i = 0; i < 2; i++) {
2364 hclge_cmd_setup_basic_desc(&desc[i],
2365 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2366 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2368 /* The first descriptor set the NEXT bit to 1 */
2370 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2372 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2374 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2375 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2377 req->com_thrd[j].high =
2378 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2379 req->com_thrd[j].high |=
2380 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2381 req->com_thrd[j].low =
2382 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2383 req->com_thrd[j].low |=
2384 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2388 /* Send 2 descriptors at one time */
2389 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2391 dev_err(&hdev->pdev->dev,
2392 "common threshold config cmd failed %d\n", ret);
2396 static int hclge_common_wl_config(struct hclge_dev *hdev,
2397 struct hclge_pkt_buf_alloc *buf_alloc)
2399 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2400 struct hclge_rx_com_wl *req;
2401 struct hclge_desc desc;
2404 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2406 req = (struct hclge_rx_com_wl *)desc.data;
2407 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2408 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2410 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2411 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2413 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2415 dev_err(&hdev->pdev->dev,
2416 "common waterline config cmd failed %d\n", ret);
2421 int hclge_buffer_alloc(struct hclge_dev *hdev)
2423 struct hclge_pkt_buf_alloc *pkt_buf;
2426 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2430 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2432 dev_err(&hdev->pdev->dev,
2433 "could not calc tx buffer size for all TCs %d\n", ret);
2437 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2439 dev_err(&hdev->pdev->dev,
2440 "could not alloc tx buffers %d\n", ret);
2444 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2446 dev_err(&hdev->pdev->dev,
2447 "could not calc rx priv buffer size for all TCs %d\n",
2452 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2454 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2459 if (hnae3_dev_dcb_supported(hdev)) {
2460 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2462 dev_err(&hdev->pdev->dev,
2463 "could not configure rx private waterline %d\n",
2468 ret = hclge_common_thrd_config(hdev, pkt_buf);
2470 dev_err(&hdev->pdev->dev,
2471 "could not configure common threshold %d\n",
2477 ret = hclge_common_wl_config(hdev, pkt_buf);
2479 dev_err(&hdev->pdev->dev,
2480 "could not configure common waterline %d\n", ret);
2487 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2489 struct hnae3_handle *roce = &vport->roce;
2490 struct hnae3_handle *nic = &vport->nic;
2491 struct hclge_dev *hdev = vport->back;
2493 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2495 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2498 roce->rinfo.base_vector = hdev->roce_base_vector;
2500 roce->rinfo.netdev = nic->kinfo.netdev;
2501 roce->rinfo.roce_io_base = hdev->hw.io_base;
2502 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2504 roce->pdev = nic->pdev;
2505 roce->ae_algo = nic->ae_algo;
2506 roce->numa_node_mask = nic->numa_node_mask;
2511 static int hclge_init_msi(struct hclge_dev *hdev)
2513 struct pci_dev *pdev = hdev->pdev;
2517 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2519 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2522 "failed(%d) to allocate MSI/MSI-X vectors\n",
2526 if (vectors < hdev->num_msi)
2527 dev_warn(&hdev->pdev->dev,
2528 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2529 hdev->num_msi, vectors);
2531 hdev->num_msi = vectors;
2532 hdev->num_msi_left = vectors;
2534 hdev->base_msi_vector = pdev->irq;
2535 hdev->roce_base_vector = hdev->base_msi_vector +
2538 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2539 sizeof(u16), GFP_KERNEL);
2540 if (!hdev->vector_status) {
2541 pci_free_irq_vectors(pdev);
2545 for (i = 0; i < hdev->num_msi; i++)
2546 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2548 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2549 sizeof(int), GFP_KERNEL);
2550 if (!hdev->vector_irq) {
2551 pci_free_irq_vectors(pdev);
2558 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2560 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2561 duplex = HCLGE_MAC_FULL;
2566 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2569 struct hclge_config_mac_speed_dup_cmd *req;
2570 struct hclge_desc desc;
2573 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2575 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2578 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2581 case HCLGE_MAC_SPEED_10M:
2582 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2583 HCLGE_CFG_SPEED_S, 6);
2585 case HCLGE_MAC_SPEED_100M:
2586 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2587 HCLGE_CFG_SPEED_S, 7);
2589 case HCLGE_MAC_SPEED_1G:
2590 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2591 HCLGE_CFG_SPEED_S, 0);
2593 case HCLGE_MAC_SPEED_10G:
2594 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2595 HCLGE_CFG_SPEED_S, 1);
2597 case HCLGE_MAC_SPEED_25G:
2598 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2599 HCLGE_CFG_SPEED_S, 2);
2601 case HCLGE_MAC_SPEED_40G:
2602 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2603 HCLGE_CFG_SPEED_S, 3);
2605 case HCLGE_MAC_SPEED_50G:
2606 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2607 HCLGE_CFG_SPEED_S, 4);
2609 case HCLGE_MAC_SPEED_100G:
2610 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2611 HCLGE_CFG_SPEED_S, 5);
2613 case HCLGE_MAC_SPEED_200G:
2614 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2615 HCLGE_CFG_SPEED_S, 8);
2618 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2622 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2625 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2627 dev_err(&hdev->pdev->dev,
2628 "mac speed/duplex config cmd failed %d.\n", ret);
2635 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2637 struct hclge_mac *mac = &hdev->hw.mac;
2640 duplex = hclge_check_speed_dup(duplex, speed);
2641 if (!mac->support_autoneg && mac->speed == speed &&
2642 mac->duplex == duplex)
2645 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2649 hdev->hw.mac.speed = speed;
2650 hdev->hw.mac.duplex = duplex;
2655 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2658 struct hclge_vport *vport = hclge_get_vport(handle);
2659 struct hclge_dev *hdev = vport->back;
2661 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2664 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2666 struct hclge_config_auto_neg_cmd *req;
2667 struct hclge_desc desc;
2671 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2673 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2675 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2676 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2678 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2680 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2686 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2688 struct hclge_vport *vport = hclge_get_vport(handle);
2689 struct hclge_dev *hdev = vport->back;
2691 if (!hdev->hw.mac.support_autoneg) {
2693 dev_err(&hdev->pdev->dev,
2694 "autoneg is not supported by current port\n");
2701 return hclge_set_autoneg_en(hdev, enable);
2704 static int hclge_get_autoneg(struct hnae3_handle *handle)
2706 struct hclge_vport *vport = hclge_get_vport(handle);
2707 struct hclge_dev *hdev = vport->back;
2708 struct phy_device *phydev = hdev->hw.mac.phydev;
2711 return phydev->autoneg;
2713 return hdev->hw.mac.autoneg;
2716 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2718 struct hclge_vport *vport = hclge_get_vport(handle);
2719 struct hclge_dev *hdev = vport->back;
2722 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2724 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2727 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2730 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2732 struct hclge_vport *vport = hclge_get_vport(handle);
2733 struct hclge_dev *hdev = vport->back;
2735 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2736 return hclge_set_autoneg_en(hdev, !halt);
2741 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2743 struct hclge_config_fec_cmd *req;
2744 struct hclge_desc desc;
2747 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2749 req = (struct hclge_config_fec_cmd *)desc.data;
2750 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2751 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2752 if (fec_mode & BIT(HNAE3_FEC_RS))
2753 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2754 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2755 if (fec_mode & BIT(HNAE3_FEC_BASER))
2756 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2757 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2759 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2761 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2766 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2768 struct hclge_vport *vport = hclge_get_vport(handle);
2769 struct hclge_dev *hdev = vport->back;
2770 struct hclge_mac *mac = &hdev->hw.mac;
2773 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2774 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2778 ret = hclge_set_fec_hw(hdev, fec_mode);
2782 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2786 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2789 struct hclge_vport *vport = hclge_get_vport(handle);
2790 struct hclge_dev *hdev = vport->back;
2791 struct hclge_mac *mac = &hdev->hw.mac;
2794 *fec_ability = mac->fec_ability;
2796 *fec_mode = mac->fec_mode;
2799 static int hclge_mac_init(struct hclge_dev *hdev)
2801 struct hclge_mac *mac = &hdev->hw.mac;
2804 hdev->support_sfp_query = true;
2805 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2806 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2807 hdev->hw.mac.duplex);
2811 if (hdev->hw.mac.support_autoneg) {
2812 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2819 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2820 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2825 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2827 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2831 ret = hclge_set_default_loopback(hdev);
2835 ret = hclge_buffer_alloc(hdev);
2837 dev_err(&hdev->pdev->dev,
2838 "allocate buffer fail, ret=%d\n", ret);
2843 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2845 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2846 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2847 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2848 hclge_wq, &hdev->service_task, 0);
2851 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2853 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2854 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2855 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2856 hclge_wq, &hdev->service_task, 0);
2859 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2861 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2862 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2863 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2864 hclge_wq, &hdev->service_task, 0);
2867 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2869 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2870 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2871 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2872 hclge_wq, &hdev->service_task,
2876 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2878 struct hclge_link_status_cmd *req;
2879 struct hclge_desc desc;
2882 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2883 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2885 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2890 req = (struct hclge_link_status_cmd *)desc.data;
2891 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2892 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2897 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2899 struct phy_device *phydev = hdev->hw.mac.phydev;
2901 *link_status = HCLGE_LINK_STATUS_DOWN;
2903 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2906 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2909 return hclge_get_mac_link_status(hdev, link_status);
2912 static void hclge_push_link_status(struct hclge_dev *hdev)
2914 struct hclge_vport *vport;
2918 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2919 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2921 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2922 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2925 ret = hclge_push_vf_link_status(vport);
2927 dev_err(&hdev->pdev->dev,
2928 "failed to push link status to vf%u, ret = %d\n",
2934 static void hclge_update_link_status(struct hclge_dev *hdev)
2936 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2937 struct hnae3_handle *handle = &hdev->vport[0].nic;
2938 struct hnae3_client *rclient = hdev->roce_client;
2939 struct hnae3_client *client = hdev->nic_client;
2946 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2949 ret = hclge_get_mac_phy_link(hdev, &state);
2951 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2955 if (state != hdev->hw.mac.link) {
2956 hdev->hw.mac.link = state;
2957 client->ops->link_status_change(handle, state);
2958 hclge_config_mac_tnl_int(hdev, state);
2959 if (rclient && rclient->ops->link_status_change)
2960 rclient->ops->link_status_change(rhandle, state);
2962 hclge_push_link_status(hdev);
2965 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2968 static void hclge_update_port_capability(struct hclge_dev *hdev,
2969 struct hclge_mac *mac)
2971 if (hnae3_dev_fec_supported(hdev))
2972 /* update fec ability by speed */
2973 hclge_convert_setting_fec(mac);
2975 /* firmware can not identify back plane type, the media type
2976 * read from configuration can help deal it
2978 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2979 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2980 mac->module_type = HNAE3_MODULE_TYPE_KR;
2981 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2982 mac->module_type = HNAE3_MODULE_TYPE_TP;
2984 if (mac->support_autoneg) {
2985 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2986 linkmode_copy(mac->advertising, mac->supported);
2988 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2990 linkmode_zero(mac->advertising);
2994 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2996 struct hclge_sfp_info_cmd *resp;
2997 struct hclge_desc desc;
3000 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3001 resp = (struct hclge_sfp_info_cmd *)desc.data;
3002 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3003 if (ret == -EOPNOTSUPP) {
3004 dev_warn(&hdev->pdev->dev,
3005 "IMP do not support get SFP speed %d\n", ret);
3008 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3012 *speed = le32_to_cpu(resp->speed);
3017 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3019 struct hclge_sfp_info_cmd *resp;
3020 struct hclge_desc desc;
3023 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3024 resp = (struct hclge_sfp_info_cmd *)desc.data;
3026 resp->query_type = QUERY_ACTIVE_SPEED;
3028 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3029 if (ret == -EOPNOTSUPP) {
3030 dev_warn(&hdev->pdev->dev,
3031 "IMP does not support get SFP info %d\n", ret);
3034 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3038 /* In some case, mac speed get from IMP may be 0, it shouldn't be
3039 * set to mac->speed.
3041 if (!le32_to_cpu(resp->speed))
3044 mac->speed = le32_to_cpu(resp->speed);
3045 /* if resp->speed_ability is 0, it means it's an old version
3046 * firmware, do not update these params
3048 if (resp->speed_ability) {
3049 mac->module_type = le32_to_cpu(resp->module_type);
3050 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3051 mac->autoneg = resp->autoneg;
3052 mac->support_autoneg = resp->autoneg_ability;
3053 mac->speed_type = QUERY_ACTIVE_SPEED;
3054 if (!resp->active_fec)
3057 mac->fec_mode = BIT(resp->active_fec);
3059 mac->speed_type = QUERY_SFP_SPEED;
3065 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3066 struct ethtool_link_ksettings *cmd)
3068 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3069 struct hclge_vport *vport = hclge_get_vport(handle);
3070 struct hclge_phy_link_ksetting_0_cmd *req0;
3071 struct hclge_phy_link_ksetting_1_cmd *req1;
3072 u32 supported, advertising, lp_advertising;
3073 struct hclge_dev *hdev = vport->back;
3076 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3078 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3079 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3082 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3084 dev_err(&hdev->pdev->dev,
3085 "failed to get phy link ksetting, ret = %d.\n", ret);
3089 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3090 cmd->base.autoneg = req0->autoneg;
3091 cmd->base.speed = le32_to_cpu(req0->speed);
3092 cmd->base.duplex = req0->duplex;
3093 cmd->base.port = req0->port;
3094 cmd->base.transceiver = req0->transceiver;
3095 cmd->base.phy_address = req0->phy_address;
3096 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3097 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3098 supported = le32_to_cpu(req0->supported);
3099 advertising = le32_to_cpu(req0->advertising);
3100 lp_advertising = le32_to_cpu(req0->lp_advertising);
3101 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3103 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3105 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3108 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3109 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3110 cmd->base.master_slave_state = req1->master_slave_state;
3116 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3117 const struct ethtool_link_ksettings *cmd)
3119 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3120 struct hclge_vport *vport = hclge_get_vport(handle);
3121 struct hclge_phy_link_ksetting_0_cmd *req0;
3122 struct hclge_phy_link_ksetting_1_cmd *req1;
3123 struct hclge_dev *hdev = vport->back;
3127 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3128 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3129 (cmd->base.duplex != DUPLEX_HALF &&
3130 cmd->base.duplex != DUPLEX_FULL)))
3133 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3135 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3136 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3139 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3140 req0->autoneg = cmd->base.autoneg;
3141 req0->speed = cpu_to_le32(cmd->base.speed);
3142 req0->duplex = cmd->base.duplex;
3143 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3144 cmd->link_modes.advertising);
3145 req0->advertising = cpu_to_le32(advertising);
3146 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3148 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3149 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3151 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3153 dev_err(&hdev->pdev->dev,
3154 "failed to set phy link ksettings, ret = %d.\n", ret);
3158 hdev->hw.mac.autoneg = cmd->base.autoneg;
3159 hdev->hw.mac.speed = cmd->base.speed;
3160 hdev->hw.mac.duplex = cmd->base.duplex;
3161 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3166 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3168 struct ethtool_link_ksettings cmd;
3171 if (!hnae3_dev_phy_imp_supported(hdev))
3174 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3178 hdev->hw.mac.autoneg = cmd.base.autoneg;
3179 hdev->hw.mac.speed = cmd.base.speed;
3180 hdev->hw.mac.duplex = cmd.base.duplex;
3185 static int hclge_tp_port_init(struct hclge_dev *hdev)
3187 struct ethtool_link_ksettings cmd;
3189 if (!hnae3_dev_phy_imp_supported(hdev))
3192 cmd.base.autoneg = hdev->hw.mac.autoneg;
3193 cmd.base.speed = hdev->hw.mac.speed;
3194 cmd.base.duplex = hdev->hw.mac.duplex;
3195 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3197 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3200 static int hclge_update_port_info(struct hclge_dev *hdev)
3202 struct hclge_mac *mac = &hdev->hw.mac;
3203 int speed = HCLGE_MAC_SPEED_UNKNOWN;
3206 /* get the port info from SFP cmd if not copper port */
3207 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3208 return hclge_update_tp_port_info(hdev);
3210 /* if IMP does not support get SFP/qSFP info, return directly */
3211 if (!hdev->support_sfp_query)
3214 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3215 ret = hclge_get_sfp_info(hdev, mac);
3217 ret = hclge_get_sfp_speed(hdev, &speed);
3219 if (ret == -EOPNOTSUPP) {
3220 hdev->support_sfp_query = false;
3226 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3227 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3228 hclge_update_port_capability(hdev, mac);
3231 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3234 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3235 return 0; /* do nothing if no SFP */
3237 /* must config full duplex for SFP */
3238 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3242 static int hclge_get_status(struct hnae3_handle *handle)
3244 struct hclge_vport *vport = hclge_get_vport(handle);
3245 struct hclge_dev *hdev = vport->back;
3247 hclge_update_link_status(hdev);
3249 return hdev->hw.mac.link;
3252 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3254 if (!pci_num_vf(hdev->pdev)) {
3255 dev_err(&hdev->pdev->dev,
3256 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3260 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3261 dev_err(&hdev->pdev->dev,
3262 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3263 vf, pci_num_vf(hdev->pdev));
3267 /* VF start from 1 in vport */
3268 vf += HCLGE_VF_VPORT_START_NUM;
3269 return &hdev->vport[vf];
3272 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3273 struct ifla_vf_info *ivf)
3275 struct hclge_vport *vport = hclge_get_vport(handle);
3276 struct hclge_dev *hdev = vport->back;
3278 vport = hclge_get_vf_vport(hdev, vf);
3283 ivf->linkstate = vport->vf_info.link_state;
3284 ivf->spoofchk = vport->vf_info.spoofchk;
3285 ivf->trusted = vport->vf_info.trusted;
3286 ivf->min_tx_rate = 0;
3287 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3288 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3289 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3290 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3291 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3296 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3299 struct hclge_vport *vport = hclge_get_vport(handle);
3300 struct hclge_dev *hdev = vport->back;
3304 vport = hclge_get_vf_vport(hdev, vf);
3308 link_state_old = vport->vf_info.link_state;
3309 vport->vf_info.link_state = link_state;
3311 ret = hclge_push_vf_link_status(vport);
3313 vport->vf_info.link_state = link_state_old;
3314 dev_err(&hdev->pdev->dev,
3315 "failed to push vf%d link status, ret = %d\n", vf, ret);
3321 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3323 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3325 /* fetch the events from their corresponding regs */
3326 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3327 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3328 hw_err_src_reg = hclge_read_dev(&hdev->hw,
3329 HCLGE_RAS_PF_OTHER_INT_STS_REG);
3331 /* Assumption: If by any chance reset and mailbox events are reported
3332 * together then we will only process reset event in this go and will
3333 * defer the processing of the mailbox events. Since, we would have not
3334 * cleared RX CMDQ event this time we would receive again another
3335 * interrupt from H/W just for the mailbox.
3337 * check for vector0 reset event sources
3339 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3340 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3341 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3342 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3343 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3344 hdev->rst_stats.imp_rst_cnt++;
3345 return HCLGE_VECTOR0_EVENT_RST;
3348 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3349 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3350 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3351 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3352 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3353 hdev->rst_stats.global_rst_cnt++;
3354 return HCLGE_VECTOR0_EVENT_RST;
3357 /* check for vector0 msix event and hardware error event source */
3358 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3359 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3360 return HCLGE_VECTOR0_EVENT_ERR;
3362 /* check for vector0 ptp event source */
3363 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3364 *clearval = msix_src_reg;
3365 return HCLGE_VECTOR0_EVENT_PTP;
3368 /* check for vector0 mailbox(=CMDQ RX) event source */
3369 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3370 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3371 *clearval = cmdq_src_reg;
3372 return HCLGE_VECTOR0_EVENT_MBX;
3375 /* print other vector0 event source */
3376 dev_info(&hdev->pdev->dev,
3377 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3378 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3380 return HCLGE_VECTOR0_EVENT_OTHER;
3383 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3386 switch (event_type) {
3387 case HCLGE_VECTOR0_EVENT_PTP:
3388 case HCLGE_VECTOR0_EVENT_RST:
3389 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3391 case HCLGE_VECTOR0_EVENT_MBX:
3392 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3399 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3401 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3402 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3403 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3404 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3405 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3408 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3410 writel(enable ? 1 : 0, vector->addr);
3413 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3415 struct hclge_dev *hdev = data;
3416 unsigned long flags;
3420 hclge_enable_vector(&hdev->misc_vector, false);
3421 event_cause = hclge_check_event_cause(hdev, &clearval);
3423 /* vector 0 interrupt is shared with reset and mailbox source events.*/
3424 switch (event_cause) {
3425 case HCLGE_VECTOR0_EVENT_ERR:
3426 hclge_errhand_task_schedule(hdev);
3428 case HCLGE_VECTOR0_EVENT_RST:
3429 hclge_reset_task_schedule(hdev);
3431 case HCLGE_VECTOR0_EVENT_PTP:
3432 spin_lock_irqsave(&hdev->ptp->lock, flags);
3433 hclge_ptp_clean_tx_hwts(hdev);
3434 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3436 case HCLGE_VECTOR0_EVENT_MBX:
3437 /* If we are here then,
3438 * 1. Either we are not handling any mbx task and we are not
3441 * 2. We could be handling a mbx task but nothing more is
3443 * In both cases, we should schedule mbx task as there are more
3444 * mbx messages reported by this interrupt.
3446 hclge_mbx_task_schedule(hdev);
3449 dev_warn(&hdev->pdev->dev,
3450 "received unknown or unhandled event of vector0\n");
3454 hclge_clear_event_cause(hdev, event_cause, clearval);
3456 /* Enable interrupt if it is not caused by reset event or error event */
3457 if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3458 event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3459 event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3460 hclge_enable_vector(&hdev->misc_vector, true);
3465 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3467 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3468 dev_warn(&hdev->pdev->dev,
3469 "vector(vector_id %d) has been freed.\n", vector_id);
3473 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3474 hdev->num_msi_left += 1;
3475 hdev->num_msi_used -= 1;
3478 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3480 struct hclge_misc_vector *vector = &hdev->misc_vector;
3482 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3484 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3485 hdev->vector_status[0] = 0;
3487 hdev->num_msi_left -= 1;
3488 hdev->num_msi_used += 1;
3491 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3492 const cpumask_t *mask)
3494 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3497 cpumask_copy(&hdev->affinity_mask, mask);
3500 static void hclge_irq_affinity_release(struct kref *ref)
3504 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3506 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3507 &hdev->affinity_mask);
3509 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3510 hdev->affinity_notify.release = hclge_irq_affinity_release;
3511 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3512 &hdev->affinity_notify);
3515 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3517 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3518 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3521 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3525 hclge_get_misc_vector(hdev);
3527 /* this would be explicitly freed in the end */
3528 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3529 HCLGE_NAME, pci_name(hdev->pdev));
3530 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3531 0, hdev->misc_vector.name, hdev);
3533 hclge_free_vector(hdev, 0);
3534 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3535 hdev->misc_vector.vector_irq);
3541 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3543 free_irq(hdev->misc_vector.vector_irq, hdev);
3544 hclge_free_vector(hdev, 0);
3547 int hclge_notify_client(struct hclge_dev *hdev,
3548 enum hnae3_reset_notify_type type)
3550 struct hnae3_handle *handle = &hdev->vport[0].nic;
3551 struct hnae3_client *client = hdev->nic_client;
3554 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3557 if (!client->ops->reset_notify)
3560 ret = client->ops->reset_notify(handle, type);
3562 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3568 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3569 enum hnae3_reset_notify_type type)
3571 struct hnae3_handle *handle = &hdev->vport[0].roce;
3572 struct hnae3_client *client = hdev->roce_client;
3575 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3578 if (!client->ops->reset_notify)
3581 ret = client->ops->reset_notify(handle, type);
3583 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3589 static int hclge_reset_wait(struct hclge_dev *hdev)
3591 #define HCLGE_RESET_WATI_MS 100
3592 #define HCLGE_RESET_WAIT_CNT 350
3594 u32 val, reg, reg_bit;
3597 switch (hdev->reset_type) {
3598 case HNAE3_IMP_RESET:
3599 reg = HCLGE_GLOBAL_RESET_REG;
3600 reg_bit = HCLGE_IMP_RESET_BIT;
3602 case HNAE3_GLOBAL_RESET:
3603 reg = HCLGE_GLOBAL_RESET_REG;
3604 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3606 case HNAE3_FUNC_RESET:
3607 reg = HCLGE_FUN_RST_ING;
3608 reg_bit = HCLGE_FUN_RST_ING_B;
3611 dev_err(&hdev->pdev->dev,
3612 "Wait for unsupported reset type: %d\n",
3617 val = hclge_read_dev(&hdev->hw, reg);
3618 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3619 msleep(HCLGE_RESET_WATI_MS);
3620 val = hclge_read_dev(&hdev->hw, reg);
3624 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3625 dev_warn(&hdev->pdev->dev,
3626 "Wait for reset timeout: %d\n", hdev->reset_type);
3633 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3635 struct hclge_vf_rst_cmd *req;
3636 struct hclge_desc desc;
3638 req = (struct hclge_vf_rst_cmd *)desc.data;
3639 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3640 req->dest_vfid = func_id;
3645 return hclge_cmd_send(&hdev->hw, &desc, 1);
3648 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3652 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3653 struct hclge_vport *vport = &hdev->vport[i];
3656 /* Send cmd to set/clear VF's FUNC_RST_ING */
3657 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3659 dev_err(&hdev->pdev->dev,
3660 "set vf(%u) rst failed %d!\n",
3661 vport->vport_id, ret);
3665 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3668 /* Inform VF to process the reset.
3669 * hclge_inform_reset_assert_to_vf may fail if VF
3670 * driver is not loaded.
3672 ret = hclge_inform_reset_assert_to_vf(vport);
3674 dev_warn(&hdev->pdev->dev,
3675 "inform reset to vf(%u) failed %d!\n",
3676 vport->vport_id, ret);
3682 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3684 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3685 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3686 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3689 hclge_mbx_handler(hdev);
3691 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3694 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3696 struct hclge_pf_rst_sync_cmd *req;
3697 struct hclge_desc desc;
3701 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3702 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3705 /* vf need to down netdev by mbx during PF or FLR reset */
3706 hclge_mailbox_service_task(hdev);
3708 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3709 /* for compatible with old firmware, wait
3710 * 100 ms for VF to stop IO
3712 if (ret == -EOPNOTSUPP) {
3713 msleep(HCLGE_RESET_SYNC_TIME);
3716 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3719 } else if (req->all_vf_ready) {
3722 msleep(HCLGE_PF_RESET_SYNC_TIME);
3723 hclge_cmd_reuse_desc(&desc, true);
3724 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3726 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3729 void hclge_report_hw_error(struct hclge_dev *hdev,
3730 enum hnae3_hw_error_type type)
3732 struct hnae3_client *client = hdev->nic_client;
3734 if (!client || !client->ops->process_hw_error ||
3735 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3738 client->ops->process_hw_error(&hdev->vport[0].nic, type);
3741 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3745 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3746 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3747 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3748 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3749 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3752 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3753 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3754 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3755 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3759 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3761 struct hclge_desc desc;
3762 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3765 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3766 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3767 req->fun_reset_vfid = func_id;
3769 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3771 dev_err(&hdev->pdev->dev,
3772 "send function reset cmd fail, status =%d\n", ret);
3777 static void hclge_do_reset(struct hclge_dev *hdev)
3779 struct hnae3_handle *handle = &hdev->vport[0].nic;
3780 struct pci_dev *pdev = hdev->pdev;
3783 if (hclge_get_hw_reset_stat(handle)) {
3784 dev_info(&pdev->dev, "hardware reset not finish\n");
3785 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3786 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3787 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3791 switch (hdev->reset_type) {
3792 case HNAE3_GLOBAL_RESET:
3793 dev_info(&pdev->dev, "global reset requested\n");
3794 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3795 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3796 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3798 case HNAE3_FUNC_RESET:
3799 dev_info(&pdev->dev, "PF reset requested\n");
3800 /* schedule again to check later */
3801 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3802 hclge_reset_task_schedule(hdev);
3805 dev_warn(&pdev->dev,
3806 "unsupported reset type: %d\n", hdev->reset_type);
3811 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3812 unsigned long *addr)
3814 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3815 struct hclge_dev *hdev = ae_dev->priv;
3817 /* return the highest priority reset level amongst all */
3818 if (test_bit(HNAE3_IMP_RESET, addr)) {
3819 rst_level = HNAE3_IMP_RESET;
3820 clear_bit(HNAE3_IMP_RESET, addr);
3821 clear_bit(HNAE3_GLOBAL_RESET, addr);
3822 clear_bit(HNAE3_FUNC_RESET, addr);
3823 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3824 rst_level = HNAE3_GLOBAL_RESET;
3825 clear_bit(HNAE3_GLOBAL_RESET, addr);
3826 clear_bit(HNAE3_FUNC_RESET, addr);
3827 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3828 rst_level = HNAE3_FUNC_RESET;
3829 clear_bit(HNAE3_FUNC_RESET, addr);
3830 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3831 rst_level = HNAE3_FLR_RESET;
3832 clear_bit(HNAE3_FLR_RESET, addr);
3835 if (hdev->reset_type != HNAE3_NONE_RESET &&
3836 rst_level < hdev->reset_type)
3837 return HNAE3_NONE_RESET;
3842 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3846 switch (hdev->reset_type) {
3847 case HNAE3_IMP_RESET:
3848 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3850 case HNAE3_GLOBAL_RESET:
3851 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3860 /* For revision 0x20, the reset interrupt source
3861 * can only be cleared after hardware reset done
3863 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3864 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3867 hclge_enable_vector(&hdev->misc_vector, true);
3870 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3874 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3876 reg_val |= HCLGE_NIC_SW_RST_RDY;
3878 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3880 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3883 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3887 ret = hclge_set_all_vf_rst(hdev, true);
3891 hclge_func_reset_sync_vf(hdev);
3896 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3901 switch (hdev->reset_type) {
3902 case HNAE3_FUNC_RESET:
3903 ret = hclge_func_reset_notify_vf(hdev);
3907 ret = hclge_func_reset_cmd(hdev, 0);
3909 dev_err(&hdev->pdev->dev,
3910 "asserting function reset fail %d!\n", ret);
3914 /* After performaning pf reset, it is not necessary to do the
3915 * mailbox handling or send any command to firmware, because
3916 * any mailbox handling or command to firmware is only valid
3917 * after hclge_cmd_init is called.
3919 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3920 hdev->rst_stats.pf_rst_cnt++;
3922 case HNAE3_FLR_RESET:
3923 ret = hclge_func_reset_notify_vf(hdev);
3927 case HNAE3_IMP_RESET:
3928 hclge_handle_imp_error(hdev);
3929 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3930 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3931 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3937 /* inform hardware that preparatory work is done */
3938 msleep(HCLGE_RESET_SYNC_TIME);
3939 hclge_reset_handshake(hdev, true);
3940 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3945 static void hclge_show_rst_info(struct hclge_dev *hdev)
3949 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3953 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3955 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3960 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3962 #define MAX_RESET_FAIL_CNT 5
3964 if (hdev->reset_pending) {
3965 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3966 hdev->reset_pending);
3968 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3969 HCLGE_RESET_INT_M) {
3970 dev_info(&hdev->pdev->dev,
3971 "reset failed because new reset interrupt\n");
3972 hclge_clear_reset_cause(hdev);
3974 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3975 hdev->rst_stats.reset_fail_cnt++;
3976 set_bit(hdev->reset_type, &hdev->reset_pending);
3977 dev_info(&hdev->pdev->dev,
3978 "re-schedule reset task(%u)\n",
3979 hdev->rst_stats.reset_fail_cnt);
3983 hclge_clear_reset_cause(hdev);
3985 /* recover the handshake status when reset fail */
3986 hclge_reset_handshake(hdev, true);
3988 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3990 hclge_show_rst_info(hdev);
3992 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3997 static void hclge_update_reset_level(struct hclge_dev *hdev)
3999 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4000 enum hnae3_reset_type reset_level;
4002 /* reset request will not be set during reset, so clear
4003 * pending reset request to avoid unnecessary reset
4004 * caused by the same reason.
4006 hclge_get_reset_level(ae_dev, &hdev->reset_request);
4008 /* if default_reset_request has a higher level reset request,
4009 * it should be handled as soon as possible. since some errors
4010 * need this kind of reset to fix.
4012 reset_level = hclge_get_reset_level(ae_dev,
4013 &hdev->default_reset_request);
4014 if (reset_level != HNAE3_NONE_RESET)
4015 set_bit(reset_level, &hdev->reset_request);
4018 static int hclge_set_rst_done(struct hclge_dev *hdev)
4020 struct hclge_pf_rst_done_cmd *req;
4021 struct hclge_desc desc;
4024 req = (struct hclge_pf_rst_done_cmd *)desc.data;
4025 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4026 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4028 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4029 /* To be compatible with the old firmware, which does not support
4030 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4033 if (ret == -EOPNOTSUPP) {
4034 dev_warn(&hdev->pdev->dev,
4035 "current firmware does not support command(0x%x)!\n",
4036 HCLGE_OPC_PF_RST_DONE);
4039 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4046 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4050 switch (hdev->reset_type) {
4051 case HNAE3_FUNC_RESET:
4052 case HNAE3_FLR_RESET:
4053 ret = hclge_set_all_vf_rst(hdev, false);
4055 case HNAE3_GLOBAL_RESET:
4056 case HNAE3_IMP_RESET:
4057 ret = hclge_set_rst_done(hdev);
4063 /* clear up the handshake status after re-initialize done */
4064 hclge_reset_handshake(hdev, false);
4069 static int hclge_reset_stack(struct hclge_dev *hdev)
4073 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4077 ret = hclge_reset_ae_dev(hdev->ae_dev);
4081 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4084 static int hclge_reset_prepare(struct hclge_dev *hdev)
4088 hdev->rst_stats.reset_cnt++;
4089 /* perform reset of the stack & ae device for a client */
4090 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4095 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4100 return hclge_reset_prepare_wait(hdev);
4103 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4107 hdev->rst_stats.hw_reset_done_cnt++;
4109 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4114 ret = hclge_reset_stack(hdev);
4119 hclge_clear_reset_cause(hdev);
4121 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4122 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4126 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4129 ret = hclge_reset_prepare_up(hdev);
4134 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4139 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4143 hdev->last_reset_time = jiffies;
4144 hdev->rst_stats.reset_fail_cnt = 0;
4145 hdev->rst_stats.reset_done_cnt++;
4146 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4148 hclge_update_reset_level(hdev);
4153 static void hclge_reset(struct hclge_dev *hdev)
4155 if (hclge_reset_prepare(hdev))
4158 if (hclge_reset_wait(hdev))
4161 if (hclge_reset_rebuild(hdev))
4167 if (hclge_reset_err_handle(hdev))
4168 hclge_reset_task_schedule(hdev);
4171 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4173 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4174 struct hclge_dev *hdev = ae_dev->priv;
4176 /* We might end up getting called broadly because of 2 below cases:
4177 * 1. Recoverable error was conveyed through APEI and only way to bring
4178 * normalcy is to reset.
4179 * 2. A new reset request from the stack due to timeout
4181 * check if this is a new reset request and we are not here just because
4182 * last reset attempt did not succeed and watchdog hit us again. We will
4183 * know this if last reset request did not occur very recently (watchdog
4184 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
4185 * In case of new request we reset the "reset level" to PF reset.
4186 * And if it is a repeat reset request of the most recent one then we
4187 * want to make sure we throttle the reset request. Therefore, we will
4188 * not allow it again before 3*HZ times.
4191 if (time_before(jiffies, (hdev->last_reset_time +
4192 HCLGE_RESET_INTERVAL))) {
4193 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4197 if (hdev->default_reset_request) {
4199 hclge_get_reset_level(ae_dev,
4200 &hdev->default_reset_request);
4201 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4202 hdev->reset_level = HNAE3_FUNC_RESET;
4205 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4208 /* request reset & schedule reset task */
4209 set_bit(hdev->reset_level, &hdev->reset_request);
4210 hclge_reset_task_schedule(hdev);
4212 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4213 hdev->reset_level++;
4216 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4217 enum hnae3_reset_type rst_type)
4219 struct hclge_dev *hdev = ae_dev->priv;
4221 set_bit(rst_type, &hdev->default_reset_request);
4224 static void hclge_reset_timer(struct timer_list *t)
4226 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4228 /* if default_reset_request has no value, it means that this reset
4229 * request has already be handled, so just return here
4231 if (!hdev->default_reset_request)
4234 dev_info(&hdev->pdev->dev,
4235 "triggering reset in reset timer\n");
4236 hclge_reset_event(hdev->pdev, NULL);
4239 static void hclge_reset_subtask(struct hclge_dev *hdev)
4241 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4243 /* check if there is any ongoing reset in the hardware. This status can
4244 * be checked from reset_pending. If there is then, we need to wait for
4245 * hardware to complete reset.
4246 * a. If we are able to figure out in reasonable time that hardware
4247 * has fully resetted then, we can proceed with driver, client
4249 * b. else, we can come back later to check this status so re-sched
4252 hdev->last_reset_time = jiffies;
4253 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4254 if (hdev->reset_type != HNAE3_NONE_RESET)
4257 /* check if we got any *new* reset requests to be honored */
4258 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4259 if (hdev->reset_type != HNAE3_NONE_RESET)
4260 hclge_do_reset(hdev);
4262 hdev->reset_type = HNAE3_NONE_RESET;
4265 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4267 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4268 enum hnae3_reset_type reset_type;
4270 if (ae_dev->hw_err_reset_req) {
4271 reset_type = hclge_get_reset_level(ae_dev,
4272 &ae_dev->hw_err_reset_req);
4273 hclge_set_def_reset_request(ae_dev, reset_type);
4276 if (hdev->default_reset_request && ae_dev->ops->reset_event)
4277 ae_dev->ops->reset_event(hdev->pdev, NULL);
4279 /* enable interrupt after error handling complete */
4280 hclge_enable_vector(&hdev->misc_vector, true);
4283 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4285 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4287 ae_dev->hw_err_reset_req = 0;
4289 if (hclge_find_error_source(hdev)) {
4290 hclge_handle_error_info_log(ae_dev);
4291 hclge_handle_mac_tnl(hdev);
4294 hclge_handle_err_reset_request(hdev);
4297 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4299 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4300 struct device *dev = &hdev->pdev->dev;
4303 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4304 if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4305 if (hclge_handle_hw_msix_error
4306 (hdev, &hdev->default_reset_request))
4307 dev_info(dev, "received msix interrupt 0x%x\n",
4311 hclge_handle_hw_ras_error(ae_dev);
4313 hclge_handle_err_reset_request(hdev);
4316 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4318 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4321 if (hnae3_dev_ras_imp_supported(hdev))
4322 hclge_handle_err_recovery(hdev);
4324 hclge_misc_err_recovery(hdev);
4327 static void hclge_reset_service_task(struct hclge_dev *hdev)
4329 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4332 down(&hdev->reset_sem);
4333 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4335 hclge_reset_subtask(hdev);
4337 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4338 up(&hdev->reset_sem);
4341 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4345 /* start from vport 1 for PF is always alive */
4346 for (i = 1; i < hdev->num_alloc_vport; i++) {
4347 struct hclge_vport *vport = &hdev->vport[i];
4349 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4350 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4352 /* If vf is not alive, set to default value */
4353 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4354 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4358 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4360 unsigned long delta = round_jiffies_relative(HZ);
4362 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4365 /* Always handle the link updating to make sure link state is
4366 * updated when it is triggered by mbx.
4368 hclge_update_link_status(hdev);
4369 hclge_sync_mac_table(hdev);
4370 hclge_sync_promisc_mode(hdev);
4371 hclge_sync_fd_table(hdev);
4373 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4374 delta = jiffies - hdev->last_serv_processed;
4376 if (delta < round_jiffies_relative(HZ)) {
4377 delta = round_jiffies_relative(HZ) - delta;
4382 hdev->serv_processed_cnt++;
4383 hclge_update_vport_alive(hdev);
4385 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4386 hdev->last_serv_processed = jiffies;
4390 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4391 hclge_update_stats_for_all(hdev);
4393 hclge_update_port_info(hdev);
4394 hclge_sync_vlan_filter(hdev);
4396 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4397 hclge_rfs_filter_expire(hdev);
4399 hdev->last_serv_processed = jiffies;
4402 hclge_task_schedule(hdev, delta);
4405 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4407 unsigned long flags;
4409 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4410 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4411 !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4414 /* to prevent concurrence with the irq handler */
4415 spin_lock_irqsave(&hdev->ptp->lock, flags);
4417 /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4418 * handler may handle it just before spin_lock_irqsave().
4420 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4421 hclge_ptp_clean_tx_hwts(hdev);
4423 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4426 static void hclge_service_task(struct work_struct *work)
4428 struct hclge_dev *hdev =
4429 container_of(work, struct hclge_dev, service_task.work);
4431 hclge_errhand_service_task(hdev);
4432 hclge_reset_service_task(hdev);
4433 hclge_ptp_service_task(hdev);
4434 hclge_mailbox_service_task(hdev);
4435 hclge_periodic_service_task(hdev);
4437 /* Handle error recovery, reset and mbx again in case periodical task
4438 * delays the handling by calling hclge_task_schedule() in
4439 * hclge_periodic_service_task().
4441 hclge_errhand_service_task(hdev);
4442 hclge_reset_service_task(hdev);
4443 hclge_mailbox_service_task(hdev);
4446 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4448 /* VF handle has no client */
4449 if (!handle->client)
4450 return container_of(handle, struct hclge_vport, nic);
4451 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4452 return container_of(handle, struct hclge_vport, roce);
4454 return container_of(handle, struct hclge_vport, nic);
4457 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4458 struct hnae3_vector_info *vector_info)
4460 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4462 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4464 /* need an extend offset to config vector >= 64 */
4465 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4466 vector_info->io_addr = hdev->hw.io_base +
4467 HCLGE_VECTOR_REG_BASE +
4468 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4470 vector_info->io_addr = hdev->hw.io_base +
4471 HCLGE_VECTOR_EXT_REG_BASE +
4472 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4473 HCLGE_VECTOR_REG_OFFSET_H +
4474 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4475 HCLGE_VECTOR_REG_OFFSET;
4477 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4478 hdev->vector_irq[idx] = vector_info->vector;
4481 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4482 struct hnae3_vector_info *vector_info)
4484 struct hclge_vport *vport = hclge_get_vport(handle);
4485 struct hnae3_vector_info *vector = vector_info;
4486 struct hclge_dev *hdev = vport->back;
4491 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4492 vector_num = min(hdev->num_msi_left, vector_num);
4494 for (j = 0; j < vector_num; j++) {
4495 while (++i < hdev->num_nic_msi) {
4496 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4497 hclge_get_vector_info(hdev, i, vector);
4505 hdev->num_msi_left -= alloc;
4506 hdev->num_msi_used += alloc;
4511 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4515 for (i = 0; i < hdev->num_msi; i++)
4516 if (vector == hdev->vector_irq[i])
4522 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4524 struct hclge_vport *vport = hclge_get_vport(handle);
4525 struct hclge_dev *hdev = vport->back;
4528 vector_id = hclge_get_vector_index(hdev, vector);
4529 if (vector_id < 0) {
4530 dev_err(&hdev->pdev->dev,
4531 "Get vector index fail. vector = %d\n", vector);
4535 hclge_free_vector(hdev, vector_id);
4540 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4542 return HCLGE_RSS_KEY_SIZE;
4545 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4546 const u8 hfunc, const u8 *key)
4548 struct hclge_rss_config_cmd *req;
4549 unsigned int key_offset = 0;
4550 struct hclge_desc desc;
4555 key_counts = HCLGE_RSS_KEY_SIZE;
4556 req = (struct hclge_rss_config_cmd *)desc.data;
4558 while (key_counts) {
4559 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4562 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4563 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4565 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4566 memcpy(req->hash_key,
4567 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4569 key_counts -= key_size;
4571 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4573 dev_err(&hdev->pdev->dev,
4574 "Configure RSS config fail, status = %d\n",
4582 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4584 struct hclge_rss_indirection_table_cmd *req;
4585 struct hclge_desc desc;
4586 int rss_cfg_tbl_num;
4594 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4595 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4596 HCLGE_RSS_CFG_TBL_SIZE;
4598 for (i = 0; i < rss_cfg_tbl_num; i++) {
4599 hclge_cmd_setup_basic_desc
4600 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4602 req->start_table_index =
4603 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4604 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4605 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4606 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4607 req->rss_qid_l[j] = qid & 0xff;
4609 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4610 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4611 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4612 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4614 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4616 dev_err(&hdev->pdev->dev,
4617 "Configure rss indir table fail,status = %d\n",
4625 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4626 u16 *tc_size, u16 *tc_offset)
4628 struct hclge_rss_tc_mode_cmd *req;
4629 struct hclge_desc desc;
4633 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4634 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4636 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4639 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4640 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4641 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4642 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4643 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4644 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4645 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4647 req->rss_tc_mode[i] = cpu_to_le16(mode);
4650 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4652 dev_err(&hdev->pdev->dev,
4653 "Configure rss tc mode fail, status = %d\n", ret);
4658 static void hclge_get_rss_type(struct hclge_vport *vport)
4660 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4661 vport->rss_tuple_sets.ipv4_udp_en ||
4662 vport->rss_tuple_sets.ipv4_sctp_en ||
4663 vport->rss_tuple_sets.ipv6_tcp_en ||
4664 vport->rss_tuple_sets.ipv6_udp_en ||
4665 vport->rss_tuple_sets.ipv6_sctp_en)
4666 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4667 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4668 vport->rss_tuple_sets.ipv6_fragment_en)
4669 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4671 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4674 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4676 struct hclge_rss_input_tuple_cmd *req;
4677 struct hclge_desc desc;
4680 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4682 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4684 /* Get the tuple cfg from pf */
4685 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4686 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4687 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4688 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4689 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4690 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4691 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4692 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4693 hclge_get_rss_type(&hdev->vport[0]);
4694 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4696 dev_err(&hdev->pdev->dev,
4697 "Configure rss input fail, status = %d\n", ret);
4701 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4704 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4705 struct hclge_vport *vport = hclge_get_vport(handle);
4708 /* Get hash algorithm */
4710 switch (vport->rss_algo) {
4711 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4712 *hfunc = ETH_RSS_HASH_TOP;
4714 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4715 *hfunc = ETH_RSS_HASH_XOR;
4718 *hfunc = ETH_RSS_HASH_UNKNOWN;
4723 /* Get the RSS Key required by the user */
4725 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4727 /* Get indirect table */
4729 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4730 indir[i] = vport->rss_indirection_tbl[i];
4735 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4736 const u8 *key, const u8 hfunc)
4738 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4739 struct hclge_vport *vport = hclge_get_vport(handle);
4740 struct hclge_dev *hdev = vport->back;
4744 /* Set the RSS Hash Key if specififed by the user */
4747 case ETH_RSS_HASH_TOP:
4748 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4750 case ETH_RSS_HASH_XOR:
4751 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4753 case ETH_RSS_HASH_NO_CHANGE:
4754 hash_algo = vport->rss_algo;
4760 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4764 /* Update the shadow RSS key with user specified qids */
4765 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4766 vport->rss_algo = hash_algo;
4769 /* Update the shadow RSS table with user specified qids */
4770 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4771 vport->rss_indirection_tbl[i] = indir[i];
4773 /* Update the hardware */
4774 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4777 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4779 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4781 if (nfc->data & RXH_L4_B_2_3)
4782 hash_sets |= HCLGE_D_PORT_BIT;
4784 hash_sets &= ~HCLGE_D_PORT_BIT;
4786 if (nfc->data & RXH_IP_SRC)
4787 hash_sets |= HCLGE_S_IP_BIT;
4789 hash_sets &= ~HCLGE_S_IP_BIT;
4791 if (nfc->data & RXH_IP_DST)
4792 hash_sets |= HCLGE_D_IP_BIT;
4794 hash_sets &= ~HCLGE_D_IP_BIT;
4796 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4797 hash_sets |= HCLGE_V_TAG_BIT;
4802 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4803 struct ethtool_rxnfc *nfc,
4804 struct hclge_rss_input_tuple_cmd *req)
4806 struct hclge_dev *hdev = vport->back;
4809 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4810 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4811 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4812 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4813 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4814 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4815 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4816 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4818 tuple_sets = hclge_get_rss_hash_bits(nfc);
4819 switch (nfc->flow_type) {
4821 req->ipv4_tcp_en = tuple_sets;
4824 req->ipv6_tcp_en = tuple_sets;
4827 req->ipv4_udp_en = tuple_sets;
4830 req->ipv6_udp_en = tuple_sets;
4833 req->ipv4_sctp_en = tuple_sets;
4836 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4837 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4840 req->ipv6_sctp_en = tuple_sets;
4843 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4846 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4855 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4856 struct ethtool_rxnfc *nfc)
4858 struct hclge_vport *vport = hclge_get_vport(handle);
4859 struct hclge_dev *hdev = vport->back;
4860 struct hclge_rss_input_tuple_cmd *req;
4861 struct hclge_desc desc;
4864 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4865 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4868 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4869 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4871 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4873 dev_err(&hdev->pdev->dev,
4874 "failed to init rss tuple cmd, ret = %d\n", ret);
4878 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4880 dev_err(&hdev->pdev->dev,
4881 "Set rss tuple fail, status = %d\n", ret);
4885 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4886 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4887 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4888 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4889 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4890 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4891 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4892 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4893 hclge_get_rss_type(vport);
4897 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4900 switch (flow_type) {
4902 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4905 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4908 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4911 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4914 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4917 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4921 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4930 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4934 if (tuple_sets & HCLGE_D_PORT_BIT)
4935 tuple_data |= RXH_L4_B_2_3;
4936 if (tuple_sets & HCLGE_S_PORT_BIT)
4937 tuple_data |= RXH_L4_B_0_1;
4938 if (tuple_sets & HCLGE_D_IP_BIT)
4939 tuple_data |= RXH_IP_DST;
4940 if (tuple_sets & HCLGE_S_IP_BIT)
4941 tuple_data |= RXH_IP_SRC;
4946 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4947 struct ethtool_rxnfc *nfc)
4949 struct hclge_vport *vport = hclge_get_vport(handle);
4955 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4956 if (ret || !tuple_sets)
4959 nfc->data = hclge_convert_rss_tuple(tuple_sets);
4964 static int hclge_get_tc_size(struct hnae3_handle *handle)
4966 struct hclge_vport *vport = hclge_get_vport(handle);
4967 struct hclge_dev *hdev = vport->back;
4969 return hdev->pf_rss_size_max;
4972 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4974 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4975 struct hclge_vport *vport = hdev->vport;
4976 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4977 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4978 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4979 struct hnae3_tc_info *tc_info;
4984 tc_info = &vport->nic.kinfo.tc_info;
4985 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4986 rss_size = tc_info->tqp_count[i];
4989 if (!(hdev->hw_tc_map & BIT(i)))
4992 /* tc_size set to hardware is the log2 of roundup power of two
4993 * of rss_size, the acutal queue size is limited by indirection
4996 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4998 dev_err(&hdev->pdev->dev,
4999 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
5004 roundup_size = roundup_pow_of_two(rss_size);
5005 roundup_size = ilog2(roundup_size);
5008 tc_size[i] = roundup_size;
5009 tc_offset[i] = tc_info->tqp_offset[i];
5012 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5015 int hclge_rss_init_hw(struct hclge_dev *hdev)
5017 struct hclge_vport *vport = hdev->vport;
5018 u16 *rss_indir = vport[0].rss_indirection_tbl;
5019 u8 *key = vport[0].rss_hash_key;
5020 u8 hfunc = vport[0].rss_algo;
5023 ret = hclge_set_rss_indir_table(hdev, rss_indir);
5027 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5031 ret = hclge_set_rss_input_tuple(hdev);
5035 return hclge_init_rss_tc_mode(hdev);
5038 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5040 struct hclge_vport *vport = &hdev->vport[0];
5043 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5044 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5047 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5049 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5050 int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5051 struct hclge_vport *vport = &hdev->vport[0];
5054 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5055 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5057 vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5058 vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5059 vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5060 vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5061 vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5062 vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5063 vport->rss_tuple_sets.ipv6_sctp_en =
5064 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5065 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5066 HCLGE_RSS_INPUT_TUPLE_SCTP;
5067 vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5069 vport->rss_algo = rss_algo;
5071 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5072 sizeof(*rss_ind_tbl), GFP_KERNEL);
5076 vport->rss_indirection_tbl = rss_ind_tbl;
5077 memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5079 hclge_rss_indir_init_cfg(hdev);
5084 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5085 int vector_id, bool en,
5086 struct hnae3_ring_chain_node *ring_chain)
5088 struct hclge_dev *hdev = vport->back;
5089 struct hnae3_ring_chain_node *node;
5090 struct hclge_desc desc;
5091 struct hclge_ctrl_vector_chain_cmd *req =
5092 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5093 enum hclge_cmd_status status;
5094 enum hclge_opcode_type op;
5095 u16 tqp_type_and_id;
5098 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5099 hclge_cmd_setup_basic_desc(&desc, op, false);
5100 req->int_vector_id_l = hnae3_get_field(vector_id,
5101 HCLGE_VECTOR_ID_L_M,
5102 HCLGE_VECTOR_ID_L_S);
5103 req->int_vector_id_h = hnae3_get_field(vector_id,
5104 HCLGE_VECTOR_ID_H_M,
5105 HCLGE_VECTOR_ID_H_S);
5108 for (node = ring_chain; node; node = node->next) {
5109 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5110 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
5112 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5113 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5114 HCLGE_TQP_ID_S, node->tqp_index);
5115 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5117 hnae3_get_field(node->int_gl_idx,
5118 HNAE3_RING_GL_IDX_M,
5119 HNAE3_RING_GL_IDX_S));
5120 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5121 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5122 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5123 req->vfid = vport->vport_id;
5125 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5127 dev_err(&hdev->pdev->dev,
5128 "Map TQP fail, status is %d.\n",
5134 hclge_cmd_setup_basic_desc(&desc,
5137 req->int_vector_id_l =
5138 hnae3_get_field(vector_id,
5139 HCLGE_VECTOR_ID_L_M,
5140 HCLGE_VECTOR_ID_L_S);
5141 req->int_vector_id_h =
5142 hnae3_get_field(vector_id,
5143 HCLGE_VECTOR_ID_H_M,
5144 HCLGE_VECTOR_ID_H_S);
5149 req->int_cause_num = i;
5150 req->vfid = vport->vport_id;
5151 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5153 dev_err(&hdev->pdev->dev,
5154 "Map TQP fail, status is %d.\n", status);
5162 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5163 struct hnae3_ring_chain_node *ring_chain)
5165 struct hclge_vport *vport = hclge_get_vport(handle);
5166 struct hclge_dev *hdev = vport->back;
5169 vector_id = hclge_get_vector_index(hdev, vector);
5170 if (vector_id < 0) {
5171 dev_err(&hdev->pdev->dev,
5172 "failed to get vector index. vector=%d\n", vector);
5176 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5179 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5180 struct hnae3_ring_chain_node *ring_chain)
5182 struct hclge_vport *vport = hclge_get_vport(handle);
5183 struct hclge_dev *hdev = vport->back;
5186 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5189 vector_id = hclge_get_vector_index(hdev, vector);
5190 if (vector_id < 0) {
5191 dev_err(&handle->pdev->dev,
5192 "Get vector index fail. ret =%d\n", vector_id);
5196 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5198 dev_err(&handle->pdev->dev,
5199 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5205 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5206 bool en_uc, bool en_mc, bool en_bc)
5208 struct hclge_vport *vport = &hdev->vport[vf_id];
5209 struct hnae3_handle *handle = &vport->nic;
5210 struct hclge_promisc_cfg_cmd *req;
5211 struct hclge_desc desc;
5212 bool uc_tx_en = en_uc;
5216 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5218 req = (struct hclge_promisc_cfg_cmd *)desc.data;
5221 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5224 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5225 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5226 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5227 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5228 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5229 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5230 req->extend_promisc = promisc_cfg;
5232 /* to be compatible with DEVICE_VERSION_V1/2 */
5234 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5235 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5236 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5237 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5238 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5239 req->promisc = promisc_cfg;
5241 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5243 dev_err(&hdev->pdev->dev,
5244 "failed to set vport %u promisc mode, ret = %d.\n",
5250 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5251 bool en_mc_pmc, bool en_bc_pmc)
5253 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5254 en_uc_pmc, en_mc_pmc, en_bc_pmc);
5257 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5260 struct hclge_vport *vport = hclge_get_vport(handle);
5261 struct hclge_dev *hdev = vport->back;
5262 bool en_bc_pmc = true;
5264 /* For device whose version below V2, if broadcast promisc enabled,
5265 * vlan filter is always bypassed. So broadcast promisc should be
5266 * disabled until user enable promisc mode
5268 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5269 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5271 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5275 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5277 struct hclge_vport *vport = hclge_get_vport(handle);
5279 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5282 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5284 if (hlist_empty(&hdev->fd_rule_list))
5285 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5288 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5290 if (!test_bit(location, hdev->fd_bmap)) {
5291 set_bit(location, hdev->fd_bmap);
5292 hdev->hclge_fd_rule_num++;
5296 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5298 if (test_bit(location, hdev->fd_bmap)) {
5299 clear_bit(location, hdev->fd_bmap);
5300 hdev->hclge_fd_rule_num--;
5304 static void hclge_fd_free_node(struct hclge_dev *hdev,
5305 struct hclge_fd_rule *rule)
5307 hlist_del(&rule->rule_node);
5309 hclge_sync_fd_state(hdev);
5312 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5313 struct hclge_fd_rule *old_rule,
5314 struct hclge_fd_rule *new_rule,
5315 enum HCLGE_FD_NODE_STATE state)
5318 case HCLGE_FD_TO_ADD:
5319 case HCLGE_FD_ACTIVE:
5320 /* 1) if the new state is TO_ADD, just replace the old rule
5321 * with the same location, no matter its state, because the
5322 * new rule will be configured to the hardware.
5323 * 2) if the new state is ACTIVE, it means the new rule
5324 * has been configured to the hardware, so just replace
5325 * the old rule node with the same location.
5326 * 3) for it doesn't add a new node to the list, so it's
5327 * unnecessary to update the rule number and fd_bmap.
5329 new_rule->rule_node.next = old_rule->rule_node.next;
5330 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5331 memcpy(old_rule, new_rule, sizeof(*old_rule));
5334 case HCLGE_FD_DELETED:
5335 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5336 hclge_fd_free_node(hdev, old_rule);
5338 case HCLGE_FD_TO_DEL:
5339 /* if new request is TO_DEL, and old rule is existent
5340 * 1) the state of old rule is TO_DEL, we need do nothing,
5341 * because we delete rule by location, other rule content
5343 * 2) the state of old rule is ACTIVE, we need to change its
5344 * state to TO_DEL, so the rule will be deleted when periodic
5345 * task being scheduled.
5346 * 3) the state of old rule is TO_ADD, it means the rule hasn't
5347 * been added to hardware, so we just delete the rule node from
5348 * fd_rule_list directly.
5350 if (old_rule->state == HCLGE_FD_TO_ADD) {
5351 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5352 hclge_fd_free_node(hdev, old_rule);
5355 old_rule->state = HCLGE_FD_TO_DEL;
5360 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5362 struct hclge_fd_rule **parent)
5364 struct hclge_fd_rule *rule;
5365 struct hlist_node *node;
5367 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5368 if (rule->location == location)
5370 else if (rule->location > location)
5372 /* record the parent node, use to keep the nodes in fd_rule_list
5381 /* insert fd rule node in ascend order according to rule->location */
5382 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5383 struct hclge_fd_rule *rule,
5384 struct hclge_fd_rule *parent)
5386 INIT_HLIST_NODE(&rule->rule_node);
5389 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5391 hlist_add_head(&rule->rule_node, hlist);
5394 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5395 struct hclge_fd_user_def_cfg *cfg)
5397 struct hclge_fd_user_def_cfg_cmd *req;
5398 struct hclge_desc desc;
5402 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5404 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5406 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5407 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5408 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5409 req->ol2_cfg = cpu_to_le16(data);
5412 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5413 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5414 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5415 req->ol3_cfg = cpu_to_le16(data);
5418 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5419 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5420 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5421 req->ol4_cfg = cpu_to_le16(data);
5423 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5425 dev_err(&hdev->pdev->dev,
5426 "failed to set fd user def data, ret= %d\n", ret);
5430 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5434 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5438 spin_lock_bh(&hdev->fd_rule_lock);
5440 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5442 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5445 spin_unlock_bh(&hdev->fd_rule_lock);
5448 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5449 struct hclge_fd_rule *rule)
5451 struct hlist_head *hlist = &hdev->fd_rule_list;
5452 struct hclge_fd_rule *fd_rule, *parent = NULL;
5453 struct hclge_fd_user_def_info *info, *old_info;
5454 struct hclge_fd_user_def_cfg *cfg;
5456 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5457 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5460 /* for valid layer is start from 1, so need minus 1 to get the cfg */
5461 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5462 info = &rule->ep.user_def;
5464 if (!cfg->ref_cnt || cfg->offset == info->offset)
5467 if (cfg->ref_cnt > 1)
5470 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5472 old_info = &fd_rule->ep.user_def;
5473 if (info->layer == old_info->layer)
5478 dev_err(&hdev->pdev->dev,
5479 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5484 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5485 struct hclge_fd_rule *rule)
5487 struct hclge_fd_user_def_cfg *cfg;
5489 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5490 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5493 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5494 if (!cfg->ref_cnt) {
5495 cfg->offset = rule->ep.user_def.offset;
5496 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5501 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5502 struct hclge_fd_rule *rule)
5504 struct hclge_fd_user_def_cfg *cfg;
5506 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5507 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5510 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5515 if (!cfg->ref_cnt) {
5517 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5521 static void hclge_update_fd_list(struct hclge_dev *hdev,
5522 enum HCLGE_FD_NODE_STATE state, u16 location,
5523 struct hclge_fd_rule *new_rule)
5525 struct hlist_head *hlist = &hdev->fd_rule_list;
5526 struct hclge_fd_rule *fd_rule, *parent = NULL;
5528 fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5530 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5531 if (state == HCLGE_FD_ACTIVE)
5532 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5533 hclge_sync_fd_user_def_cfg(hdev, true);
5535 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5539 /* it's unlikely to fail here, because we have checked the rule
5542 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5543 dev_warn(&hdev->pdev->dev,
5544 "failed to delete fd rule %u, it's inexistent\n",
5549 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5550 hclge_sync_fd_user_def_cfg(hdev, true);
5552 hclge_fd_insert_rule_node(hlist, new_rule, parent);
5553 hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5555 if (state == HCLGE_FD_TO_ADD) {
5556 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5557 hclge_task_schedule(hdev, 0);
5561 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5563 struct hclge_get_fd_mode_cmd *req;
5564 struct hclge_desc desc;
5567 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5569 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5571 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5573 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5577 *fd_mode = req->mode;
5582 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5583 u32 *stage1_entry_num,
5584 u32 *stage2_entry_num,
5585 u16 *stage1_counter_num,
5586 u16 *stage2_counter_num)
5588 struct hclge_get_fd_allocation_cmd *req;
5589 struct hclge_desc desc;
5592 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5594 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5596 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5598 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5603 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5604 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5605 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5606 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5611 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5612 enum HCLGE_FD_STAGE stage_num)
5614 struct hclge_set_fd_key_config_cmd *req;
5615 struct hclge_fd_key_cfg *stage;
5616 struct hclge_desc desc;
5619 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5621 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5622 stage = &hdev->fd_cfg.key_cfg[stage_num];
5623 req->stage = stage_num;
5624 req->key_select = stage->key_sel;
5625 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5626 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5627 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5628 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5629 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5630 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5632 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5634 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5639 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5641 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5643 spin_lock_bh(&hdev->fd_rule_lock);
5644 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5645 spin_unlock_bh(&hdev->fd_rule_lock);
5647 hclge_fd_set_user_def_cmd(hdev, cfg);
5650 static int hclge_init_fd_config(struct hclge_dev *hdev)
5652 #define LOW_2_WORDS 0x03
5653 struct hclge_fd_key_cfg *key_cfg;
5656 if (!hnae3_dev_fd_supported(hdev))
5659 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5663 switch (hdev->fd_cfg.fd_mode) {
5664 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5665 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5667 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5668 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5671 dev_err(&hdev->pdev->dev,
5672 "Unsupported flow director mode %u\n",
5673 hdev->fd_cfg.fd_mode);
5677 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5678 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5679 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5680 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5681 key_cfg->outer_sipv6_word_en = 0;
5682 key_cfg->outer_dipv6_word_en = 0;
5684 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5685 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5686 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5687 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5689 /* If use max 400bit key, we can support tuples for ether type */
5690 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5691 key_cfg->tuple_active |=
5692 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5693 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5694 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5697 /* roce_type is used to filter roce frames
5698 * dst_vport is used to specify the rule
5700 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5702 ret = hclge_get_fd_allocation(hdev,
5703 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5704 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5705 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5706 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5710 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5713 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5714 int loc, u8 *key, bool is_add)
5716 struct hclge_fd_tcam_config_1_cmd *req1;
5717 struct hclge_fd_tcam_config_2_cmd *req2;
5718 struct hclge_fd_tcam_config_3_cmd *req3;
5719 struct hclge_desc desc[3];
5722 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5723 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5724 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5725 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5726 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5728 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5729 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5730 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5732 req1->stage = stage;
5733 req1->xy_sel = sel_x ? 1 : 0;
5734 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5735 req1->index = cpu_to_le32(loc);
5736 req1->entry_vld = sel_x ? is_add : 0;
5739 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5740 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5741 sizeof(req2->tcam_data));
5742 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5743 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5746 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5748 dev_err(&hdev->pdev->dev,
5749 "config tcam key fail, ret=%d\n",
5755 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5756 struct hclge_fd_ad_data *action)
5758 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5759 struct hclge_fd_ad_config_cmd *req;
5760 struct hclge_desc desc;
5764 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5766 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5767 req->index = cpu_to_le32(loc);
5770 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5771 action->write_rule_id_to_bd);
5772 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5774 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5775 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5776 action->override_tc);
5777 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5778 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5781 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5782 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5783 action->forward_to_direct_queue);
5784 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5786 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5787 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5788 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5789 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5790 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5791 action->counter_id);
5793 req->ad_data = cpu_to_le64(ad_data);
5794 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5796 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5801 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5802 struct hclge_fd_rule *rule)
5804 int offset, moffset, ip_offset;
5805 enum HCLGE_FD_KEY_OPT key_opt;
5806 u16 tmp_x_s, tmp_y_s;
5807 u32 tmp_x_l, tmp_y_l;
5811 if (rule->unused_tuple & BIT(tuple_bit))
5814 key_opt = tuple_key_info[tuple_bit].key_opt;
5815 offset = tuple_key_info[tuple_bit].offset;
5816 moffset = tuple_key_info[tuple_bit].moffset;
5820 calc_x(*key_x, p[offset], p[moffset]);
5821 calc_y(*key_y, p[offset], p[moffset]);
5825 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5826 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5827 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5828 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5832 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5833 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5834 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5835 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5839 for (i = 0; i < ETH_ALEN; i++) {
5840 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5842 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5848 ip_offset = IPV4_INDEX * sizeof(u32);
5849 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5850 *(u32 *)(&p[moffset + ip_offset]));
5851 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5852 *(u32 *)(&p[moffset + ip_offset]));
5853 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5854 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5862 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5863 u8 vf_id, u8 network_port_id)
5865 u32 port_number = 0;
5867 if (port_type == HOST_PORT) {
5868 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5870 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5872 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5874 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5875 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5876 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5882 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5883 __le32 *key_x, __le32 *key_y,
5884 struct hclge_fd_rule *rule)
5886 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5887 u8 cur_pos = 0, tuple_size, shift_bits;
5890 for (i = 0; i < MAX_META_DATA; i++) {
5891 tuple_size = meta_data_key_info[i].key_length;
5892 tuple_bit = key_cfg->meta_data_active & BIT(i);
5894 switch (tuple_bit) {
5895 case BIT(ROCE_TYPE):
5896 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5897 cur_pos += tuple_size;
5899 case BIT(DST_VPORT):
5900 port_number = hclge_get_port_number(HOST_PORT, 0,
5902 hnae3_set_field(meta_data,
5903 GENMASK(cur_pos + tuple_size, cur_pos),
5904 cur_pos, port_number);
5905 cur_pos += tuple_size;
5912 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5913 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5914 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5916 *key_x = cpu_to_le32(tmp_x << shift_bits);
5917 *key_y = cpu_to_le32(tmp_y << shift_bits);
5920 /* A complete key is combined with meta data key and tuple key.
5921 * Meta data key is stored at the MSB region, and tuple key is stored at
5922 * the LSB region, unused bits will be filled 0.
5924 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5925 struct hclge_fd_rule *rule)
5927 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5928 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5929 u8 *cur_key_x, *cur_key_y;
5930 u8 meta_data_region;
5935 memset(key_x, 0, sizeof(key_x));
5936 memset(key_y, 0, sizeof(key_y));
5940 for (i = 0 ; i < MAX_TUPLE; i++) {
5943 tuple_size = tuple_key_info[i].key_length / 8;
5944 if (!(key_cfg->tuple_active & BIT(i)))
5947 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5950 cur_key_x += tuple_size;
5951 cur_key_y += tuple_size;
5955 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5956 MAX_META_DATA_LENGTH / 8;
5958 hclge_fd_convert_meta_data(key_cfg,
5959 (__le32 *)(key_x + meta_data_region),
5960 (__le32 *)(key_y + meta_data_region),
5963 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5966 dev_err(&hdev->pdev->dev,
5967 "fd key_y config fail, loc=%u, ret=%d\n",
5968 rule->queue_id, ret);
5972 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5975 dev_err(&hdev->pdev->dev,
5976 "fd key_x config fail, loc=%u, ret=%d\n",
5977 rule->queue_id, ret);
5981 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5982 struct hclge_fd_rule *rule)
5984 struct hclge_vport *vport = hdev->vport;
5985 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5986 struct hclge_fd_ad_data ad_data;
5988 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5989 ad_data.ad_id = rule->location;
5991 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5992 ad_data.drop_packet = true;
5993 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5994 ad_data.override_tc = true;
5996 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5998 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6000 ad_data.forward_to_direct_queue = true;
6001 ad_data.queue_id = rule->queue_id;
6004 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6005 ad_data.use_counter = true;
6006 ad_data.counter_id = rule->vf_id %
6007 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6009 ad_data.use_counter = false;
6010 ad_data.counter_id = 0;
6013 ad_data.use_next_stage = false;
6014 ad_data.next_input_key = 0;
6016 ad_data.write_rule_id_to_bd = true;
6017 ad_data.rule_id = rule->location;
6019 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6022 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6025 if (!spec || !unused_tuple)
6028 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6031 *unused_tuple |= BIT(INNER_SRC_IP);
6034 *unused_tuple |= BIT(INNER_DST_IP);
6037 *unused_tuple |= BIT(INNER_SRC_PORT);
6040 *unused_tuple |= BIT(INNER_DST_PORT);
6043 *unused_tuple |= BIT(INNER_IP_TOS);
6048 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6051 if (!spec || !unused_tuple)
6054 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6055 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6058 *unused_tuple |= BIT(INNER_SRC_IP);
6061 *unused_tuple |= BIT(INNER_DST_IP);
6064 *unused_tuple |= BIT(INNER_IP_TOS);
6067 *unused_tuple |= BIT(INNER_IP_PROTO);
6069 if (spec->l4_4_bytes)
6072 if (spec->ip_ver != ETH_RX_NFC_IP4)
6078 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6081 if (!spec || !unused_tuple)
6084 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6086 /* check whether src/dst ip address used */
6087 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6088 *unused_tuple |= BIT(INNER_SRC_IP);
6090 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6091 *unused_tuple |= BIT(INNER_DST_IP);
6094 *unused_tuple |= BIT(INNER_SRC_PORT);
6097 *unused_tuple |= BIT(INNER_DST_PORT);
6100 *unused_tuple |= BIT(INNER_IP_TOS);
6105 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6108 if (!spec || !unused_tuple)
6111 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6112 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6114 /* check whether src/dst ip address used */
6115 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6116 *unused_tuple |= BIT(INNER_SRC_IP);
6118 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6119 *unused_tuple |= BIT(INNER_DST_IP);
6121 if (!spec->l4_proto)
6122 *unused_tuple |= BIT(INNER_IP_PROTO);
6125 *unused_tuple |= BIT(INNER_IP_TOS);
6127 if (spec->l4_4_bytes)
6133 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6135 if (!spec || !unused_tuple)
6138 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6139 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6140 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6142 if (is_zero_ether_addr(spec->h_source))
6143 *unused_tuple |= BIT(INNER_SRC_MAC);
6145 if (is_zero_ether_addr(spec->h_dest))
6146 *unused_tuple |= BIT(INNER_DST_MAC);
6149 *unused_tuple |= BIT(INNER_ETH_TYPE);
6154 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6155 struct ethtool_rx_flow_spec *fs,
6158 if (fs->flow_type & FLOW_EXT) {
6159 if (fs->h_ext.vlan_etype) {
6160 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6164 if (!fs->h_ext.vlan_tci)
6165 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6167 if (fs->m_ext.vlan_tci &&
6168 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6169 dev_err(&hdev->pdev->dev,
6170 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6171 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6175 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6178 if (fs->flow_type & FLOW_MAC_EXT) {
6179 if (hdev->fd_cfg.fd_mode !=
6180 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6181 dev_err(&hdev->pdev->dev,
6182 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6186 if (is_zero_ether_addr(fs->h_ext.h_dest))
6187 *unused_tuple |= BIT(INNER_DST_MAC);
6189 *unused_tuple &= ~BIT(INNER_DST_MAC);
6195 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6196 struct hclge_fd_user_def_info *info)
6198 switch (flow_type) {
6200 info->layer = HCLGE_FD_USER_DEF_L2;
6201 *unused_tuple &= ~BIT(INNER_L2_RSV);
6204 case IPV6_USER_FLOW:
6205 info->layer = HCLGE_FD_USER_DEF_L3;
6206 *unused_tuple &= ~BIT(INNER_L3_RSV);
6212 info->layer = HCLGE_FD_USER_DEF_L4;
6213 *unused_tuple &= ~BIT(INNER_L4_RSV);
6222 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6224 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6227 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6228 struct ethtool_rx_flow_spec *fs,
6230 struct hclge_fd_user_def_info *info)
6232 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6233 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6234 u16 data, offset, data_mask, offset_mask;
6237 info->layer = HCLGE_FD_USER_DEF_NONE;
6238 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6240 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6243 /* user-def data from ethtool is 64 bit value, the bit0~15 is used
6244 * for data, and bit32~47 is used for offset.
6246 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6247 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6248 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6249 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6251 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6252 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6256 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6257 dev_err(&hdev->pdev->dev,
6258 "user-def offset[%u] should be no more than %u\n",
6259 offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6263 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6264 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6268 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6270 dev_err(&hdev->pdev->dev,
6271 "unsupported flow type for user-def bytes, ret = %d\n",
6277 info->data_mask = data_mask;
6278 info->offset = offset;
6283 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6284 struct ethtool_rx_flow_spec *fs,
6286 struct hclge_fd_user_def_info *info)
6291 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6292 dev_err(&hdev->pdev->dev,
6293 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6295 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6299 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6303 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6304 switch (flow_type) {
6308 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6312 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6318 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6321 case IPV6_USER_FLOW:
6322 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6326 if (hdev->fd_cfg.fd_mode !=
6327 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6328 dev_err(&hdev->pdev->dev,
6329 "ETHER_FLOW is not supported in current fd mode!\n");
6333 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6337 dev_err(&hdev->pdev->dev,
6338 "unsupported protocol type, protocol type = %#x\n",
6344 dev_err(&hdev->pdev->dev,
6345 "failed to check flow union tuple, ret = %d\n",
6350 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6353 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6354 struct ethtool_rx_flow_spec *fs,
6355 struct hclge_fd_rule *rule, u8 ip_proto)
6357 rule->tuples.src_ip[IPV4_INDEX] =
6358 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6359 rule->tuples_mask.src_ip[IPV4_INDEX] =
6360 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6362 rule->tuples.dst_ip[IPV4_INDEX] =
6363 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6364 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6365 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6367 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6368 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6370 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6371 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6373 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6374 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6376 rule->tuples.ether_proto = ETH_P_IP;
6377 rule->tuples_mask.ether_proto = 0xFFFF;
6379 rule->tuples.ip_proto = ip_proto;
6380 rule->tuples_mask.ip_proto = 0xFF;
6383 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6384 struct ethtool_rx_flow_spec *fs,
6385 struct hclge_fd_rule *rule)
6387 rule->tuples.src_ip[IPV4_INDEX] =
6388 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6389 rule->tuples_mask.src_ip[IPV4_INDEX] =
6390 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6392 rule->tuples.dst_ip[IPV4_INDEX] =
6393 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6394 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6395 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6397 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6398 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6400 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6401 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6403 rule->tuples.ether_proto = ETH_P_IP;
6404 rule->tuples_mask.ether_proto = 0xFFFF;
6407 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6408 struct ethtool_rx_flow_spec *fs,
6409 struct hclge_fd_rule *rule, u8 ip_proto)
6411 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6413 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6416 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6418 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6421 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6422 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6424 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6425 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6427 rule->tuples.ether_proto = ETH_P_IPV6;
6428 rule->tuples_mask.ether_proto = 0xFFFF;
6430 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6431 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6433 rule->tuples.ip_proto = ip_proto;
6434 rule->tuples_mask.ip_proto = 0xFF;
6437 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6438 struct ethtool_rx_flow_spec *fs,
6439 struct hclge_fd_rule *rule)
6441 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6443 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6446 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6448 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6451 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6452 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6454 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6455 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6457 rule->tuples.ether_proto = ETH_P_IPV6;
6458 rule->tuples_mask.ether_proto = 0xFFFF;
6461 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6462 struct ethtool_rx_flow_spec *fs,
6463 struct hclge_fd_rule *rule)
6465 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6466 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6468 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6469 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6471 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6472 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6475 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6476 struct hclge_fd_rule *rule)
6478 switch (info->layer) {
6479 case HCLGE_FD_USER_DEF_L2:
6480 rule->tuples.l2_user_def = info->data;
6481 rule->tuples_mask.l2_user_def = info->data_mask;
6483 case HCLGE_FD_USER_DEF_L3:
6484 rule->tuples.l3_user_def = info->data;
6485 rule->tuples_mask.l3_user_def = info->data_mask;
6487 case HCLGE_FD_USER_DEF_L4:
6488 rule->tuples.l4_user_def = (u32)info->data << 16;
6489 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6495 rule->ep.user_def = *info;
6498 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6499 struct ethtool_rx_flow_spec *fs,
6500 struct hclge_fd_rule *rule,
6501 struct hclge_fd_user_def_info *info)
6503 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6505 switch (flow_type) {
6507 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6510 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6513 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6516 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6519 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6522 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6525 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6527 case IPV6_USER_FLOW:
6528 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6531 hclge_fd_get_ether_tuple(hdev, fs, rule);
6537 if (fs->flow_type & FLOW_EXT) {
6538 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6539 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6540 hclge_fd_get_user_def_tuple(info, rule);
6543 if (fs->flow_type & FLOW_MAC_EXT) {
6544 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6545 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6551 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6552 struct hclge_fd_rule *rule)
6556 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6560 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6563 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6564 struct hclge_fd_rule *rule)
6568 spin_lock_bh(&hdev->fd_rule_lock);
6570 if (hdev->fd_active_type != rule->rule_type &&
6571 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6572 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6573 dev_err(&hdev->pdev->dev,
6574 "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6575 rule->rule_type, hdev->fd_active_type);
6576 spin_unlock_bh(&hdev->fd_rule_lock);
6580 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6584 ret = hclge_clear_arfs_rules(hdev);
6588 ret = hclge_fd_config_rule(hdev, rule);
6592 rule->state = HCLGE_FD_ACTIVE;
6593 hdev->fd_active_type = rule->rule_type;
6594 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6597 spin_unlock_bh(&hdev->fd_rule_lock);
6601 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6603 struct hclge_vport *vport = hclge_get_vport(handle);
6604 struct hclge_dev *hdev = vport->back;
6606 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6609 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6610 u16 *vport_id, u8 *action, u16 *queue_id)
6612 struct hclge_vport *vport = hdev->vport;
6614 if (ring_cookie == RX_CLS_FLOW_DISC) {
6615 *action = HCLGE_FD_ACTION_DROP_PACKET;
6617 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6618 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6621 if (vf > hdev->num_req_vfs) {
6622 dev_err(&hdev->pdev->dev,
6623 "Error: vf id (%u) > max vf num (%u)\n",
6624 vf, hdev->num_req_vfs);
6628 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6629 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6632 dev_err(&hdev->pdev->dev,
6633 "Error: queue id (%u) > max tqp num (%u)\n",
6638 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6645 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6646 struct ethtool_rxnfc *cmd)
6648 struct hclge_vport *vport = hclge_get_vport(handle);
6649 struct hclge_dev *hdev = vport->back;
6650 struct hclge_fd_user_def_info info;
6651 u16 dst_vport_id = 0, q_index = 0;
6652 struct ethtool_rx_flow_spec *fs;
6653 struct hclge_fd_rule *rule;
6658 if (!hnae3_dev_fd_supported(hdev)) {
6659 dev_err(&hdev->pdev->dev,
6660 "flow table director is not supported\n");
6665 dev_err(&hdev->pdev->dev,
6666 "please enable flow director first\n");
6670 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6672 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6676 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6681 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6685 ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6691 rule->flow_type = fs->flow_type;
6692 rule->location = fs->location;
6693 rule->unused_tuple = unused;
6694 rule->vf_id = dst_vport_id;
6695 rule->queue_id = q_index;
6696 rule->action = action;
6697 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6699 ret = hclge_add_fd_entry_common(hdev, rule);
6706 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6707 struct ethtool_rxnfc *cmd)
6709 struct hclge_vport *vport = hclge_get_vport(handle);
6710 struct hclge_dev *hdev = vport->back;
6711 struct ethtool_rx_flow_spec *fs;
6714 if (!hnae3_dev_fd_supported(hdev))
6717 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6719 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6722 spin_lock_bh(&hdev->fd_rule_lock);
6723 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6724 !test_bit(fs->location, hdev->fd_bmap)) {
6725 dev_err(&hdev->pdev->dev,
6726 "Delete fail, rule %u is inexistent\n", fs->location);
6727 spin_unlock_bh(&hdev->fd_rule_lock);
6731 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6736 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6739 spin_unlock_bh(&hdev->fd_rule_lock);
6743 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6746 struct hclge_fd_rule *rule;
6747 struct hlist_node *node;
6750 if (!hnae3_dev_fd_supported(hdev))
6753 spin_lock_bh(&hdev->fd_rule_lock);
6755 for_each_set_bit(location, hdev->fd_bmap,
6756 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6757 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6761 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6763 hlist_del(&rule->rule_node);
6766 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6767 hdev->hclge_fd_rule_num = 0;
6768 bitmap_zero(hdev->fd_bmap,
6769 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6772 spin_unlock_bh(&hdev->fd_rule_lock);
6775 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6777 hclge_clear_fd_rules_in_list(hdev, true);
6778 hclge_fd_disable_user_def(hdev);
6781 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6783 struct hclge_vport *vport = hclge_get_vport(handle);
6784 struct hclge_dev *hdev = vport->back;
6785 struct hclge_fd_rule *rule;
6786 struct hlist_node *node;
6788 /* Return ok here, because reset error handling will check this
6789 * return value. If error is returned here, the reset process will
6792 if (!hnae3_dev_fd_supported(hdev))
6795 /* if fd is disabled, should not restore it when reset */
6799 spin_lock_bh(&hdev->fd_rule_lock);
6800 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6801 if (rule->state == HCLGE_FD_ACTIVE)
6802 rule->state = HCLGE_FD_TO_ADD;
6804 spin_unlock_bh(&hdev->fd_rule_lock);
6805 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6810 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6811 struct ethtool_rxnfc *cmd)
6813 struct hclge_vport *vport = hclge_get_vport(handle);
6814 struct hclge_dev *hdev = vport->back;
6816 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6819 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6820 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6825 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6826 struct ethtool_tcpip4_spec *spec,
6827 struct ethtool_tcpip4_spec *spec_mask)
6829 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6830 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6831 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6833 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6834 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6835 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6837 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6838 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6839 0 : cpu_to_be16(rule->tuples_mask.src_port);
6841 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6842 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6843 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6845 spec->tos = rule->tuples.ip_tos;
6846 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6847 0 : rule->tuples_mask.ip_tos;
6850 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6851 struct ethtool_usrip4_spec *spec,
6852 struct ethtool_usrip4_spec *spec_mask)
6854 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6855 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6856 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6858 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6859 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6860 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6862 spec->tos = rule->tuples.ip_tos;
6863 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6864 0 : rule->tuples_mask.ip_tos;
6866 spec->proto = rule->tuples.ip_proto;
6867 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6868 0 : rule->tuples_mask.ip_proto;
6870 spec->ip_ver = ETH_RX_NFC_IP4;
6873 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6874 struct ethtool_tcpip6_spec *spec,
6875 struct ethtool_tcpip6_spec *spec_mask)
6877 cpu_to_be32_array(spec->ip6src,
6878 rule->tuples.src_ip, IPV6_SIZE);
6879 cpu_to_be32_array(spec->ip6dst,
6880 rule->tuples.dst_ip, IPV6_SIZE);
6881 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6882 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6884 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6887 if (rule->unused_tuple & BIT(INNER_DST_IP))
6888 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6890 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6893 spec->tclass = rule->tuples.ip_tos;
6894 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6895 0 : rule->tuples_mask.ip_tos;
6897 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6898 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6899 0 : cpu_to_be16(rule->tuples_mask.src_port);
6901 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6902 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6903 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6906 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6907 struct ethtool_usrip6_spec *spec,
6908 struct ethtool_usrip6_spec *spec_mask)
6910 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6911 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6912 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6913 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6915 cpu_to_be32_array(spec_mask->ip6src,
6916 rule->tuples_mask.src_ip, IPV6_SIZE);
6918 if (rule->unused_tuple & BIT(INNER_DST_IP))
6919 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6921 cpu_to_be32_array(spec_mask->ip6dst,
6922 rule->tuples_mask.dst_ip, IPV6_SIZE);
6924 spec->tclass = rule->tuples.ip_tos;
6925 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6926 0 : rule->tuples_mask.ip_tos;
6928 spec->l4_proto = rule->tuples.ip_proto;
6929 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6930 0 : rule->tuples_mask.ip_proto;
6933 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6934 struct ethhdr *spec,
6935 struct ethhdr *spec_mask)
6937 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6938 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6940 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6941 eth_zero_addr(spec_mask->h_source);
6943 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6945 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6946 eth_zero_addr(spec_mask->h_dest);
6948 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6950 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6951 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6952 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6955 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6956 struct hclge_fd_rule *rule)
6958 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6959 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6960 fs->h_ext.data[0] = 0;
6961 fs->h_ext.data[1] = 0;
6962 fs->m_ext.data[0] = 0;
6963 fs->m_ext.data[1] = 0;
6965 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6966 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6968 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6969 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6973 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6974 struct hclge_fd_rule *rule)
6976 if (fs->flow_type & FLOW_EXT) {
6977 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6978 fs->m_ext.vlan_tci =
6979 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6980 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6982 hclge_fd_get_user_def_info(fs, rule);
6985 if (fs->flow_type & FLOW_MAC_EXT) {
6986 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6987 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6988 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6990 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6991 rule->tuples_mask.dst_mac);
6995 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6996 struct ethtool_rxnfc *cmd)
6998 struct hclge_vport *vport = hclge_get_vport(handle);
6999 struct hclge_fd_rule *rule = NULL;
7000 struct hclge_dev *hdev = vport->back;
7001 struct ethtool_rx_flow_spec *fs;
7002 struct hlist_node *node2;
7004 if (!hnae3_dev_fd_supported(hdev))
7007 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7009 spin_lock_bh(&hdev->fd_rule_lock);
7011 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7012 if (rule->location >= fs->location)
7016 if (!rule || fs->location != rule->location) {
7017 spin_unlock_bh(&hdev->fd_rule_lock);
7022 fs->flow_type = rule->flow_type;
7023 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7027 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7028 &fs->m_u.tcp_ip4_spec);
7031 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7032 &fs->m_u.usr_ip4_spec);
7037 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7038 &fs->m_u.tcp_ip6_spec);
7040 case IPV6_USER_FLOW:
7041 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7042 &fs->m_u.usr_ip6_spec);
7044 /* The flow type of fd rule has been checked before adding in to rule
7045 * list. As other flow types have been handled, it must be ETHER_FLOW
7046 * for the default case
7049 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7050 &fs->m_u.ether_spec);
7054 hclge_fd_get_ext_info(fs, rule);
7056 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7057 fs->ring_cookie = RX_CLS_FLOW_DISC;
7061 fs->ring_cookie = rule->queue_id;
7062 vf_id = rule->vf_id;
7063 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7064 fs->ring_cookie |= vf_id;
7067 spin_unlock_bh(&hdev->fd_rule_lock);
7072 static int hclge_get_all_rules(struct hnae3_handle *handle,
7073 struct ethtool_rxnfc *cmd, u32 *rule_locs)
7075 struct hclge_vport *vport = hclge_get_vport(handle);
7076 struct hclge_dev *hdev = vport->back;
7077 struct hclge_fd_rule *rule;
7078 struct hlist_node *node2;
7081 if (!hnae3_dev_fd_supported(hdev))
7084 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7086 spin_lock_bh(&hdev->fd_rule_lock);
7087 hlist_for_each_entry_safe(rule, node2,
7088 &hdev->fd_rule_list, rule_node) {
7089 if (cnt == cmd->rule_cnt) {
7090 spin_unlock_bh(&hdev->fd_rule_lock);
7094 if (rule->state == HCLGE_FD_TO_DEL)
7097 rule_locs[cnt] = rule->location;
7101 spin_unlock_bh(&hdev->fd_rule_lock);
7103 cmd->rule_cnt = cnt;
7108 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7109 struct hclge_fd_rule_tuples *tuples)
7111 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7112 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7114 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7115 tuples->ip_proto = fkeys->basic.ip_proto;
7116 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7118 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7119 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7120 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7124 for (i = 0; i < IPV6_SIZE; i++) {
7125 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7126 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7131 /* traverse all rules, check whether an existed rule has the same tuples */
7132 static struct hclge_fd_rule *
7133 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7134 const struct hclge_fd_rule_tuples *tuples)
7136 struct hclge_fd_rule *rule = NULL;
7137 struct hlist_node *node;
7139 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7140 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7147 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7148 struct hclge_fd_rule *rule)
7150 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7151 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7152 BIT(INNER_SRC_PORT);
7155 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7156 rule->state = HCLGE_FD_TO_ADD;
7157 if (tuples->ether_proto == ETH_P_IP) {
7158 if (tuples->ip_proto == IPPROTO_TCP)
7159 rule->flow_type = TCP_V4_FLOW;
7161 rule->flow_type = UDP_V4_FLOW;
7163 if (tuples->ip_proto == IPPROTO_TCP)
7164 rule->flow_type = TCP_V6_FLOW;
7166 rule->flow_type = UDP_V6_FLOW;
7168 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7169 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7172 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7173 u16 flow_id, struct flow_keys *fkeys)
7175 struct hclge_vport *vport = hclge_get_vport(handle);
7176 struct hclge_fd_rule_tuples new_tuples = {};
7177 struct hclge_dev *hdev = vport->back;
7178 struct hclge_fd_rule *rule;
7181 if (!hnae3_dev_fd_supported(hdev))
7184 /* when there is already fd rule existed add by user,
7185 * arfs should not work
7187 spin_lock_bh(&hdev->fd_rule_lock);
7188 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7189 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7190 spin_unlock_bh(&hdev->fd_rule_lock);
7194 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7196 /* check is there flow director filter existed for this flow,
7197 * if not, create a new filter for it;
7198 * if filter exist with different queue id, modify the filter;
7199 * if filter exist with same queue id, do nothing
7201 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7203 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7204 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7205 spin_unlock_bh(&hdev->fd_rule_lock);
7209 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7211 spin_unlock_bh(&hdev->fd_rule_lock);
7215 rule->location = bit_id;
7216 rule->arfs.flow_id = flow_id;
7217 rule->queue_id = queue_id;
7218 hclge_fd_build_arfs_rule(&new_tuples, rule);
7219 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7220 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7221 } else if (rule->queue_id != queue_id) {
7222 rule->queue_id = queue_id;
7223 rule->state = HCLGE_FD_TO_ADD;
7224 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7225 hclge_task_schedule(hdev, 0);
7227 spin_unlock_bh(&hdev->fd_rule_lock);
7228 return rule->location;
7231 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7233 #ifdef CONFIG_RFS_ACCEL
7234 struct hnae3_handle *handle = &hdev->vport[0].nic;
7235 struct hclge_fd_rule *rule;
7236 struct hlist_node *node;
7238 spin_lock_bh(&hdev->fd_rule_lock);
7239 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7240 spin_unlock_bh(&hdev->fd_rule_lock);
7243 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7244 if (rule->state != HCLGE_FD_ACTIVE)
7246 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7247 rule->arfs.flow_id, rule->location)) {
7248 rule->state = HCLGE_FD_TO_DEL;
7249 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7252 spin_unlock_bh(&hdev->fd_rule_lock);
7256 /* make sure being called after lock up with fd_rule_lock */
7257 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7259 #ifdef CONFIG_RFS_ACCEL
7260 struct hclge_fd_rule *rule;
7261 struct hlist_node *node;
7264 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7267 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7268 switch (rule->state) {
7269 case HCLGE_FD_TO_DEL:
7270 case HCLGE_FD_ACTIVE:
7271 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7272 rule->location, NULL, false);
7276 case HCLGE_FD_TO_ADD:
7277 hclge_fd_dec_rule_cnt(hdev, rule->location);
7278 hlist_del(&rule->rule_node);
7285 hclge_sync_fd_state(hdev);
7291 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7292 struct hclge_fd_rule *rule)
7294 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7295 struct flow_match_basic match;
7296 u16 ethtype_key, ethtype_mask;
7298 flow_rule_match_basic(flow, &match);
7299 ethtype_key = ntohs(match.key->n_proto);
7300 ethtype_mask = ntohs(match.mask->n_proto);
7302 if (ethtype_key == ETH_P_ALL) {
7306 rule->tuples.ether_proto = ethtype_key;
7307 rule->tuples_mask.ether_proto = ethtype_mask;
7308 rule->tuples.ip_proto = match.key->ip_proto;
7309 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7311 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7312 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7316 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7317 struct hclge_fd_rule *rule)
7319 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7320 struct flow_match_eth_addrs match;
7322 flow_rule_match_eth_addrs(flow, &match);
7323 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7324 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7325 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7326 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7328 rule->unused_tuple |= BIT(INNER_DST_MAC);
7329 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7333 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7334 struct hclge_fd_rule *rule)
7336 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7337 struct flow_match_vlan match;
7339 flow_rule_match_vlan(flow, &match);
7340 rule->tuples.vlan_tag1 = match.key->vlan_id |
7341 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7342 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7343 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7345 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7349 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7350 struct hclge_fd_rule *rule)
7354 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7355 struct flow_match_control match;
7357 flow_rule_match_control(flow, &match);
7358 addr_type = match.key->addr_type;
7361 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7362 struct flow_match_ipv4_addrs match;
7364 flow_rule_match_ipv4_addrs(flow, &match);
7365 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7366 rule->tuples_mask.src_ip[IPV4_INDEX] =
7367 be32_to_cpu(match.mask->src);
7368 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7369 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7370 be32_to_cpu(match.mask->dst);
7371 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7372 struct flow_match_ipv6_addrs match;
7374 flow_rule_match_ipv6_addrs(flow, &match);
7375 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7377 be32_to_cpu_array(rule->tuples_mask.src_ip,
7378 match.mask->src.s6_addr32, IPV6_SIZE);
7379 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7381 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7382 match.mask->dst.s6_addr32, IPV6_SIZE);
7384 rule->unused_tuple |= BIT(INNER_SRC_IP);
7385 rule->unused_tuple |= BIT(INNER_DST_IP);
7389 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7390 struct hclge_fd_rule *rule)
7392 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7393 struct flow_match_ports match;
7395 flow_rule_match_ports(flow, &match);
7397 rule->tuples.src_port = be16_to_cpu(match.key->src);
7398 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7399 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7400 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7402 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7403 rule->unused_tuple |= BIT(INNER_DST_PORT);
7407 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7408 struct flow_cls_offload *cls_flower,
7409 struct hclge_fd_rule *rule)
7411 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7412 struct flow_dissector *dissector = flow->match.dissector;
7414 if (dissector->used_keys &
7415 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7416 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7417 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7418 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7419 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7420 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7421 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7422 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7423 dissector->used_keys);
7427 hclge_get_cls_key_basic(flow, rule);
7428 hclge_get_cls_key_mac(flow, rule);
7429 hclge_get_cls_key_vlan(flow, rule);
7430 hclge_get_cls_key_ip(flow, rule);
7431 hclge_get_cls_key_port(flow, rule);
7436 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7437 struct flow_cls_offload *cls_flower, int tc)
7439 u32 prio = cls_flower->common.prio;
7441 if (tc < 0 || tc > hdev->tc_max) {
7442 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7447 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7448 dev_err(&hdev->pdev->dev,
7449 "prio %u should be in range[1, %u]\n",
7450 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7454 if (test_bit(prio - 1, hdev->fd_bmap)) {
7455 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7461 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7462 struct flow_cls_offload *cls_flower,
7465 struct hclge_vport *vport = hclge_get_vport(handle);
7466 struct hclge_dev *hdev = vport->back;
7467 struct hclge_fd_rule *rule;
7470 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7472 dev_err(&hdev->pdev->dev,
7473 "failed to check cls flower params, ret = %d\n", ret);
7477 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7481 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7487 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7488 rule->cls_flower.tc = tc;
7489 rule->location = cls_flower->common.prio - 1;
7491 rule->cls_flower.cookie = cls_flower->cookie;
7492 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7494 ret = hclge_add_fd_entry_common(hdev, rule);
7501 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7502 unsigned long cookie)
7504 struct hclge_fd_rule *rule;
7505 struct hlist_node *node;
7507 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7508 if (rule->cls_flower.cookie == cookie)
7515 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7516 struct flow_cls_offload *cls_flower)
7518 struct hclge_vport *vport = hclge_get_vport(handle);
7519 struct hclge_dev *hdev = vport->back;
7520 struct hclge_fd_rule *rule;
7523 spin_lock_bh(&hdev->fd_rule_lock);
7525 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7527 spin_unlock_bh(&hdev->fd_rule_lock);
7531 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7534 spin_unlock_bh(&hdev->fd_rule_lock);
7538 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7539 spin_unlock_bh(&hdev->fd_rule_lock);
7544 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7546 struct hclge_fd_rule *rule;
7547 struct hlist_node *node;
7550 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7553 spin_lock_bh(&hdev->fd_rule_lock);
7555 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7556 switch (rule->state) {
7557 case HCLGE_FD_TO_ADD:
7558 ret = hclge_fd_config_rule(hdev, rule);
7561 rule->state = HCLGE_FD_ACTIVE;
7563 case HCLGE_FD_TO_DEL:
7564 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7565 rule->location, NULL, false);
7568 hclge_fd_dec_rule_cnt(hdev, rule->location);
7569 hclge_fd_free_node(hdev, rule);
7578 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7580 spin_unlock_bh(&hdev->fd_rule_lock);
7583 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7585 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7586 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7588 hclge_clear_fd_rules_in_list(hdev, clear_list);
7591 hclge_sync_fd_user_def_cfg(hdev, false);
7593 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7596 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7598 struct hclge_vport *vport = hclge_get_vport(handle);
7599 struct hclge_dev *hdev = vport->back;
7601 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7602 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7605 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7607 struct hclge_vport *vport = hclge_get_vport(handle);
7608 struct hclge_dev *hdev = vport->back;
7610 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7613 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7615 struct hclge_vport *vport = hclge_get_vport(handle);
7616 struct hclge_dev *hdev = vport->back;
7618 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7621 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7623 struct hclge_vport *vport = hclge_get_vport(handle);
7624 struct hclge_dev *hdev = vport->back;
7626 return hdev->rst_stats.hw_reset_done_cnt;
7629 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7631 struct hclge_vport *vport = hclge_get_vport(handle);
7632 struct hclge_dev *hdev = vport->back;
7634 hdev->fd_en = enable;
7637 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7639 hclge_restore_fd_entries(handle);
7641 hclge_task_schedule(hdev, 0);
7644 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7646 struct hclge_desc desc;
7647 struct hclge_config_mac_mode_cmd *req =
7648 (struct hclge_config_mac_mode_cmd *)desc.data;
7652 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7655 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7656 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7657 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7658 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7659 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7660 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7661 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7662 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7663 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7664 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7667 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7669 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7671 dev_err(&hdev->pdev->dev,
7672 "mac enable fail, ret =%d.\n", ret);
7675 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7676 u8 switch_param, u8 param_mask)
7678 struct hclge_mac_vlan_switch_cmd *req;
7679 struct hclge_desc desc;
7683 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7684 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7686 /* read current config parameter */
7687 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7689 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7690 req->func_id = cpu_to_le32(func_id);
7692 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7694 dev_err(&hdev->pdev->dev,
7695 "read mac vlan switch parameter fail, ret = %d\n", ret);
7699 /* modify and write new config parameter */
7700 hclge_cmd_reuse_desc(&desc, false);
7701 req->switch_param = (req->switch_param & param_mask) | switch_param;
7702 req->param_mask = param_mask;
7704 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7706 dev_err(&hdev->pdev->dev,
7707 "set mac vlan switch parameter fail, ret = %d\n", ret);
7711 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7714 #define HCLGE_PHY_LINK_STATUS_NUM 200
7716 struct phy_device *phydev = hdev->hw.mac.phydev;
7721 ret = phy_read_status(phydev);
7723 dev_err(&hdev->pdev->dev,
7724 "phy update link status fail, ret = %d\n", ret);
7728 if (phydev->link == link_ret)
7731 msleep(HCLGE_LINK_STATUS_MS);
7732 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7735 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7737 #define HCLGE_MAC_LINK_STATUS_NUM 100
7744 ret = hclge_get_mac_link_status(hdev, &link_status);
7747 if (link_status == link_ret)
7750 msleep(HCLGE_LINK_STATUS_MS);
7751 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7755 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7760 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7763 hclge_phy_link_status_wait(hdev, link_ret);
7765 return hclge_mac_link_status_wait(hdev, link_ret);
7768 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7770 struct hclge_config_mac_mode_cmd *req;
7771 struct hclge_desc desc;
7775 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7776 /* 1 Read out the MAC mode config at first */
7777 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7778 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7780 dev_err(&hdev->pdev->dev,
7781 "mac loopback get fail, ret =%d.\n", ret);
7785 /* 2 Then setup the loopback flag */
7786 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7787 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7789 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7791 /* 3 Config mac work mode with loopback flag
7792 * and its original configure parameters
7794 hclge_cmd_reuse_desc(&desc, false);
7795 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7797 dev_err(&hdev->pdev->dev,
7798 "mac loopback set fail, ret =%d.\n", ret);
7802 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7803 enum hnae3_loop loop_mode)
7805 #define HCLGE_COMMON_LB_RETRY_MS 10
7806 #define HCLGE_COMMON_LB_RETRY_NUM 100
7808 struct hclge_common_lb_cmd *req;
7809 struct hclge_desc desc;
7813 req = (struct hclge_common_lb_cmd *)desc.data;
7814 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7816 switch (loop_mode) {
7817 case HNAE3_LOOP_SERIAL_SERDES:
7818 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7820 case HNAE3_LOOP_PARALLEL_SERDES:
7821 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7823 case HNAE3_LOOP_PHY:
7824 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7827 dev_err(&hdev->pdev->dev,
7828 "unsupported common loopback mode %d\n", loop_mode);
7833 req->enable = loop_mode_b;
7834 req->mask = loop_mode_b;
7836 req->mask = loop_mode_b;
7839 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7841 dev_err(&hdev->pdev->dev,
7842 "common loopback set fail, ret = %d\n", ret);
7847 msleep(HCLGE_COMMON_LB_RETRY_MS);
7848 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7850 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7852 dev_err(&hdev->pdev->dev,
7853 "common loopback get, ret = %d\n", ret);
7856 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7857 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7859 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7860 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7862 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7863 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7869 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7870 enum hnae3_loop loop_mode)
7874 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7878 hclge_cfg_mac_mode(hdev, en);
7880 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7882 dev_err(&hdev->pdev->dev,
7883 "serdes loopback config mac mode timeout\n");
7888 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7889 struct phy_device *phydev)
7893 if (!phydev->suspended) {
7894 ret = phy_suspend(phydev);
7899 ret = phy_resume(phydev);
7903 return phy_loopback(phydev, true);
7906 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7907 struct phy_device *phydev)
7911 ret = phy_loopback(phydev, false);
7915 return phy_suspend(phydev);
7918 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7920 struct phy_device *phydev = hdev->hw.mac.phydev;
7924 if (hnae3_dev_phy_imp_supported(hdev))
7925 return hclge_set_common_loopback(hdev, en,
7931 ret = hclge_enable_phy_loopback(hdev, phydev);
7933 ret = hclge_disable_phy_loopback(hdev, phydev);
7935 dev_err(&hdev->pdev->dev,
7936 "set phy loopback fail, ret = %d\n", ret);
7940 hclge_cfg_mac_mode(hdev, en);
7942 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7944 dev_err(&hdev->pdev->dev,
7945 "phy loopback config mac mode timeout\n");
7950 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7951 u16 stream_id, bool enable)
7953 struct hclge_desc desc;
7954 struct hclge_cfg_com_tqp_queue_cmd *req =
7955 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7957 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7958 req->tqp_id = cpu_to_le16(tqp_id);
7959 req->stream_id = cpu_to_le16(stream_id);
7961 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7963 return hclge_cmd_send(&hdev->hw, &desc, 1);
7966 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7968 struct hclge_vport *vport = hclge_get_vport(handle);
7969 struct hclge_dev *hdev = vport->back;
7973 for (i = 0; i < handle->kinfo.num_tqps; i++) {
7974 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7981 static int hclge_set_loopback(struct hnae3_handle *handle,
7982 enum hnae3_loop loop_mode, bool en)
7984 struct hclge_vport *vport = hclge_get_vport(handle);
7985 struct hclge_dev *hdev = vport->back;
7988 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7989 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7990 * the same, the packets are looped back in the SSU. If SSU loopback
7991 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7993 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7994 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7996 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7997 HCLGE_SWITCH_ALW_LPBK_MASK);
8002 switch (loop_mode) {
8003 case HNAE3_LOOP_APP:
8004 ret = hclge_set_app_loopback(hdev, en);
8006 case HNAE3_LOOP_SERIAL_SERDES:
8007 case HNAE3_LOOP_PARALLEL_SERDES:
8008 ret = hclge_set_common_loopback(hdev, en, loop_mode);
8010 case HNAE3_LOOP_PHY:
8011 ret = hclge_set_phy_loopback(hdev, en);
8015 dev_err(&hdev->pdev->dev,
8016 "loop_mode %d is not supported\n", loop_mode);
8023 ret = hclge_tqp_enable(handle, en);
8025 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8026 en ? "enable" : "disable", ret);
8031 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8035 ret = hclge_set_app_loopback(hdev, false);
8039 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8043 return hclge_cfg_common_loopback(hdev, false,
8044 HNAE3_LOOP_PARALLEL_SERDES);
8047 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8049 struct hclge_vport *vport = hclge_get_vport(handle);
8050 struct hnae3_knic_private_info *kinfo;
8051 struct hnae3_queue *queue;
8052 struct hclge_tqp *tqp;
8055 kinfo = &vport->nic.kinfo;
8056 for (i = 0; i < kinfo->num_tqps; i++) {
8057 queue = handle->kinfo.tqp[i];
8058 tqp = container_of(queue, struct hclge_tqp, q);
8059 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8063 static void hclge_flush_link_update(struct hclge_dev *hdev)
8065 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
8067 unsigned long last = hdev->serv_processed_cnt;
8070 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8071 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8072 last == hdev->serv_processed_cnt)
8076 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8078 struct hclge_vport *vport = hclge_get_vport(handle);
8079 struct hclge_dev *hdev = vport->back;
8082 hclge_task_schedule(hdev, 0);
8084 /* Set the DOWN flag here to disable link updating */
8085 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8087 /* flush memory to make sure DOWN is seen by service task */
8088 smp_mb__before_atomic();
8089 hclge_flush_link_update(hdev);
8093 static int hclge_ae_start(struct hnae3_handle *handle)
8095 struct hclge_vport *vport = hclge_get_vport(handle);
8096 struct hclge_dev *hdev = vport->back;
8099 hclge_cfg_mac_mode(hdev, true);
8100 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8101 hdev->hw.mac.link = 0;
8103 /* reset tqp stats */
8104 hclge_reset_tqp_stats(handle);
8106 hclge_mac_start_phy(hdev);
8111 static void hclge_ae_stop(struct hnae3_handle *handle)
8113 struct hclge_vport *vport = hclge_get_vport(handle);
8114 struct hclge_dev *hdev = vport->back;
8116 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8117 spin_lock_bh(&hdev->fd_rule_lock);
8118 hclge_clear_arfs_rules(hdev);
8119 spin_unlock_bh(&hdev->fd_rule_lock);
8121 /* If it is not PF reset, the firmware will disable the MAC,
8122 * so it only need to stop phy here.
8124 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8125 hdev->reset_type != HNAE3_FUNC_RESET) {
8126 hclge_mac_stop_phy(hdev);
8127 hclge_update_link_status(hdev);
8131 hclge_reset_tqp(handle);
8133 hclge_config_mac_tnl_int(hdev, false);
8136 hclge_cfg_mac_mode(hdev, false);
8138 hclge_mac_stop_phy(hdev);
8140 /* reset tqp stats */
8141 hclge_reset_tqp_stats(handle);
8142 hclge_update_link_status(hdev);
8145 int hclge_vport_start(struct hclge_vport *vport)
8147 struct hclge_dev *hdev = vport->back;
8149 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8150 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8151 vport->last_active_jiffies = jiffies;
8153 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8154 if (vport->vport_id) {
8155 hclge_restore_mac_table_common(vport);
8156 hclge_restore_vport_vlan_table(vport);
8158 hclge_restore_hw_table(hdev);
8162 clear_bit(vport->vport_id, hdev->vport_config_block);
8167 void hclge_vport_stop(struct hclge_vport *vport)
8169 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8172 static int hclge_client_start(struct hnae3_handle *handle)
8174 struct hclge_vport *vport = hclge_get_vport(handle);
8176 return hclge_vport_start(vport);
8179 static void hclge_client_stop(struct hnae3_handle *handle)
8181 struct hclge_vport *vport = hclge_get_vport(handle);
8183 hclge_vport_stop(vport);
8186 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8187 u16 cmdq_resp, u8 resp_code,
8188 enum hclge_mac_vlan_tbl_opcode op)
8190 struct hclge_dev *hdev = vport->back;
8193 dev_err(&hdev->pdev->dev,
8194 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8199 if (op == HCLGE_MAC_VLAN_ADD) {
8200 if (!resp_code || resp_code == 1)
8202 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8203 resp_code == HCLGE_ADD_MC_OVERFLOW)
8206 dev_err(&hdev->pdev->dev,
8207 "add mac addr failed for undefined, code=%u.\n",
8210 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8213 } else if (resp_code == 1) {
8214 dev_dbg(&hdev->pdev->dev,
8215 "remove mac addr failed for miss.\n");
8219 dev_err(&hdev->pdev->dev,
8220 "remove mac addr failed for undefined, code=%u.\n",
8223 } else if (op == HCLGE_MAC_VLAN_LKUP) {
8226 } else if (resp_code == 1) {
8227 dev_dbg(&hdev->pdev->dev,
8228 "lookup mac addr failed for miss.\n");
8232 dev_err(&hdev->pdev->dev,
8233 "lookup mac addr failed for undefined, code=%u.\n",
8238 dev_err(&hdev->pdev->dev,
8239 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8244 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8246 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8248 unsigned int word_num;
8249 unsigned int bit_num;
8251 if (vfid > 255 || vfid < 0)
8254 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8255 word_num = vfid / 32;
8256 bit_num = vfid % 32;
8258 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8260 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8262 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8263 bit_num = vfid % 32;
8265 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8267 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8273 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8275 #define HCLGE_DESC_NUMBER 3
8276 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8279 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8280 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8281 if (desc[i].data[j])
8287 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8288 const u8 *addr, bool is_mc)
8290 const unsigned char *mac_addr = addr;
8291 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8292 (mac_addr[0]) | (mac_addr[1] << 8);
8293 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8295 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8297 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8298 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8301 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8302 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8305 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8306 struct hclge_mac_vlan_tbl_entry_cmd *req)
8308 struct hclge_dev *hdev = vport->back;
8309 struct hclge_desc desc;
8314 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8316 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8318 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8320 dev_err(&hdev->pdev->dev,
8321 "del mac addr failed for cmd_send, ret =%d.\n",
8325 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8326 retval = le16_to_cpu(desc.retval);
8328 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8329 HCLGE_MAC_VLAN_REMOVE);
8332 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8333 struct hclge_mac_vlan_tbl_entry_cmd *req,
8334 struct hclge_desc *desc,
8337 struct hclge_dev *hdev = vport->back;
8342 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8344 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8345 memcpy(desc[0].data,
8347 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8348 hclge_cmd_setup_basic_desc(&desc[1],
8349 HCLGE_OPC_MAC_VLAN_ADD,
8351 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8352 hclge_cmd_setup_basic_desc(&desc[2],
8353 HCLGE_OPC_MAC_VLAN_ADD,
8355 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8357 memcpy(desc[0].data,
8359 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8360 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8363 dev_err(&hdev->pdev->dev,
8364 "lookup mac addr failed for cmd_send, ret =%d.\n",
8368 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8369 retval = le16_to_cpu(desc[0].retval);
8371 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8372 HCLGE_MAC_VLAN_LKUP);
8375 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8376 struct hclge_mac_vlan_tbl_entry_cmd *req,
8377 struct hclge_desc *mc_desc)
8379 struct hclge_dev *hdev = vport->back;
8386 struct hclge_desc desc;
8388 hclge_cmd_setup_basic_desc(&desc,
8389 HCLGE_OPC_MAC_VLAN_ADD,
8391 memcpy(desc.data, req,
8392 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8393 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8394 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8395 retval = le16_to_cpu(desc.retval);
8397 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8399 HCLGE_MAC_VLAN_ADD);
8401 hclge_cmd_reuse_desc(&mc_desc[0], false);
8402 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8403 hclge_cmd_reuse_desc(&mc_desc[1], false);
8404 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8405 hclge_cmd_reuse_desc(&mc_desc[2], false);
8406 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8407 memcpy(mc_desc[0].data, req,
8408 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8409 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8410 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8411 retval = le16_to_cpu(mc_desc[0].retval);
8413 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8415 HCLGE_MAC_VLAN_ADD);
8419 dev_err(&hdev->pdev->dev,
8420 "add mac addr failed for cmd_send, ret =%d.\n",
8428 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8429 u16 *allocated_size)
8431 struct hclge_umv_spc_alc_cmd *req;
8432 struct hclge_desc desc;
8435 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8436 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8438 req->space_size = cpu_to_le32(space_size);
8440 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8442 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8447 *allocated_size = le32_to_cpu(desc.data[1]);
8452 static int hclge_init_umv_space(struct hclge_dev *hdev)
8454 u16 allocated_size = 0;
8457 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8461 if (allocated_size < hdev->wanted_umv_size)
8462 dev_warn(&hdev->pdev->dev,
8463 "failed to alloc umv space, want %u, get %u\n",
8464 hdev->wanted_umv_size, allocated_size);
8466 hdev->max_umv_size = allocated_size;
8467 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8468 hdev->share_umv_size = hdev->priv_umv_size +
8469 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8474 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8476 struct hclge_vport *vport;
8479 for (i = 0; i < hdev->num_alloc_vport; i++) {
8480 vport = &hdev->vport[i];
8481 vport->used_umv_num = 0;
8484 mutex_lock(&hdev->vport_lock);
8485 hdev->share_umv_size = hdev->priv_umv_size +
8486 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8487 mutex_unlock(&hdev->vport_lock);
8490 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8492 struct hclge_dev *hdev = vport->back;
8496 mutex_lock(&hdev->vport_lock);
8498 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8499 hdev->share_umv_size == 0);
8502 mutex_unlock(&hdev->vport_lock);
8507 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8509 struct hclge_dev *hdev = vport->back;
8512 if (vport->used_umv_num > hdev->priv_umv_size)
8513 hdev->share_umv_size++;
8515 if (vport->used_umv_num > 0)
8516 vport->used_umv_num--;
8518 if (vport->used_umv_num >= hdev->priv_umv_size &&
8519 hdev->share_umv_size > 0)
8520 hdev->share_umv_size--;
8521 vport->used_umv_num++;
8525 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8528 struct hclge_mac_node *mac_node, *tmp;
8530 list_for_each_entry_safe(mac_node, tmp, list, node)
8531 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8537 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8538 enum HCLGE_MAC_NODE_STATE state)
8541 /* from set_rx_mode or tmp_add_list */
8542 case HCLGE_MAC_TO_ADD:
8543 if (mac_node->state == HCLGE_MAC_TO_DEL)
8544 mac_node->state = HCLGE_MAC_ACTIVE;
8546 /* only from set_rx_mode */
8547 case HCLGE_MAC_TO_DEL:
8548 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8549 list_del(&mac_node->node);
8552 mac_node->state = HCLGE_MAC_TO_DEL;
8555 /* only from tmp_add_list, the mac_node->state won't be
8558 case HCLGE_MAC_ACTIVE:
8559 if (mac_node->state == HCLGE_MAC_TO_ADD)
8560 mac_node->state = HCLGE_MAC_ACTIVE;
8566 int hclge_update_mac_list(struct hclge_vport *vport,
8567 enum HCLGE_MAC_NODE_STATE state,
8568 enum HCLGE_MAC_ADDR_TYPE mac_type,
8569 const unsigned char *addr)
8571 struct hclge_dev *hdev = vport->back;
8572 struct hclge_mac_node *mac_node;
8573 struct list_head *list;
8575 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8576 &vport->uc_mac_list : &vport->mc_mac_list;
8578 spin_lock_bh(&vport->mac_list_lock);
8580 /* if the mac addr is already in the mac list, no need to add a new
8581 * one into it, just check the mac addr state, convert it to a new
8582 * state, or just remove it, or do nothing.
8584 mac_node = hclge_find_mac_node(list, addr);
8586 hclge_update_mac_node(mac_node, state);
8587 spin_unlock_bh(&vport->mac_list_lock);
8588 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8592 /* if this address is never added, unnecessary to delete */
8593 if (state == HCLGE_MAC_TO_DEL) {
8594 spin_unlock_bh(&vport->mac_list_lock);
8595 dev_err(&hdev->pdev->dev,
8596 "failed to delete address %pM from mac list\n",
8601 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8603 spin_unlock_bh(&vport->mac_list_lock);
8607 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8609 mac_node->state = state;
8610 ether_addr_copy(mac_node->mac_addr, addr);
8611 list_add_tail(&mac_node->node, list);
8613 spin_unlock_bh(&vport->mac_list_lock);
8618 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8619 const unsigned char *addr)
8621 struct hclge_vport *vport = hclge_get_vport(handle);
8623 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8627 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8628 const unsigned char *addr)
8630 struct hclge_dev *hdev = vport->back;
8631 struct hclge_mac_vlan_tbl_entry_cmd req;
8632 struct hclge_desc desc;
8633 u16 egress_port = 0;
8636 /* mac addr check */
8637 if (is_zero_ether_addr(addr) ||
8638 is_broadcast_ether_addr(addr) ||
8639 is_multicast_ether_addr(addr)) {
8640 dev_err(&hdev->pdev->dev,
8641 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8642 addr, is_zero_ether_addr(addr),
8643 is_broadcast_ether_addr(addr),
8644 is_multicast_ether_addr(addr));
8648 memset(&req, 0, sizeof(req));
8650 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8651 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8653 req.egress_port = cpu_to_le16(egress_port);
8655 hclge_prepare_mac_addr(&req, addr, false);
8657 /* Lookup the mac address in the mac_vlan table, and add
8658 * it if the entry is inexistent. Repeated unicast entry
8659 * is not allowed in the mac vlan table.
8661 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8662 if (ret == -ENOENT) {
8663 mutex_lock(&hdev->vport_lock);
8664 if (!hclge_is_umv_space_full(vport, false)) {
8665 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8667 hclge_update_umv_space(vport, false);
8668 mutex_unlock(&hdev->vport_lock);
8671 mutex_unlock(&hdev->vport_lock);
8673 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8674 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8675 hdev->priv_umv_size);
8680 /* check if we just hit the duplicate */
8682 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8683 vport->vport_id, addr);
8687 dev_err(&hdev->pdev->dev,
8688 "PF failed to add unicast entry(%pM) in the MAC table\n",
8694 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8695 const unsigned char *addr)
8697 struct hclge_vport *vport = hclge_get_vport(handle);
8699 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8703 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8704 const unsigned char *addr)
8706 struct hclge_dev *hdev = vport->back;
8707 struct hclge_mac_vlan_tbl_entry_cmd req;
8710 /* mac addr check */
8711 if (is_zero_ether_addr(addr) ||
8712 is_broadcast_ether_addr(addr) ||
8713 is_multicast_ether_addr(addr)) {
8714 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8719 memset(&req, 0, sizeof(req));
8720 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8721 hclge_prepare_mac_addr(&req, addr, false);
8722 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8724 mutex_lock(&hdev->vport_lock);
8725 hclge_update_umv_space(vport, true);
8726 mutex_unlock(&hdev->vport_lock);
8727 } else if (ret == -ENOENT) {
8734 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8735 const unsigned char *addr)
8737 struct hclge_vport *vport = hclge_get_vport(handle);
8739 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8743 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8744 const unsigned char *addr)
8746 struct hclge_dev *hdev = vport->back;
8747 struct hclge_mac_vlan_tbl_entry_cmd req;
8748 struct hclge_desc desc[3];
8751 /* mac addr check */
8752 if (!is_multicast_ether_addr(addr)) {
8753 dev_err(&hdev->pdev->dev,
8754 "Add mc mac err! invalid mac:%pM.\n",
8758 memset(&req, 0, sizeof(req));
8759 hclge_prepare_mac_addr(&req, addr, true);
8760 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8762 /* This mac addr do not exist, add new entry for it */
8763 memset(desc[0].data, 0, sizeof(desc[0].data));
8764 memset(desc[1].data, 0, sizeof(desc[0].data));
8765 memset(desc[2].data, 0, sizeof(desc[0].data));
8767 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8770 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8771 /* if already overflow, not to print each time */
8772 if (status == -ENOSPC &&
8773 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8774 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8779 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8780 const unsigned char *addr)
8782 struct hclge_vport *vport = hclge_get_vport(handle);
8784 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8788 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8789 const unsigned char *addr)
8791 struct hclge_dev *hdev = vport->back;
8792 struct hclge_mac_vlan_tbl_entry_cmd req;
8793 enum hclge_cmd_status status;
8794 struct hclge_desc desc[3];
8796 /* mac addr check */
8797 if (!is_multicast_ether_addr(addr)) {
8798 dev_dbg(&hdev->pdev->dev,
8799 "Remove mc mac err! invalid mac:%pM.\n",
8804 memset(&req, 0, sizeof(req));
8805 hclge_prepare_mac_addr(&req, addr, true);
8806 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8808 /* This mac addr exist, remove this handle's VFID for it */
8809 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8813 if (hclge_is_all_function_id_zero(desc))
8814 /* All the vfid is zero, so need to delete this entry */
8815 status = hclge_remove_mac_vlan_tbl(vport, &req);
8817 /* Not all the vfid is zero, update the vfid */
8818 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8819 } else if (status == -ENOENT) {
8826 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8827 struct list_head *list,
8828 int (*sync)(struct hclge_vport *,
8829 const unsigned char *))
8831 struct hclge_mac_node *mac_node, *tmp;
8834 list_for_each_entry_safe(mac_node, tmp, list, node) {
8835 ret = sync(vport, mac_node->mac_addr);
8837 mac_node->state = HCLGE_MAC_ACTIVE;
8839 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8846 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8847 struct list_head *list,
8848 int (*unsync)(struct hclge_vport *,
8849 const unsigned char *))
8851 struct hclge_mac_node *mac_node, *tmp;
8854 list_for_each_entry_safe(mac_node, tmp, list, node) {
8855 ret = unsync(vport, mac_node->mac_addr);
8856 if (!ret || ret == -ENOENT) {
8857 list_del(&mac_node->node);
8860 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8867 static bool hclge_sync_from_add_list(struct list_head *add_list,
8868 struct list_head *mac_list)
8870 struct hclge_mac_node *mac_node, *tmp, *new_node;
8871 bool all_added = true;
8873 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8874 if (mac_node->state == HCLGE_MAC_TO_ADD)
8877 /* if the mac address from tmp_add_list is not in the
8878 * uc/mc_mac_list, it means have received a TO_DEL request
8879 * during the time window of adding the mac address into mac
8880 * table. if mac_node state is ACTIVE, then change it to TO_DEL,
8881 * then it will be removed at next time. else it must be TO_ADD,
8882 * this address hasn't been added into mac table,
8883 * so just remove the mac node.
8885 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8887 hclge_update_mac_node(new_node, mac_node->state);
8888 list_del(&mac_node->node);
8890 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8891 mac_node->state = HCLGE_MAC_TO_DEL;
8892 list_move_tail(&mac_node->node, mac_list);
8894 list_del(&mac_node->node);
8902 static void hclge_sync_from_del_list(struct list_head *del_list,
8903 struct list_head *mac_list)
8905 struct hclge_mac_node *mac_node, *tmp, *new_node;
8907 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8908 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8910 /* If the mac addr exists in the mac list, it means
8911 * received a new TO_ADD request during the time window
8912 * of configuring the mac address. For the mac node
8913 * state is TO_ADD, and the address is already in the
8914 * in the hardware(due to delete fail), so we just need
8915 * to change the mac node state to ACTIVE.
8917 new_node->state = HCLGE_MAC_ACTIVE;
8918 list_del(&mac_node->node);
8921 list_move_tail(&mac_node->node, mac_list);
8926 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8927 enum HCLGE_MAC_ADDR_TYPE mac_type,
8930 if (mac_type == HCLGE_MAC_ADDR_UC) {
8932 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8934 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8937 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8939 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8943 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8944 enum HCLGE_MAC_ADDR_TYPE mac_type)
8946 struct hclge_mac_node *mac_node, *tmp, *new_node;
8947 struct list_head tmp_add_list, tmp_del_list;
8948 struct list_head *list;
8951 INIT_LIST_HEAD(&tmp_add_list);
8952 INIT_LIST_HEAD(&tmp_del_list);
8954 /* move the mac addr to the tmp_add_list and tmp_del_list, then
8955 * we can add/delete these mac addr outside the spin lock
8957 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8958 &vport->uc_mac_list : &vport->mc_mac_list;
8960 spin_lock_bh(&vport->mac_list_lock);
8962 list_for_each_entry_safe(mac_node, tmp, list, node) {
8963 switch (mac_node->state) {
8964 case HCLGE_MAC_TO_DEL:
8965 list_move_tail(&mac_node->node, &tmp_del_list);
8967 case HCLGE_MAC_TO_ADD:
8968 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8971 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8972 new_node->state = mac_node->state;
8973 list_add_tail(&new_node->node, &tmp_add_list);
8981 spin_unlock_bh(&vport->mac_list_lock);
8983 /* delete first, in order to get max mac table space for adding */
8984 if (mac_type == HCLGE_MAC_ADDR_UC) {
8985 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8986 hclge_rm_uc_addr_common);
8987 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8988 hclge_add_uc_addr_common);
8990 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8991 hclge_rm_mc_addr_common);
8992 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8993 hclge_add_mc_addr_common);
8996 /* if some mac addresses were added/deleted fail, move back to the
8997 * mac_list, and retry at next time.
8999 spin_lock_bh(&vport->mac_list_lock);
9001 hclge_sync_from_del_list(&tmp_del_list, list);
9002 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9004 spin_unlock_bh(&vport->mac_list_lock);
9006 hclge_update_overflow_flags(vport, mac_type, all_added);
9009 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9011 struct hclge_dev *hdev = vport->back;
9013 if (test_bit(vport->vport_id, hdev->vport_config_block))
9016 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9022 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9026 for (i = 0; i < hdev->num_alloc_vport; i++) {
9027 struct hclge_vport *vport = &hdev->vport[i];
9029 if (!hclge_need_sync_mac_table(vport))
9032 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9033 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9037 static void hclge_build_del_list(struct list_head *list,
9039 struct list_head *tmp_del_list)
9041 struct hclge_mac_node *mac_cfg, *tmp;
9043 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9044 switch (mac_cfg->state) {
9045 case HCLGE_MAC_TO_DEL:
9046 case HCLGE_MAC_ACTIVE:
9047 list_move_tail(&mac_cfg->node, tmp_del_list);
9049 case HCLGE_MAC_TO_ADD:
9051 list_del(&mac_cfg->node);
9059 static void hclge_unsync_del_list(struct hclge_vport *vport,
9060 int (*unsync)(struct hclge_vport *vport,
9061 const unsigned char *addr),
9063 struct list_head *tmp_del_list)
9065 struct hclge_mac_node *mac_cfg, *tmp;
9068 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9069 ret = unsync(vport, mac_cfg->mac_addr);
9070 if (!ret || ret == -ENOENT) {
9071 /* clear all mac addr from hardware, but remain these
9072 * mac addr in the mac list, and restore them after
9073 * vf reset finished.
9076 mac_cfg->state == HCLGE_MAC_ACTIVE) {
9077 mac_cfg->state = HCLGE_MAC_TO_ADD;
9079 list_del(&mac_cfg->node);
9082 } else if (is_del_list) {
9083 mac_cfg->state = HCLGE_MAC_TO_DEL;
9088 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9089 enum HCLGE_MAC_ADDR_TYPE mac_type)
9091 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9092 struct hclge_dev *hdev = vport->back;
9093 struct list_head tmp_del_list, *list;
9095 if (mac_type == HCLGE_MAC_ADDR_UC) {
9096 list = &vport->uc_mac_list;
9097 unsync = hclge_rm_uc_addr_common;
9099 list = &vport->mc_mac_list;
9100 unsync = hclge_rm_mc_addr_common;
9103 INIT_LIST_HEAD(&tmp_del_list);
9106 set_bit(vport->vport_id, hdev->vport_config_block);
9108 spin_lock_bh(&vport->mac_list_lock);
9110 hclge_build_del_list(list, is_del_list, &tmp_del_list);
9112 spin_unlock_bh(&vport->mac_list_lock);
9114 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9116 spin_lock_bh(&vport->mac_list_lock);
9118 hclge_sync_from_del_list(&tmp_del_list, list);
9120 spin_unlock_bh(&vport->mac_list_lock);
9123 /* remove all mac address when uninitailize */
9124 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9125 enum HCLGE_MAC_ADDR_TYPE mac_type)
9127 struct hclge_mac_node *mac_node, *tmp;
9128 struct hclge_dev *hdev = vport->back;
9129 struct list_head tmp_del_list, *list;
9131 INIT_LIST_HEAD(&tmp_del_list);
9133 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9134 &vport->uc_mac_list : &vport->mc_mac_list;
9136 spin_lock_bh(&vport->mac_list_lock);
9138 list_for_each_entry_safe(mac_node, tmp, list, node) {
9139 switch (mac_node->state) {
9140 case HCLGE_MAC_TO_DEL:
9141 case HCLGE_MAC_ACTIVE:
9142 list_move_tail(&mac_node->node, &tmp_del_list);
9144 case HCLGE_MAC_TO_ADD:
9145 list_del(&mac_node->node);
9151 spin_unlock_bh(&vport->mac_list_lock);
9153 if (mac_type == HCLGE_MAC_ADDR_UC)
9154 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9155 hclge_rm_uc_addr_common);
9157 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9158 hclge_rm_mc_addr_common);
9160 if (!list_empty(&tmp_del_list))
9161 dev_warn(&hdev->pdev->dev,
9162 "uninit %s mac list for vport %u not completely.\n",
9163 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9166 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9167 list_del(&mac_node->node);
9172 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9174 struct hclge_vport *vport;
9177 for (i = 0; i < hdev->num_alloc_vport; i++) {
9178 vport = &hdev->vport[i];
9179 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9180 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9184 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9185 u16 cmdq_resp, u8 resp_code)
9187 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
9188 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
9189 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
9190 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
9195 dev_err(&hdev->pdev->dev,
9196 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9201 switch (resp_code) {
9202 case HCLGE_ETHERTYPE_SUCCESS_ADD:
9203 case HCLGE_ETHERTYPE_ALREADY_ADD:
9206 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9207 dev_err(&hdev->pdev->dev,
9208 "add mac ethertype failed for manager table overflow.\n");
9209 return_status = -EIO;
9211 case HCLGE_ETHERTYPE_KEY_CONFLICT:
9212 dev_err(&hdev->pdev->dev,
9213 "add mac ethertype failed for key conflict.\n");
9214 return_status = -EIO;
9217 dev_err(&hdev->pdev->dev,
9218 "add mac ethertype failed for undefined, code=%u.\n",
9220 return_status = -EIO;
9223 return return_status;
9226 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9229 struct hclge_mac_vlan_tbl_entry_cmd req;
9230 struct hclge_dev *hdev = vport->back;
9231 struct hclge_desc desc;
9232 u16 egress_port = 0;
9235 if (is_zero_ether_addr(mac_addr))
9238 memset(&req, 0, sizeof(req));
9239 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9240 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9241 req.egress_port = cpu_to_le16(egress_port);
9242 hclge_prepare_mac_addr(&req, mac_addr, false);
9244 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9247 vf_idx += HCLGE_VF_VPORT_START_NUM;
9248 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9250 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9256 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9259 struct hclge_vport *vport = hclge_get_vport(handle);
9260 struct hclge_dev *hdev = vport->back;
9262 vport = hclge_get_vf_vport(hdev, vf);
9266 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9267 dev_info(&hdev->pdev->dev,
9268 "Specified MAC(=%pM) is same as before, no change committed!\n",
9273 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9274 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9279 ether_addr_copy(vport->vf_info.mac, mac_addr);
9281 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9282 dev_info(&hdev->pdev->dev,
9283 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9285 return hclge_inform_reset_assert_to_vf(vport);
9288 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9293 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9294 const struct hclge_mac_mgr_tbl_entry_cmd *req)
9296 struct hclge_desc desc;
9301 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9302 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9304 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9306 dev_err(&hdev->pdev->dev,
9307 "add mac ethertype failed for cmd_send, ret =%d.\n",
9312 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9313 retval = le16_to_cpu(desc.retval);
9315 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9318 static int init_mgr_tbl(struct hclge_dev *hdev)
9323 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9324 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9326 dev_err(&hdev->pdev->dev,
9327 "add mac ethertype failed, ret =%d.\n",
9336 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9338 struct hclge_vport *vport = hclge_get_vport(handle);
9339 struct hclge_dev *hdev = vport->back;
9341 ether_addr_copy(p, hdev->hw.mac.mac_addr);
9344 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9345 const u8 *old_addr, const u8 *new_addr)
9347 struct list_head *list = &vport->uc_mac_list;
9348 struct hclge_mac_node *old_node, *new_node;
9350 new_node = hclge_find_mac_node(list, new_addr);
9352 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9356 new_node->state = HCLGE_MAC_TO_ADD;
9357 ether_addr_copy(new_node->mac_addr, new_addr);
9358 list_add(&new_node->node, list);
9360 if (new_node->state == HCLGE_MAC_TO_DEL)
9361 new_node->state = HCLGE_MAC_ACTIVE;
9363 /* make sure the new addr is in the list head, avoid dev
9364 * addr may be not re-added into mac table for the umv space
9365 * limitation after global/imp reset which will clear mac
9366 * table by hardware.
9368 list_move(&new_node->node, list);
9371 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9372 old_node = hclge_find_mac_node(list, old_addr);
9374 if (old_node->state == HCLGE_MAC_TO_ADD) {
9375 list_del(&old_node->node);
9378 old_node->state = HCLGE_MAC_TO_DEL;
9383 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9388 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9391 const unsigned char *new_addr = (const unsigned char *)p;
9392 struct hclge_vport *vport = hclge_get_vport(handle);
9393 struct hclge_dev *hdev = vport->back;
9394 unsigned char *old_addr = NULL;
9397 /* mac addr check */
9398 if (is_zero_ether_addr(new_addr) ||
9399 is_broadcast_ether_addr(new_addr) ||
9400 is_multicast_ether_addr(new_addr)) {
9401 dev_err(&hdev->pdev->dev,
9402 "change uc mac err! invalid mac: %pM.\n",
9407 ret = hclge_pause_addr_cfg(hdev, new_addr);
9409 dev_err(&hdev->pdev->dev,
9410 "failed to configure mac pause address, ret = %d\n",
9416 old_addr = hdev->hw.mac.mac_addr;
9418 spin_lock_bh(&vport->mac_list_lock);
9419 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9421 dev_err(&hdev->pdev->dev,
9422 "failed to change the mac addr:%pM, ret = %d\n",
9424 spin_unlock_bh(&vport->mac_list_lock);
9427 hclge_pause_addr_cfg(hdev, old_addr);
9431 /* we must update dev addr with spin lock protect, preventing dev addr
9432 * being removed by set_rx_mode path.
9434 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9435 spin_unlock_bh(&vport->mac_list_lock);
9437 hclge_task_schedule(hdev, 0);
9442 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9444 struct mii_ioctl_data *data = if_mii(ifr);
9446 if (!hnae3_dev_phy_imp_supported(hdev))
9451 data->phy_id = hdev->hw.mac.phy_addr;
9452 /* this command reads phy id and register at the same time */
9455 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9459 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9465 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9468 struct hclge_vport *vport = hclge_get_vport(handle);
9469 struct hclge_dev *hdev = vport->back;
9473 return hclge_ptp_get_cfg(hdev, ifr);
9475 return hclge_ptp_set_cfg(hdev, ifr);
9477 if (!hdev->hw.mac.phydev)
9478 return hclge_mii_ioctl(hdev, ifr, cmd);
9481 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9484 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9487 struct hclge_port_vlan_filter_bypass_cmd *req;
9488 struct hclge_desc desc;
9491 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9492 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9494 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9497 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9499 dev_err(&hdev->pdev->dev,
9500 "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9506 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9507 u8 fe_type, bool filter_en, u8 vf_id)
9509 struct hclge_vlan_filter_ctrl_cmd *req;
9510 struct hclge_desc desc;
9513 /* read current vlan filter parameter */
9514 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9515 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9516 req->vlan_type = vlan_type;
9519 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9521 dev_err(&hdev->pdev->dev,
9522 "failed to get vlan filter config, ret = %d.\n", ret);
9526 /* modify and write new config parameter */
9527 hclge_cmd_reuse_desc(&desc, false);
9528 req->vlan_fe = filter_en ?
9529 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9531 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9533 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9539 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9541 struct hclge_dev *hdev = vport->back;
9542 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9545 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9546 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9547 HCLGE_FILTER_FE_EGRESS_V1_B,
9548 enable, vport->vport_id);
9550 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9551 HCLGE_FILTER_FE_EGRESS, enable,
9556 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9557 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9559 } else if (!vport->vport_id) {
9560 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9563 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9564 HCLGE_FILTER_FE_INGRESS,
9571 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9573 struct hnae3_handle *handle = &vport->nic;
9574 struct hclge_vport_vlan_cfg *vlan, *tmp;
9575 struct hclge_dev *hdev = vport->back;
9577 if (vport->vport_id) {
9578 if (vport->port_base_vlan_cfg.state !=
9579 HNAE3_PORT_BASE_VLAN_DISABLE)
9582 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9584 } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9588 if (!vport->req_vlan_fltr_en)
9591 /* compatible with former device, always enable vlan filter */
9592 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9595 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9596 if (vlan->vlan_id != 0)
9602 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9604 struct hclge_dev *hdev = vport->back;
9608 mutex_lock(&hdev->vport_lock);
9610 vport->req_vlan_fltr_en = request_en;
9612 need_en = hclge_need_enable_vport_vlan_filter(vport);
9613 if (need_en == vport->cur_vlan_fltr_en) {
9614 mutex_unlock(&hdev->vport_lock);
9618 ret = hclge_set_vport_vlan_filter(vport, need_en);
9620 mutex_unlock(&hdev->vport_lock);
9624 vport->cur_vlan_fltr_en = need_en;
9626 mutex_unlock(&hdev->vport_lock);
9631 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9633 struct hclge_vport *vport = hclge_get_vport(handle);
9635 return hclge_enable_vport_vlan_filter(vport, enable);
9638 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9639 bool is_kill, u16 vlan,
9640 struct hclge_desc *desc)
9642 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9643 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9648 hclge_cmd_setup_basic_desc(&desc[0],
9649 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9650 hclge_cmd_setup_basic_desc(&desc[1],
9651 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9653 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9655 vf_byte_off = vfid / 8;
9656 vf_byte_val = 1 << (vfid % 8);
9658 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9659 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9661 req0->vlan_id = cpu_to_le16(vlan);
9662 req0->vlan_cfg = is_kill;
9664 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9665 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9667 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9669 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9671 dev_err(&hdev->pdev->dev,
9672 "Send vf vlan command fail, ret =%d.\n",
9680 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9681 bool is_kill, struct hclge_desc *desc)
9683 struct hclge_vlan_filter_vf_cfg_cmd *req;
9685 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9688 #define HCLGE_VF_VLAN_NO_ENTRY 2
9689 if (!req->resp_code || req->resp_code == 1)
9692 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9693 set_bit(vfid, hdev->vf_vlan_full);
9694 dev_warn(&hdev->pdev->dev,
9695 "vf vlan table is full, vf vlan filter is disabled\n");
9699 dev_err(&hdev->pdev->dev,
9700 "Add vf vlan filter fail, ret =%u.\n",
9703 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9704 if (!req->resp_code)
9707 /* vf vlan filter is disabled when vf vlan table is full,
9708 * then new vlan id will not be added into vf vlan table.
9709 * Just return 0 without warning, avoid massive verbose
9710 * print logs when unload.
9712 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9715 dev_err(&hdev->pdev->dev,
9716 "Kill vf vlan filter fail, ret =%u.\n",
9723 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9724 bool is_kill, u16 vlan)
9726 struct hclge_vport *vport = &hdev->vport[vfid];
9727 struct hclge_desc desc[2];
9730 /* if vf vlan table is full, firmware will close vf vlan filter, it
9731 * is unable and unnecessary to add new vlan id to vf vlan filter.
9732 * If spoof check is enable, and vf vlan is full, it shouldn't add
9733 * new vlan, because tx packets with these vlan id will be dropped.
9735 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9736 if (vport->vf_info.spoofchk && vlan) {
9737 dev_err(&hdev->pdev->dev,
9738 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9744 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9748 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9751 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9752 u16 vlan_id, bool is_kill)
9754 struct hclge_vlan_filter_pf_cfg_cmd *req;
9755 struct hclge_desc desc;
9756 u8 vlan_offset_byte_val;
9757 u8 vlan_offset_byte;
9761 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9763 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9764 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9765 HCLGE_VLAN_BYTE_SIZE;
9766 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9768 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9769 req->vlan_offset = vlan_offset_160;
9770 req->vlan_cfg = is_kill;
9771 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9773 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9775 dev_err(&hdev->pdev->dev,
9776 "port vlan command, send fail, ret =%d.\n", ret);
9780 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9781 u16 vport_id, u16 vlan_id,
9784 u16 vport_idx, vport_num = 0;
9787 if (is_kill && !vlan_id)
9790 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9792 dev_err(&hdev->pdev->dev,
9793 "Set %u vport vlan filter config fail, ret =%d.\n",
9798 /* vlan 0 may be added twice when 8021q module is enabled */
9799 if (!is_kill && !vlan_id &&
9800 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9803 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9804 dev_err(&hdev->pdev->dev,
9805 "Add port vlan failed, vport %u is already in vlan %u\n",
9811 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9812 dev_err(&hdev->pdev->dev,
9813 "Delete port vlan failed, vport %u is not in vlan %u\n",
9818 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9821 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9822 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9828 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9830 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9831 struct hclge_vport_vtag_tx_cfg_cmd *req;
9832 struct hclge_dev *hdev = vport->back;
9833 struct hclge_desc desc;
9837 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9839 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9840 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9841 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9842 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9843 vcfg->accept_tag1 ? 1 : 0);
9844 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9845 vcfg->accept_untag1 ? 1 : 0);
9846 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9847 vcfg->accept_tag2 ? 1 : 0);
9848 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9849 vcfg->accept_untag2 ? 1 : 0);
9850 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9851 vcfg->insert_tag1_en ? 1 : 0);
9852 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9853 vcfg->insert_tag2_en ? 1 : 0);
9854 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9855 vcfg->tag_shift_mode_en ? 1 : 0);
9856 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9858 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9859 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9860 HCLGE_VF_NUM_PER_BYTE;
9861 req->vf_bitmap[bmap_index] =
9862 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9864 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9866 dev_err(&hdev->pdev->dev,
9867 "Send port txvlan cfg command fail, ret =%d\n",
9873 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9875 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9876 struct hclge_vport_vtag_rx_cfg_cmd *req;
9877 struct hclge_dev *hdev = vport->back;
9878 struct hclge_desc desc;
9882 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9884 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9885 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9886 vcfg->strip_tag1_en ? 1 : 0);
9887 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9888 vcfg->strip_tag2_en ? 1 : 0);
9889 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9890 vcfg->vlan1_vlan_prionly ? 1 : 0);
9891 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9892 vcfg->vlan2_vlan_prionly ? 1 : 0);
9893 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9894 vcfg->strip_tag1_discard_en ? 1 : 0);
9895 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9896 vcfg->strip_tag2_discard_en ? 1 : 0);
9898 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9899 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9900 HCLGE_VF_NUM_PER_BYTE;
9901 req->vf_bitmap[bmap_index] =
9902 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9904 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9906 dev_err(&hdev->pdev->dev,
9907 "Send port rxvlan cfg command fail, ret =%d\n",
9913 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9914 u16 port_base_vlan_state,
9915 u16 vlan_tag, u8 qos)
9919 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9920 vport->txvlan_cfg.accept_tag1 = true;
9921 vport->txvlan_cfg.insert_tag1_en = false;
9922 vport->txvlan_cfg.default_tag1 = 0;
9924 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9926 vport->txvlan_cfg.accept_tag1 =
9927 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9928 vport->txvlan_cfg.insert_tag1_en = true;
9929 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9933 vport->txvlan_cfg.accept_untag1 = true;
9935 /* accept_tag2 and accept_untag2 are not supported on
9936 * pdev revision(0x20), new revision support them,
9937 * this two fields can not be configured by user.
9939 vport->txvlan_cfg.accept_tag2 = true;
9940 vport->txvlan_cfg.accept_untag2 = true;
9941 vport->txvlan_cfg.insert_tag2_en = false;
9942 vport->txvlan_cfg.default_tag2 = 0;
9943 vport->txvlan_cfg.tag_shift_mode_en = true;
9945 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9946 vport->rxvlan_cfg.strip_tag1_en = false;
9947 vport->rxvlan_cfg.strip_tag2_en =
9948 vport->rxvlan_cfg.rx_vlan_offload_en;
9949 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9951 vport->rxvlan_cfg.strip_tag1_en =
9952 vport->rxvlan_cfg.rx_vlan_offload_en;
9953 vport->rxvlan_cfg.strip_tag2_en = true;
9954 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9957 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9958 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9959 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9961 ret = hclge_set_vlan_tx_offload_cfg(vport);
9965 return hclge_set_vlan_rx_offload_cfg(vport);
9968 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9970 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9971 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9972 struct hclge_desc desc;
9975 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9976 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9977 rx_req->ot_fst_vlan_type =
9978 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9979 rx_req->ot_sec_vlan_type =
9980 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9981 rx_req->in_fst_vlan_type =
9982 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9983 rx_req->in_sec_vlan_type =
9984 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9986 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9988 dev_err(&hdev->pdev->dev,
9989 "Send rxvlan protocol type command fail, ret =%d\n",
9994 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9996 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9997 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9998 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10000 status = hclge_cmd_send(&hdev->hw, &desc, 1);
10002 dev_err(&hdev->pdev->dev,
10003 "Send txvlan protocol type command fail, ret =%d\n",
10009 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10011 #define HCLGE_DEF_VLAN_TYPE 0x8100
10013 struct hnae3_handle *handle = &hdev->vport[0].nic;
10014 struct hclge_vport *vport;
10018 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10019 /* for revision 0x21, vf vlan filter is per function */
10020 for (i = 0; i < hdev->num_alloc_vport; i++) {
10021 vport = &hdev->vport[i];
10022 ret = hclge_set_vlan_filter_ctrl(hdev,
10023 HCLGE_FILTER_TYPE_VF,
10024 HCLGE_FILTER_FE_EGRESS,
10029 vport->cur_vlan_fltr_en = true;
10032 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10033 HCLGE_FILTER_FE_INGRESS, true,
10038 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10039 HCLGE_FILTER_FE_EGRESS_V1_B,
10045 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10046 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10047 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10048 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10049 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10050 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10052 ret = hclge_set_vlan_protocol_type(hdev);
10056 for (i = 0; i < hdev->num_alloc_vport; i++) {
10060 vport = &hdev->vport[i];
10061 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10062 qos = vport->port_base_vlan_cfg.vlan_info.qos;
10064 ret = hclge_vlan_offload_cfg(vport,
10065 vport->port_base_vlan_cfg.state,
10071 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10074 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10075 bool writen_to_tbl)
10077 struct hclge_vport_vlan_cfg *vlan, *tmp;
10079 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10080 if (vlan->vlan_id == vlan_id)
10083 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10087 vlan->hd_tbl_status = writen_to_tbl;
10088 vlan->vlan_id = vlan_id;
10090 list_add_tail(&vlan->node, &vport->vlan_list);
10093 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10095 struct hclge_vport_vlan_cfg *vlan, *tmp;
10096 struct hclge_dev *hdev = vport->back;
10099 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10100 if (!vlan->hd_tbl_status) {
10101 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10103 vlan->vlan_id, false);
10105 dev_err(&hdev->pdev->dev,
10106 "restore vport vlan list failed, ret=%d\n",
10111 vlan->hd_tbl_status = true;
10117 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10120 struct hclge_vport_vlan_cfg *vlan, *tmp;
10121 struct hclge_dev *hdev = vport->back;
10123 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10124 if (vlan->vlan_id == vlan_id) {
10125 if (is_write_tbl && vlan->hd_tbl_status)
10126 hclge_set_vlan_filter_hw(hdev,
10127 htons(ETH_P_8021Q),
10132 list_del(&vlan->node);
10139 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10141 struct hclge_vport_vlan_cfg *vlan, *tmp;
10142 struct hclge_dev *hdev = vport->back;
10144 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10145 if (vlan->hd_tbl_status)
10146 hclge_set_vlan_filter_hw(hdev,
10147 htons(ETH_P_8021Q),
10152 vlan->hd_tbl_status = false;
10154 list_del(&vlan->node);
10158 clear_bit(vport->vport_id, hdev->vf_vlan_full);
10161 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10163 struct hclge_vport_vlan_cfg *vlan, *tmp;
10164 struct hclge_vport *vport;
10167 for (i = 0; i < hdev->num_alloc_vport; i++) {
10168 vport = &hdev->vport[i];
10169 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10170 list_del(&vlan->node);
10176 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10178 struct hclge_vport_vlan_cfg *vlan, *tmp;
10179 struct hclge_dev *hdev = vport->back;
10185 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10186 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10187 state = vport->port_base_vlan_cfg.state;
10189 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10190 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10191 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10192 vport->vport_id, vlan_id,
10197 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10198 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10200 vlan->vlan_id, false);
10203 vlan->hd_tbl_status = true;
10207 /* For global reset and imp reset, hardware will clear the mac table,
10208 * so we change the mac address state from ACTIVE to TO_ADD, then they
10209 * can be restored in the service task after reset complete. Furtherly,
10210 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
10211 * be restored after reset, so just remove these mac nodes from mac_list.
10213 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10215 struct hclge_mac_node *mac_node, *tmp;
10217 list_for_each_entry_safe(mac_node, tmp, list, node) {
10218 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10219 mac_node->state = HCLGE_MAC_TO_ADD;
10220 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10221 list_del(&mac_node->node);
10227 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10229 spin_lock_bh(&vport->mac_list_lock);
10231 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10232 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10233 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10235 spin_unlock_bh(&vport->mac_list_lock);
10238 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10240 struct hclge_vport *vport = &hdev->vport[0];
10241 struct hnae3_handle *handle = &vport->nic;
10243 hclge_restore_mac_table_common(vport);
10244 hclge_restore_vport_vlan_table(vport);
10245 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10246 hclge_restore_fd_entries(handle);
10249 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10251 struct hclge_vport *vport = hclge_get_vport(handle);
10253 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10254 vport->rxvlan_cfg.strip_tag1_en = false;
10255 vport->rxvlan_cfg.strip_tag2_en = enable;
10256 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10258 vport->rxvlan_cfg.strip_tag1_en = enable;
10259 vport->rxvlan_cfg.strip_tag2_en = true;
10260 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10263 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10264 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10265 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10266 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10268 return hclge_set_vlan_rx_offload_cfg(vport);
10271 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10273 struct hclge_dev *hdev = vport->back;
10275 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10276 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10279 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10280 u16 port_base_vlan_state,
10281 struct hclge_vlan_info *new_info,
10282 struct hclge_vlan_info *old_info)
10284 struct hclge_dev *hdev = vport->back;
10287 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10288 hclge_rm_vport_all_vlan_table(vport, false);
10289 /* force clear VLAN 0 */
10290 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10293 return hclge_set_vlan_filter_hw(hdev,
10294 htons(new_info->vlan_proto),
10296 new_info->vlan_tag,
10300 /* force add VLAN 0 */
10301 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10305 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10306 vport->vport_id, old_info->vlan_tag,
10311 return hclge_add_vport_all_vlan_table(vport);
10314 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10315 const struct hclge_vlan_info *old_cfg)
10317 if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10320 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10326 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10327 struct hclge_vlan_info *vlan_info)
10329 struct hnae3_handle *nic = &vport->nic;
10330 struct hclge_vlan_info *old_vlan_info;
10331 struct hclge_dev *hdev = vport->back;
10334 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10336 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10341 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10344 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10345 /* add new VLAN tag */
10346 ret = hclge_set_vlan_filter_hw(hdev,
10347 htons(vlan_info->vlan_proto),
10349 vlan_info->vlan_tag,
10354 /* remove old VLAN tag */
10355 if (old_vlan_info->vlan_tag == 0)
10356 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10359 ret = hclge_set_vlan_filter_hw(hdev,
10360 htons(ETH_P_8021Q),
10362 old_vlan_info->vlan_tag,
10365 dev_err(&hdev->pdev->dev,
10366 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10367 vport->vport_id, old_vlan_info->vlan_tag, ret);
10374 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10380 vport->port_base_vlan_cfg.state = state;
10381 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10382 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10384 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10386 vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10387 hclge_set_vport_vlan_fltr_change(vport);
10392 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10393 enum hnae3_port_base_vlan_state state,
10396 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10398 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10400 return HNAE3_PORT_BASE_VLAN_ENABLE;
10404 return HNAE3_PORT_BASE_VLAN_DISABLE;
10406 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10407 vport->port_base_vlan_cfg.vlan_info.qos == qos)
10408 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10410 return HNAE3_PORT_BASE_VLAN_MODIFY;
10413 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10414 u16 vlan, u8 qos, __be16 proto)
10416 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10417 struct hclge_vport *vport = hclge_get_vport(handle);
10418 struct hclge_dev *hdev = vport->back;
10419 struct hclge_vlan_info vlan_info;
10423 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10424 return -EOPNOTSUPP;
10426 vport = hclge_get_vf_vport(hdev, vfid);
10430 /* qos is a 3 bits value, so can not be bigger than 7 */
10431 if (vlan > VLAN_N_VID - 1 || qos > 7)
10433 if (proto != htons(ETH_P_8021Q))
10434 return -EPROTONOSUPPORT;
10436 state = hclge_get_port_base_vlan_state(vport,
10437 vport->port_base_vlan_cfg.state,
10439 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10442 vlan_info.vlan_tag = vlan;
10443 vlan_info.qos = qos;
10444 vlan_info.vlan_proto = ntohs(proto);
10446 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10448 dev_err(&hdev->pdev->dev,
10449 "failed to update port base vlan for vf %d, ret = %d\n",
10454 /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10457 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10458 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10459 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10460 vport->vport_id, state,
10466 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10468 struct hclge_vlan_info *vlan_info;
10469 struct hclge_vport *vport;
10473 /* clear port base vlan for all vf */
10474 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10475 vport = &hdev->vport[vf];
10476 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10478 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10480 vlan_info->vlan_tag, true);
10482 dev_err(&hdev->pdev->dev,
10483 "failed to clear vf vlan for vf%d, ret = %d\n",
10484 vf - HCLGE_VF_VPORT_START_NUM, ret);
10488 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10489 u16 vlan_id, bool is_kill)
10491 struct hclge_vport *vport = hclge_get_vport(handle);
10492 struct hclge_dev *hdev = vport->back;
10493 bool writen_to_tbl = false;
10496 /* When device is resetting or reset failed, firmware is unable to
10497 * handle mailbox. Just record the vlan id, and remove it after
10500 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10501 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10502 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10506 /* when port base vlan enabled, we use port base vlan as the vlan
10507 * filter entry. In this case, we don't update vlan filter table
10508 * when user add new vlan or remove exist vlan, just update the vport
10509 * vlan list. The vlan id in vlan list will be writen in vlan filter
10510 * table until port base vlan disabled
10512 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10513 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10515 writen_to_tbl = true;
10520 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10522 hclge_add_vport_vlan_table(vport, vlan_id,
10524 } else if (is_kill) {
10525 /* when remove hw vlan filter failed, record the vlan id,
10526 * and try to remove it from hw later, to be consistence
10529 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10532 hclge_set_vport_vlan_fltr_change(vport);
10537 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10539 struct hclge_vport *vport;
10543 for (i = 0; i < hdev->num_alloc_vport; i++) {
10544 vport = &hdev->vport[i];
10545 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10549 ret = hclge_enable_vport_vlan_filter(vport,
10550 vport->req_vlan_fltr_en);
10552 dev_err(&hdev->pdev->dev,
10553 "failed to sync vlan filter state for vport%u, ret = %d\n",
10554 vport->vport_id, ret);
10555 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10562 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10564 #define HCLGE_MAX_SYNC_COUNT 60
10566 int i, ret, sync_cnt = 0;
10569 /* start from vport 1 for PF is always alive */
10570 for (i = 0; i < hdev->num_alloc_vport; i++) {
10571 struct hclge_vport *vport = &hdev->vport[i];
10573 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10575 while (vlan_id != VLAN_N_VID) {
10576 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10577 vport->vport_id, vlan_id,
10579 if (ret && ret != -EINVAL)
10582 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10583 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10584 hclge_set_vport_vlan_fltr_change(vport);
10587 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10590 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10595 hclge_sync_vlan_fltr_state(hdev);
10598 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10600 struct hclge_config_max_frm_size_cmd *req;
10601 struct hclge_desc desc;
10603 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10605 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10606 req->max_frm_size = cpu_to_le16(new_mps);
10607 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10609 return hclge_cmd_send(&hdev->hw, &desc, 1);
10612 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10614 struct hclge_vport *vport = hclge_get_vport(handle);
10616 return hclge_set_vport_mtu(vport, new_mtu);
10619 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10621 struct hclge_dev *hdev = vport->back;
10622 int i, max_frm_size, ret;
10624 /* HW supprt 2 layer vlan */
10625 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10626 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10627 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10630 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10631 mutex_lock(&hdev->vport_lock);
10632 /* VF's mps must fit within hdev->mps */
10633 if (vport->vport_id && max_frm_size > hdev->mps) {
10634 mutex_unlock(&hdev->vport_lock);
10636 } else if (vport->vport_id) {
10637 vport->mps = max_frm_size;
10638 mutex_unlock(&hdev->vport_lock);
10642 /* PF's mps must be greater then VF's mps */
10643 for (i = 1; i < hdev->num_alloc_vport; i++)
10644 if (max_frm_size < hdev->vport[i].mps) {
10645 mutex_unlock(&hdev->vport_lock);
10649 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10651 ret = hclge_set_mac_mtu(hdev, max_frm_size);
10653 dev_err(&hdev->pdev->dev,
10654 "Change mtu fail, ret =%d\n", ret);
10658 hdev->mps = max_frm_size;
10659 vport->mps = max_frm_size;
10661 ret = hclge_buffer_alloc(hdev);
10663 dev_err(&hdev->pdev->dev,
10664 "Allocate buffer fail, ret =%d\n", ret);
10667 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10668 mutex_unlock(&hdev->vport_lock);
10672 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10675 struct hclge_reset_tqp_queue_cmd *req;
10676 struct hclge_desc desc;
10679 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10681 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10682 req->tqp_id = cpu_to_le16(queue_id);
10684 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10686 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10688 dev_err(&hdev->pdev->dev,
10689 "Send tqp reset cmd error, status =%d\n", ret);
10696 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10698 struct hclge_reset_tqp_queue_cmd *req;
10699 struct hclge_desc desc;
10702 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10704 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10705 req->tqp_id = cpu_to_le16(queue_id);
10707 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10709 dev_err(&hdev->pdev->dev,
10710 "Get reset status error, status =%d\n", ret);
10714 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10717 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10719 struct hnae3_queue *queue;
10720 struct hclge_tqp *tqp;
10722 queue = handle->kinfo.tqp[queue_id];
10723 tqp = container_of(queue, struct hclge_tqp, q);
10728 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10730 struct hclge_vport *vport = hclge_get_vport(handle);
10731 struct hclge_dev *hdev = vport->back;
10732 u16 reset_try_times = 0;
10738 for (i = 0; i < handle->kinfo.num_tqps; i++) {
10739 queue_gid = hclge_covert_handle_qid_global(handle, i);
10740 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10742 dev_err(&hdev->pdev->dev,
10743 "failed to send reset tqp cmd, ret = %d\n",
10748 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10749 reset_status = hclge_get_reset_status(hdev, queue_gid);
10753 /* Wait for tqp hw reset */
10754 usleep_range(1000, 1200);
10757 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10758 dev_err(&hdev->pdev->dev,
10759 "wait for tqp hw reset timeout\n");
10763 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10765 dev_err(&hdev->pdev->dev,
10766 "failed to deassert soft reset, ret = %d\n",
10770 reset_try_times = 0;
10775 static int hclge_reset_rcb(struct hnae3_handle *handle)
10777 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10778 #define HCLGE_RESET_RCB_SUCCESS 1U
10780 struct hclge_vport *vport = hclge_get_vport(handle);
10781 struct hclge_dev *hdev = vport->back;
10782 struct hclge_reset_cmd *req;
10783 struct hclge_desc desc;
10788 queue_gid = hclge_covert_handle_qid_global(handle, 0);
10790 req = (struct hclge_reset_cmd *)desc.data;
10791 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10792 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10793 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10794 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10796 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10798 dev_err(&hdev->pdev->dev,
10799 "failed to send rcb reset cmd, ret = %d\n", ret);
10803 return_status = req->fun_reset_rcb_return_status;
10804 if (return_status == HCLGE_RESET_RCB_SUCCESS)
10807 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10808 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10813 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10814 * again to reset all tqps
10816 return hclge_reset_tqp_cmd(handle);
10819 int hclge_reset_tqp(struct hnae3_handle *handle)
10821 struct hclge_vport *vport = hclge_get_vport(handle);
10822 struct hclge_dev *hdev = vport->back;
10825 /* only need to disable PF's tqp */
10826 if (!vport->vport_id) {
10827 ret = hclge_tqp_enable(handle, false);
10829 dev_err(&hdev->pdev->dev,
10830 "failed to disable tqp, ret = %d\n", ret);
10835 return hclge_reset_rcb(handle);
10838 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10840 struct hclge_vport *vport = hclge_get_vport(handle);
10841 struct hclge_dev *hdev = vport->back;
10843 return hdev->fw_version;
10846 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10848 struct phy_device *phydev = hdev->hw.mac.phydev;
10853 phy_set_asym_pause(phydev, rx_en, tx_en);
10856 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10860 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10863 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10865 dev_err(&hdev->pdev->dev,
10866 "configure pauseparam error, ret = %d.\n", ret);
10871 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10873 struct phy_device *phydev = hdev->hw.mac.phydev;
10874 u16 remote_advertising = 0;
10875 u16 local_advertising;
10876 u32 rx_pause, tx_pause;
10879 if (!phydev->link || !phydev->autoneg)
10882 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10885 remote_advertising = LPA_PAUSE_CAP;
10887 if (phydev->asym_pause)
10888 remote_advertising |= LPA_PAUSE_ASYM;
10890 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10891 remote_advertising);
10892 tx_pause = flowctl & FLOW_CTRL_TX;
10893 rx_pause = flowctl & FLOW_CTRL_RX;
10895 if (phydev->duplex == HCLGE_MAC_HALF) {
10900 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10903 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10904 u32 *rx_en, u32 *tx_en)
10906 struct hclge_vport *vport = hclge_get_vport(handle);
10907 struct hclge_dev *hdev = vport->back;
10908 u8 media_type = hdev->hw.mac.media_type;
10910 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10911 hclge_get_autoneg(handle) : 0;
10913 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10919 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10922 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10925 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10934 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10935 u32 rx_en, u32 tx_en)
10937 if (rx_en && tx_en)
10938 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10939 else if (rx_en && !tx_en)
10940 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10941 else if (!rx_en && tx_en)
10942 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10944 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10946 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10949 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10950 u32 rx_en, u32 tx_en)
10952 struct hclge_vport *vport = hclge_get_vport(handle);
10953 struct hclge_dev *hdev = vport->back;
10954 struct phy_device *phydev = hdev->hw.mac.phydev;
10957 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10958 fc_autoneg = hclge_get_autoneg(handle);
10959 if (auto_neg != fc_autoneg) {
10960 dev_info(&hdev->pdev->dev,
10961 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10962 return -EOPNOTSUPP;
10966 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10967 dev_info(&hdev->pdev->dev,
10968 "Priority flow control enabled. Cannot set link flow control.\n");
10969 return -EOPNOTSUPP;
10972 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10974 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10976 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10977 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10980 return phy_start_aneg(phydev);
10982 return -EOPNOTSUPP;
10985 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10986 u8 *auto_neg, u32 *speed, u8 *duplex)
10988 struct hclge_vport *vport = hclge_get_vport(handle);
10989 struct hclge_dev *hdev = vport->back;
10992 *speed = hdev->hw.mac.speed;
10994 *duplex = hdev->hw.mac.duplex;
10996 *auto_neg = hdev->hw.mac.autoneg;
10999 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11002 struct hclge_vport *vport = hclge_get_vport(handle);
11003 struct hclge_dev *hdev = vport->back;
11005 /* When nic is down, the service task is not running, doesn't update
11006 * the port information per second. Query the port information before
11007 * return the media type, ensure getting the correct media information.
11009 hclge_update_port_info(hdev);
11012 *media_type = hdev->hw.mac.media_type;
11015 *module_type = hdev->hw.mac.module_type;
11018 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11019 u8 *tp_mdix_ctrl, u8 *tp_mdix)
11021 struct hclge_vport *vport = hclge_get_vport(handle);
11022 struct hclge_dev *hdev = vport->back;
11023 struct phy_device *phydev = hdev->hw.mac.phydev;
11024 int mdix_ctrl, mdix, is_resolved;
11025 unsigned int retval;
11028 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11029 *tp_mdix = ETH_TP_MDI_INVALID;
11033 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11035 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11036 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11037 HCLGE_PHY_MDIX_CTRL_S);
11039 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11040 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11041 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11043 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11045 switch (mdix_ctrl) {
11047 *tp_mdix_ctrl = ETH_TP_MDI;
11050 *tp_mdix_ctrl = ETH_TP_MDI_X;
11053 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11056 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11061 *tp_mdix = ETH_TP_MDI_INVALID;
11063 *tp_mdix = ETH_TP_MDI_X;
11065 *tp_mdix = ETH_TP_MDI;
11068 static void hclge_info_show(struct hclge_dev *hdev)
11070 struct device *dev = &hdev->pdev->dev;
11072 dev_info(dev, "PF info begin:\n");
11074 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11075 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11076 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11077 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11078 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11079 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11080 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11081 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11082 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11083 dev_info(dev, "This is %s PF\n",
11084 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11085 dev_info(dev, "DCB %s\n",
11086 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11087 dev_info(dev, "MQPRIO %s\n",
11088 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11089 dev_info(dev, "Default tx spare buffer size: %u\n",
11090 hdev->tx_spare_buf_size);
11092 dev_info(dev, "PF info end.\n");
11095 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11096 struct hclge_vport *vport)
11098 struct hnae3_client *client = vport->nic.client;
11099 struct hclge_dev *hdev = ae_dev->priv;
11100 int rst_cnt = hdev->rst_stats.reset_cnt;
11103 ret = client->ops->init_instance(&vport->nic);
11107 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11108 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11109 rst_cnt != hdev->rst_stats.reset_cnt) {
11114 /* Enable nic hw error interrupts */
11115 ret = hclge_config_nic_hw_error(hdev, true);
11117 dev_err(&ae_dev->pdev->dev,
11118 "fail(%d) to enable hw error interrupts\n", ret);
11122 hnae3_set_client_init_flag(client, ae_dev, 1);
11124 if (netif_msg_drv(&hdev->vport->nic))
11125 hclge_info_show(hdev);
11130 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11131 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11132 msleep(HCLGE_WAIT_RESET_DONE);
11134 client->ops->uninit_instance(&vport->nic, 0);
11139 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11140 struct hclge_vport *vport)
11142 struct hclge_dev *hdev = ae_dev->priv;
11143 struct hnae3_client *client;
11147 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11151 client = hdev->roce_client;
11152 ret = hclge_init_roce_base_info(vport);
11156 rst_cnt = hdev->rst_stats.reset_cnt;
11157 ret = client->ops->init_instance(&vport->roce);
11161 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11162 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11163 rst_cnt != hdev->rst_stats.reset_cnt) {
11165 goto init_roce_err;
11168 /* Enable roce ras interrupts */
11169 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11171 dev_err(&ae_dev->pdev->dev,
11172 "fail(%d) to enable roce ras interrupts\n", ret);
11173 goto init_roce_err;
11176 hnae3_set_client_init_flag(client, ae_dev, 1);
11181 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11182 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11183 msleep(HCLGE_WAIT_RESET_DONE);
11185 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11190 static int hclge_init_client_instance(struct hnae3_client *client,
11191 struct hnae3_ae_dev *ae_dev)
11193 struct hclge_dev *hdev = ae_dev->priv;
11194 struct hclge_vport *vport = &hdev->vport[0];
11197 switch (client->type) {
11198 case HNAE3_CLIENT_KNIC:
11199 hdev->nic_client = client;
11200 vport->nic.client = client;
11201 ret = hclge_init_nic_client_instance(ae_dev, vport);
11205 ret = hclge_init_roce_client_instance(ae_dev, vport);
11210 case HNAE3_CLIENT_ROCE:
11211 if (hnae3_dev_roce_supported(hdev)) {
11212 hdev->roce_client = client;
11213 vport->roce.client = client;
11216 ret = hclge_init_roce_client_instance(ae_dev, vport);
11228 hdev->nic_client = NULL;
11229 vport->nic.client = NULL;
11232 hdev->roce_client = NULL;
11233 vport->roce.client = NULL;
11237 static void hclge_uninit_client_instance(struct hnae3_client *client,
11238 struct hnae3_ae_dev *ae_dev)
11240 struct hclge_dev *hdev = ae_dev->priv;
11241 struct hclge_vport *vport = &hdev->vport[0];
11243 if (hdev->roce_client) {
11244 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11245 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11246 msleep(HCLGE_WAIT_RESET_DONE);
11248 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11249 hdev->roce_client = NULL;
11250 vport->roce.client = NULL;
11252 if (client->type == HNAE3_CLIENT_ROCE)
11254 if (hdev->nic_client && client->ops->uninit_instance) {
11255 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11256 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11257 msleep(HCLGE_WAIT_RESET_DONE);
11259 client->ops->uninit_instance(&vport->nic, 0);
11260 hdev->nic_client = NULL;
11261 vport->nic.client = NULL;
11265 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11267 #define HCLGE_MEM_BAR 4
11269 struct pci_dev *pdev = hdev->pdev;
11270 struct hclge_hw *hw = &hdev->hw;
11272 /* for device does not have device memory, return directly */
11273 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11276 hw->mem_base = devm_ioremap_wc(&pdev->dev,
11277 pci_resource_start(pdev, HCLGE_MEM_BAR),
11278 pci_resource_len(pdev, HCLGE_MEM_BAR));
11279 if (!hw->mem_base) {
11280 dev_err(&pdev->dev, "failed to map device memory\n");
11287 static int hclge_pci_init(struct hclge_dev *hdev)
11289 struct pci_dev *pdev = hdev->pdev;
11290 struct hclge_hw *hw;
11293 ret = pci_enable_device(pdev);
11295 dev_err(&pdev->dev, "failed to enable PCI device\n");
11299 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11301 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11303 dev_err(&pdev->dev,
11304 "can't set consistent PCI DMA");
11305 goto err_disable_device;
11307 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11310 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11312 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11313 goto err_disable_device;
11316 pci_set_master(pdev);
11318 hw->io_base = pcim_iomap(pdev, 2, 0);
11319 if (!hw->io_base) {
11320 dev_err(&pdev->dev, "Can't map configuration register space\n");
11322 goto err_clr_master;
11325 ret = hclge_dev_mem_map(hdev);
11327 goto err_unmap_io_base;
11329 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11334 pcim_iounmap(pdev, hdev->hw.io_base);
11336 pci_clear_master(pdev);
11337 pci_release_regions(pdev);
11338 err_disable_device:
11339 pci_disable_device(pdev);
11344 static void hclge_pci_uninit(struct hclge_dev *hdev)
11346 struct pci_dev *pdev = hdev->pdev;
11348 if (hdev->hw.mem_base)
11349 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11351 pcim_iounmap(pdev, hdev->hw.io_base);
11352 pci_free_irq_vectors(pdev);
11353 pci_clear_master(pdev);
11354 pci_release_mem_regions(pdev);
11355 pci_disable_device(pdev);
11358 static void hclge_state_init(struct hclge_dev *hdev)
11360 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11361 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11362 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11363 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11364 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11365 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11366 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11369 static void hclge_state_uninit(struct hclge_dev *hdev)
11371 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11372 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11374 if (hdev->reset_timer.function)
11375 del_timer_sync(&hdev->reset_timer);
11376 if (hdev->service_task.work.func)
11377 cancel_delayed_work_sync(&hdev->service_task);
11380 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11381 enum hnae3_reset_type rst_type)
11383 #define HCLGE_RESET_RETRY_WAIT_MS 500
11384 #define HCLGE_RESET_RETRY_CNT 5
11386 struct hclge_dev *hdev = ae_dev->priv;
11391 down(&hdev->reset_sem);
11392 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11393 hdev->reset_type = rst_type;
11394 ret = hclge_reset_prepare(hdev);
11395 if (ret || hdev->reset_pending) {
11396 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11398 if (hdev->reset_pending ||
11399 retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11400 dev_err(&hdev->pdev->dev,
11401 "reset_pending:0x%lx, retry_cnt:%d\n",
11402 hdev->reset_pending, retry_cnt);
11403 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11404 up(&hdev->reset_sem);
11405 msleep(HCLGE_RESET_RETRY_WAIT_MS);
11410 /* disable misc vector before reset done */
11411 hclge_enable_vector(&hdev->misc_vector, false);
11412 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11414 if (hdev->reset_type == HNAE3_FLR_RESET)
11415 hdev->rst_stats.flr_rst_cnt++;
11418 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11420 struct hclge_dev *hdev = ae_dev->priv;
11423 hclge_enable_vector(&hdev->misc_vector, true);
11425 ret = hclge_reset_rebuild(hdev);
11427 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11429 hdev->reset_type = HNAE3_NONE_RESET;
11430 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11431 up(&hdev->reset_sem);
11434 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11438 for (i = 0; i < hdev->num_alloc_vport; i++) {
11439 struct hclge_vport *vport = &hdev->vport[i];
11442 /* Send cmd to clear VF's FUNC_RST_ING */
11443 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11445 dev_warn(&hdev->pdev->dev,
11446 "clear vf(%u) rst failed %d!\n",
11447 vport->vport_id, ret);
11451 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11453 struct hclge_desc desc;
11456 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11458 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11459 /* This new command is only supported by new firmware, it will
11460 * fail with older firmware. Error value -EOPNOSUPP can only be
11461 * returned by older firmware running this command, to keep code
11462 * backward compatible we will override this value and return
11465 if (ret && ret != -EOPNOTSUPP) {
11466 dev_err(&hdev->pdev->dev,
11467 "failed to clear hw resource, ret = %d\n", ret);
11473 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11475 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11476 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11479 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11481 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11482 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11485 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11487 struct pci_dev *pdev = ae_dev->pdev;
11488 struct hclge_dev *hdev;
11491 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11496 hdev->ae_dev = ae_dev;
11497 hdev->reset_type = HNAE3_NONE_RESET;
11498 hdev->reset_level = HNAE3_FUNC_RESET;
11499 ae_dev->priv = hdev;
11501 /* HW supprt 2 layer vlan */
11502 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11504 mutex_init(&hdev->vport_lock);
11505 spin_lock_init(&hdev->fd_rule_lock);
11506 sema_init(&hdev->reset_sem, 1);
11508 ret = hclge_pci_init(hdev);
11512 /* Firmware command queue initialize */
11513 ret = hclge_cmd_queue_init(hdev);
11515 goto err_pci_uninit;
11517 /* Firmware command initialize */
11518 ret = hclge_cmd_init(hdev);
11520 goto err_cmd_uninit;
11522 ret = hclge_clear_hw_resource(hdev);
11524 goto err_cmd_uninit;
11526 ret = hclge_get_cap(hdev);
11528 goto err_cmd_uninit;
11530 ret = hclge_query_dev_specs(hdev);
11532 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11534 goto err_cmd_uninit;
11537 ret = hclge_configure(hdev);
11539 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11540 goto err_cmd_uninit;
11543 ret = hclge_init_msi(hdev);
11545 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11546 goto err_cmd_uninit;
11549 ret = hclge_misc_irq_init(hdev);
11551 goto err_msi_uninit;
11553 ret = hclge_alloc_tqps(hdev);
11555 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11556 goto err_msi_irq_uninit;
11559 ret = hclge_alloc_vport(hdev);
11561 goto err_msi_irq_uninit;
11563 ret = hclge_map_tqp(hdev);
11565 goto err_msi_irq_uninit;
11567 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11568 !hnae3_dev_phy_imp_supported(hdev)) {
11569 ret = hclge_mac_mdio_config(hdev);
11571 goto err_msi_irq_uninit;
11574 ret = hclge_init_umv_space(hdev);
11576 goto err_mdiobus_unreg;
11578 ret = hclge_mac_init(hdev);
11580 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11581 goto err_mdiobus_unreg;
11584 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11586 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11587 goto err_mdiobus_unreg;
11590 ret = hclge_config_gro(hdev);
11592 goto err_mdiobus_unreg;
11594 ret = hclge_init_vlan_config(hdev);
11596 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11597 goto err_mdiobus_unreg;
11600 ret = hclge_tm_schd_init(hdev);
11602 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11603 goto err_mdiobus_unreg;
11606 ret = hclge_rss_init_cfg(hdev);
11608 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11609 goto err_mdiobus_unreg;
11612 ret = hclge_rss_init_hw(hdev);
11614 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11615 goto err_mdiobus_unreg;
11618 ret = init_mgr_tbl(hdev);
11620 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11621 goto err_mdiobus_unreg;
11624 ret = hclge_init_fd_config(hdev);
11626 dev_err(&pdev->dev,
11627 "fd table init fail, ret=%d\n", ret);
11628 goto err_mdiobus_unreg;
11631 ret = hclge_ptp_init(hdev);
11633 goto err_mdiobus_unreg;
11635 INIT_KFIFO(hdev->mac_tnl_log);
11637 hclge_dcb_ops_set(hdev);
11639 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11640 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11642 /* Setup affinity after service timer setup because add_timer_on
11643 * is called in affinity notify.
11645 hclge_misc_affinity_setup(hdev);
11647 hclge_clear_all_event_cause(hdev);
11648 hclge_clear_resetting_state(hdev);
11650 /* Log and clear the hw errors those already occurred */
11651 if (hnae3_dev_ras_imp_supported(hdev))
11652 hclge_handle_occurred_error(hdev);
11654 hclge_handle_all_hns_hw_errors(ae_dev);
11656 /* request delayed reset for the error recovery because an immediate
11657 * global reset on a PF affecting pending initialization of other PFs
11659 if (ae_dev->hw_err_reset_req) {
11660 enum hnae3_reset_type reset_level;
11662 reset_level = hclge_get_reset_level(ae_dev,
11663 &ae_dev->hw_err_reset_req);
11664 hclge_set_def_reset_request(ae_dev, reset_level);
11665 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11668 hclge_init_rxd_adv_layout(hdev);
11670 /* Enable MISC vector(vector0) */
11671 hclge_enable_vector(&hdev->misc_vector, true);
11673 hclge_state_init(hdev);
11674 hdev->last_reset_time = jiffies;
11676 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11677 HCLGE_DRIVER_NAME);
11679 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11684 if (hdev->hw.mac.phydev)
11685 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11686 err_msi_irq_uninit:
11687 hclge_misc_irq_uninit(hdev);
11689 pci_free_irq_vectors(pdev);
11691 hclge_cmd_uninit(hdev);
11693 pcim_iounmap(pdev, hdev->hw.io_base);
11694 pci_clear_master(pdev);
11695 pci_release_regions(pdev);
11696 pci_disable_device(pdev);
11698 mutex_destroy(&hdev->vport_lock);
11702 static void hclge_stats_clear(struct hclge_dev *hdev)
11704 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11707 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11709 return hclge_config_switch_param(hdev, vf, enable,
11710 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11713 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11715 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11716 HCLGE_FILTER_FE_NIC_INGRESS_B,
11720 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11724 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11726 dev_err(&hdev->pdev->dev,
11727 "Set vf %d mac spoof check %s failed, ret=%d\n",
11728 vf, enable ? "on" : "off", ret);
11732 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11734 dev_err(&hdev->pdev->dev,
11735 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11736 vf, enable ? "on" : "off", ret);
11741 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11744 struct hclge_vport *vport = hclge_get_vport(handle);
11745 struct hclge_dev *hdev = vport->back;
11746 u32 new_spoofchk = enable ? 1 : 0;
11749 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11750 return -EOPNOTSUPP;
11752 vport = hclge_get_vf_vport(hdev, vf);
11756 if (vport->vf_info.spoofchk == new_spoofchk)
11759 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11760 dev_warn(&hdev->pdev->dev,
11761 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11763 else if (enable && hclge_is_umv_space_full(vport, true))
11764 dev_warn(&hdev->pdev->dev,
11765 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11768 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11772 vport->vf_info.spoofchk = new_spoofchk;
11776 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11778 struct hclge_vport *vport = hdev->vport;
11782 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11785 /* resume the vf spoof check state after reset */
11786 for (i = 0; i < hdev->num_alloc_vport; i++) {
11787 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11788 vport->vf_info.spoofchk);
11798 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11800 struct hclge_vport *vport = hclge_get_vport(handle);
11801 struct hclge_dev *hdev = vport->back;
11802 u32 new_trusted = enable ? 1 : 0;
11804 vport = hclge_get_vf_vport(hdev, vf);
11808 if (vport->vf_info.trusted == new_trusted)
11811 vport->vf_info.trusted = new_trusted;
11812 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11813 hclge_task_schedule(hdev, 0);
11818 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11823 /* reset vf rate to default value */
11824 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11825 struct hclge_vport *vport = &hdev->vport[vf];
11827 vport->vf_info.max_tx_rate = 0;
11828 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11830 dev_err(&hdev->pdev->dev,
11831 "vf%d failed to reset to default, ret=%d\n",
11832 vf - HCLGE_VF_VPORT_START_NUM, ret);
11836 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11837 int min_tx_rate, int max_tx_rate)
11839 if (min_tx_rate != 0 ||
11840 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11841 dev_err(&hdev->pdev->dev,
11842 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11843 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11850 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11851 int min_tx_rate, int max_tx_rate, bool force)
11853 struct hclge_vport *vport = hclge_get_vport(handle);
11854 struct hclge_dev *hdev = vport->back;
11857 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11861 vport = hclge_get_vf_vport(hdev, vf);
11865 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11868 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11872 vport->vf_info.max_tx_rate = max_tx_rate;
11877 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11879 struct hnae3_handle *handle = &hdev->vport->nic;
11880 struct hclge_vport *vport;
11884 /* resume the vf max_tx_rate after reset */
11885 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11886 vport = hclge_get_vf_vport(hdev, vf);
11890 /* zero means max rate, after reset, firmware already set it to
11891 * max rate, so just continue.
11893 if (!vport->vf_info.max_tx_rate)
11896 ret = hclge_set_vf_rate(handle, vf, 0,
11897 vport->vf_info.max_tx_rate, true);
11899 dev_err(&hdev->pdev->dev,
11900 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11901 vf, vport->vf_info.max_tx_rate, ret);
11909 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11911 struct hclge_vport *vport = hdev->vport;
11914 for (i = 0; i < hdev->num_alloc_vport; i++) {
11915 hclge_vport_stop(vport);
11920 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11922 struct hclge_dev *hdev = ae_dev->priv;
11923 struct pci_dev *pdev = ae_dev->pdev;
11926 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11928 hclge_stats_clear(hdev);
11929 /* NOTE: pf reset needn't to clear or restore pf and vf table entry.
11930 * so here should not clean table in memory.
11932 if (hdev->reset_type == HNAE3_IMP_RESET ||
11933 hdev->reset_type == HNAE3_GLOBAL_RESET) {
11934 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11935 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11936 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11937 hclge_reset_umv_space(hdev);
11940 ret = hclge_cmd_init(hdev);
11942 dev_err(&pdev->dev, "Cmd queue init failed\n");
11946 ret = hclge_map_tqp(hdev);
11948 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11952 ret = hclge_mac_init(hdev);
11954 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11958 ret = hclge_tp_port_init(hdev);
11960 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11965 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11967 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11971 ret = hclge_config_gro(hdev);
11975 ret = hclge_init_vlan_config(hdev);
11977 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11981 ret = hclge_tm_init_hw(hdev, true);
11983 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11987 ret = hclge_rss_init_hw(hdev);
11989 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11993 ret = init_mgr_tbl(hdev);
11995 dev_err(&pdev->dev,
11996 "failed to reinit manager table, ret = %d\n", ret);
12000 ret = hclge_init_fd_config(hdev);
12002 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12006 ret = hclge_ptp_init(hdev);
12010 /* Log and clear the hw errors those already occurred */
12011 if (hnae3_dev_ras_imp_supported(hdev))
12012 hclge_handle_occurred_error(hdev);
12014 hclge_handle_all_hns_hw_errors(ae_dev);
12016 /* Re-enable the hw error interrupts because
12017 * the interrupts get disabled on global reset.
12019 ret = hclge_config_nic_hw_error(hdev, true);
12021 dev_err(&pdev->dev,
12022 "fail(%d) to re-enable NIC hw error interrupts\n",
12027 if (hdev->roce_client) {
12028 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12030 dev_err(&pdev->dev,
12031 "fail(%d) to re-enable roce ras interrupts\n",
12037 hclge_reset_vport_state(hdev);
12038 ret = hclge_reset_vport_spoofchk(hdev);
12042 ret = hclge_resume_vf_rate(hdev);
12046 hclge_init_rxd_adv_layout(hdev);
12048 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12049 HCLGE_DRIVER_NAME);
12054 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12056 struct hclge_dev *hdev = ae_dev->priv;
12057 struct hclge_mac *mac = &hdev->hw.mac;
12059 hclge_reset_vf_rate(hdev);
12060 hclge_clear_vf_vlan(hdev);
12061 hclge_misc_affinity_teardown(hdev);
12062 hclge_state_uninit(hdev);
12063 hclge_ptp_uninit(hdev);
12064 hclge_uninit_rxd_adv_layout(hdev);
12065 hclge_uninit_mac_table(hdev);
12066 hclge_del_all_fd_entries(hdev);
12069 mdiobus_unregister(mac->mdio_bus);
12071 /* Disable MISC vector(vector0) */
12072 hclge_enable_vector(&hdev->misc_vector, false);
12073 synchronize_irq(hdev->misc_vector.vector_irq);
12075 /* Disable all hw interrupts */
12076 hclge_config_mac_tnl_int(hdev, false);
12077 hclge_config_nic_hw_error(hdev, false);
12078 hclge_config_rocee_ras_interrupt(hdev, false);
12080 hclge_cmd_uninit(hdev);
12081 hclge_misc_irq_uninit(hdev);
12082 hclge_pci_uninit(hdev);
12083 mutex_destroy(&hdev->vport_lock);
12084 hclge_uninit_vport_vlan_table(hdev);
12085 ae_dev->priv = NULL;
12088 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12090 struct hclge_vport *vport = hclge_get_vport(handle);
12091 struct hclge_dev *hdev = vport->back;
12093 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12096 static void hclge_get_channels(struct hnae3_handle *handle,
12097 struct ethtool_channels *ch)
12099 ch->max_combined = hclge_get_max_channels(handle);
12100 ch->other_count = 1;
12102 ch->combined_count = handle->kinfo.rss_size;
12105 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12106 u16 *alloc_tqps, u16 *max_rss_size)
12108 struct hclge_vport *vport = hclge_get_vport(handle);
12109 struct hclge_dev *hdev = vport->back;
12111 *alloc_tqps = vport->alloc_tqps;
12112 *max_rss_size = hdev->pf_rss_size_max;
12115 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12116 bool rxfh_configured)
12118 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12119 struct hclge_vport *vport = hclge_get_vport(handle);
12120 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12121 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12122 struct hclge_dev *hdev = vport->back;
12123 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12124 u16 cur_rss_size = kinfo->rss_size;
12125 u16 cur_tqps = kinfo->num_tqps;
12126 u16 tc_valid[HCLGE_MAX_TC_NUM];
12132 kinfo->req_rss_size = new_tqps_num;
12134 ret = hclge_tm_vport_map_update(hdev);
12136 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12140 roundup_size = roundup_pow_of_two(kinfo->rss_size);
12141 roundup_size = ilog2(roundup_size);
12142 /* Set the RSS TC mode according to the new RSS size */
12143 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12146 if (!(hdev->hw_tc_map & BIT(i)))
12150 tc_size[i] = roundup_size;
12151 tc_offset[i] = kinfo->rss_size * i;
12153 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12157 /* RSS indirection table has been configured by user */
12158 if (rxfh_configured)
12161 /* Reinitializes the rss indirect table according to the new RSS size */
12162 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12167 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12168 rss_indir[i] = i % kinfo->rss_size;
12170 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12172 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12179 dev_info(&hdev->pdev->dev,
12180 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12181 cur_rss_size, kinfo->rss_size,
12182 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12187 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12188 u32 *regs_num_64_bit)
12190 struct hclge_desc desc;
12194 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12195 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12197 dev_err(&hdev->pdev->dev,
12198 "Query register number cmd failed, ret = %d.\n", ret);
12202 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12203 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12205 total_num = *regs_num_32_bit + *regs_num_64_bit;
12212 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12215 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12216 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12218 struct hclge_desc *desc;
12219 u32 *reg_val = data;
12229 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12230 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12231 HCLGE_32_BIT_REG_RTN_DATANUM);
12232 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12236 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12237 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12239 dev_err(&hdev->pdev->dev,
12240 "Query 32 bit register cmd failed, ret = %d.\n", ret);
12245 for (i = 0; i < cmd_num; i++) {
12247 desc_data = (__le32 *)(&desc[i].data[0]);
12248 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12250 desc_data = (__le32 *)(&desc[i]);
12251 n = HCLGE_32_BIT_REG_RTN_DATANUM;
12253 for (k = 0; k < n; k++) {
12254 *reg_val++ = le32_to_cpu(*desc_data++);
12266 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12269 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12270 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12272 struct hclge_desc *desc;
12273 u64 *reg_val = data;
12283 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12284 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12285 HCLGE_64_BIT_REG_RTN_DATANUM);
12286 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12290 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12291 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12293 dev_err(&hdev->pdev->dev,
12294 "Query 64 bit register cmd failed, ret = %d.\n", ret);
12299 for (i = 0; i < cmd_num; i++) {
12301 desc_data = (__le64 *)(&desc[i].data[0]);
12302 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12304 desc_data = (__le64 *)(&desc[i]);
12305 n = HCLGE_64_BIT_REG_RTN_DATANUM;
12307 for (k = 0; k < n; k++) {
12308 *reg_val++ = le64_to_cpu(*desc_data++);
12320 #define MAX_SEPARATE_NUM 4
12321 #define SEPARATOR_VALUE 0xFDFCFBFA
12322 #define REG_NUM_PER_LINE 4
12323 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
12324 #define REG_SEPARATOR_LINE 1
12325 #define REG_NUM_REMAIN_MASK 3
12327 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12331 /* initialize command BD except the last one */
12332 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12333 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12335 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12338 /* initialize the last command BD */
12339 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12341 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12344 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12348 u32 entries_per_desc, desc_index, index, offset, i;
12349 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12352 ret = hclge_query_bd_num_cmd_send(hdev, desc);
12354 dev_err(&hdev->pdev->dev,
12355 "Get dfx bd num fail, status is %d.\n", ret);
12359 entries_per_desc = ARRAY_SIZE(desc[0].data);
12360 for (i = 0; i < type_num; i++) {
12361 offset = hclge_dfx_bd_offset_list[i];
12362 index = offset % entries_per_desc;
12363 desc_index = offset / entries_per_desc;
12364 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12370 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12371 struct hclge_desc *desc_src, int bd_num,
12372 enum hclge_opcode_type cmd)
12374 struct hclge_desc *desc = desc_src;
12377 hclge_cmd_setup_basic_desc(desc, cmd, true);
12378 for (i = 0; i < bd_num - 1; i++) {
12379 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12381 hclge_cmd_setup_basic_desc(desc, cmd, true);
12385 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12387 dev_err(&hdev->pdev->dev,
12388 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12394 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12397 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12398 struct hclge_desc *desc = desc_src;
12401 entries_per_desc = ARRAY_SIZE(desc->data);
12402 reg_num = entries_per_desc * bd_num;
12403 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12404 for (i = 0; i < reg_num; i++) {
12405 index = i % entries_per_desc;
12406 desc_index = i / entries_per_desc;
12407 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12409 for (i = 0; i < separator_num; i++)
12410 *reg++ = SEPARATOR_VALUE;
12412 return reg_num + separator_num;
12415 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12417 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12418 int data_len_per_desc, bd_num, i;
12423 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12427 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12429 dev_err(&hdev->pdev->dev,
12430 "Get dfx reg bd num fail, status is %d.\n", ret);
12434 data_len_per_desc = sizeof_field(struct hclge_desc, data);
12436 for (i = 0; i < dfx_reg_type_num; i++) {
12437 bd_num = bd_num_list[i];
12438 data_len = data_len_per_desc * bd_num;
12439 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12443 kfree(bd_num_list);
12447 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12449 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12450 int bd_num, bd_num_max, buf_len, i;
12451 struct hclge_desc *desc_src;
12456 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12460 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12462 dev_err(&hdev->pdev->dev,
12463 "Get dfx reg bd num fail, status is %d.\n", ret);
12467 bd_num_max = bd_num_list[0];
12468 for (i = 1; i < dfx_reg_type_num; i++)
12469 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12471 buf_len = sizeof(*desc_src) * bd_num_max;
12472 desc_src = kzalloc(buf_len, GFP_KERNEL);
12478 for (i = 0; i < dfx_reg_type_num; i++) {
12479 bd_num = bd_num_list[i];
12480 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12481 hclge_dfx_reg_opcode_list[i]);
12483 dev_err(&hdev->pdev->dev,
12484 "Get dfx reg fail, status is %d.\n", ret);
12488 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12493 kfree(bd_num_list);
12497 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12498 struct hnae3_knic_private_info *kinfo)
12500 #define HCLGE_RING_REG_OFFSET 0x200
12501 #define HCLGE_RING_INT_REG_OFFSET 0x4
12503 int i, j, reg_num, separator_num;
12507 /* fetching per-PF registers valus from PF PCIe register space */
12508 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12509 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12510 for (i = 0; i < reg_num; i++)
12511 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12512 for (i = 0; i < separator_num; i++)
12513 *reg++ = SEPARATOR_VALUE;
12514 data_num_sum = reg_num + separator_num;
12516 reg_num = ARRAY_SIZE(common_reg_addr_list);
12517 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12518 for (i = 0; i < reg_num; i++)
12519 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12520 for (i = 0; i < separator_num; i++)
12521 *reg++ = SEPARATOR_VALUE;
12522 data_num_sum += reg_num + separator_num;
12524 reg_num = ARRAY_SIZE(ring_reg_addr_list);
12525 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12526 for (j = 0; j < kinfo->num_tqps; j++) {
12527 for (i = 0; i < reg_num; i++)
12528 *reg++ = hclge_read_dev(&hdev->hw,
12529 ring_reg_addr_list[i] +
12530 HCLGE_RING_REG_OFFSET * j);
12531 for (i = 0; i < separator_num; i++)
12532 *reg++ = SEPARATOR_VALUE;
12534 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12536 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12537 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12538 for (j = 0; j < hdev->num_msi_used - 1; j++) {
12539 for (i = 0; i < reg_num; i++)
12540 *reg++ = hclge_read_dev(&hdev->hw,
12541 tqp_intr_reg_addr_list[i] +
12542 HCLGE_RING_INT_REG_OFFSET * j);
12543 for (i = 0; i < separator_num; i++)
12544 *reg++ = SEPARATOR_VALUE;
12546 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12548 return data_num_sum;
12551 static int hclge_get_regs_len(struct hnae3_handle *handle)
12553 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12554 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12555 struct hclge_vport *vport = hclge_get_vport(handle);
12556 struct hclge_dev *hdev = vport->back;
12557 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12558 int regs_lines_32_bit, regs_lines_64_bit;
12561 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
12563 dev_err(&hdev->pdev->dev,
12564 "Get register number failed, ret = %d.\n", ret);
12568 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12570 dev_err(&hdev->pdev->dev,
12571 "Get dfx reg len failed, ret = %d.\n", ret);
12575 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12576 REG_SEPARATOR_LINE;
12577 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12578 REG_SEPARATOR_LINE;
12579 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12580 REG_SEPARATOR_LINE;
12581 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12582 REG_SEPARATOR_LINE;
12583 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12584 REG_SEPARATOR_LINE;
12585 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12586 REG_SEPARATOR_LINE;
12588 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12589 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12590 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12593 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12596 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12597 struct hclge_vport *vport = hclge_get_vport(handle);
12598 struct hclge_dev *hdev = vport->back;
12599 u32 regs_num_32_bit, regs_num_64_bit;
12600 int i, reg_num, separator_num, ret;
12603 *version = hdev->fw_version;
12605 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
12607 dev_err(&hdev->pdev->dev,
12608 "Get register number failed, ret = %d.\n", ret);
12612 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12614 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12616 dev_err(&hdev->pdev->dev,
12617 "Get 32 bit register failed, ret = %d.\n", ret);
12620 reg_num = regs_num_32_bit;
12622 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12623 for (i = 0; i < separator_num; i++)
12624 *reg++ = SEPARATOR_VALUE;
12626 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12628 dev_err(&hdev->pdev->dev,
12629 "Get 64 bit register failed, ret = %d.\n", ret);
12632 reg_num = regs_num_64_bit * 2;
12634 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12635 for (i = 0; i < separator_num; i++)
12636 *reg++ = SEPARATOR_VALUE;
12638 ret = hclge_get_dfx_reg(hdev, reg);
12640 dev_err(&hdev->pdev->dev,
12641 "Get dfx register failed, ret = %d.\n", ret);
12644 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12646 struct hclge_set_led_state_cmd *req;
12647 struct hclge_desc desc;
12650 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12652 req = (struct hclge_set_led_state_cmd *)desc.data;
12653 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12654 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12656 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12658 dev_err(&hdev->pdev->dev,
12659 "Send set led state cmd error, ret =%d\n", ret);
12664 enum hclge_led_status {
12667 HCLGE_LED_NO_CHANGE = 0xFF,
12670 static int hclge_set_led_id(struct hnae3_handle *handle,
12671 enum ethtool_phys_id_state status)
12673 struct hclge_vport *vport = hclge_get_vport(handle);
12674 struct hclge_dev *hdev = vport->back;
12677 case ETHTOOL_ID_ACTIVE:
12678 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12679 case ETHTOOL_ID_INACTIVE:
12680 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12686 static void hclge_get_link_mode(struct hnae3_handle *handle,
12687 unsigned long *supported,
12688 unsigned long *advertising)
12690 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12691 struct hclge_vport *vport = hclge_get_vport(handle);
12692 struct hclge_dev *hdev = vport->back;
12693 unsigned int idx = 0;
12695 for (; idx < size; idx++) {
12696 supported[idx] = hdev->hw.mac.supported[idx];
12697 advertising[idx] = hdev->hw.mac.advertising[idx];
12701 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12703 struct hclge_vport *vport = hclge_get_vport(handle);
12704 struct hclge_dev *hdev = vport->back;
12705 bool gro_en_old = hdev->gro_en;
12708 hdev->gro_en = enable;
12709 ret = hclge_config_gro(hdev);
12711 hdev->gro_en = gro_en_old;
12716 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12718 struct hclge_vport *vport = &hdev->vport[0];
12719 struct hnae3_handle *handle = &vport->nic;
12724 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12725 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12726 vport->last_promisc_flags = vport->overflow_promisc_flags;
12729 if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12730 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12731 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12732 tmp_flags & HNAE3_MPE);
12734 clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12736 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12741 for (i = 1; i < hdev->num_alloc_vport; i++) {
12742 bool uc_en = false;
12743 bool mc_en = false;
12746 vport = &hdev->vport[i];
12748 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12752 if (vport->vf_info.trusted) {
12753 uc_en = vport->vf_info.request_uc_en > 0;
12754 mc_en = vport->vf_info.request_mc_en > 0;
12756 bc_en = vport->vf_info.request_bc_en > 0;
12758 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12761 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12765 hclge_set_vport_vlan_fltr_change(vport);
12769 static bool hclge_module_existed(struct hclge_dev *hdev)
12771 struct hclge_desc desc;
12775 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12776 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12778 dev_err(&hdev->pdev->dev,
12779 "failed to get SFP exist state, ret = %d\n", ret);
12783 existed = le32_to_cpu(desc.data[0]);
12785 return existed != 0;
12788 /* need 6 bds(total 140 bytes) in one reading
12789 * return the number of bytes actually read, 0 means read failed.
12791 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12794 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12795 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12801 /* setup all 6 bds to read module eeprom info. */
12802 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12803 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12806 /* bd0~bd4 need next flag */
12807 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12808 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12811 /* setup bd0, this bd contains offset and read length. */
12812 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12813 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12814 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12815 sfp_info_bd0->read_len = cpu_to_le16(read_len);
12817 ret = hclge_cmd_send(&hdev->hw, desc, i);
12819 dev_err(&hdev->pdev->dev,
12820 "failed to get SFP eeprom info, ret = %d\n", ret);
12824 /* copy sfp info from bd0 to out buffer. */
12825 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12826 memcpy(data, sfp_info_bd0->data, copy_len);
12827 read_len = copy_len;
12829 /* copy sfp info from bd1~bd5 to out buffer if needed. */
12830 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12831 if (read_len >= len)
12834 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12835 memcpy(data + read_len, desc[i].data, copy_len);
12836 read_len += copy_len;
12842 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12845 struct hclge_vport *vport = hclge_get_vport(handle);
12846 struct hclge_dev *hdev = vport->back;
12850 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12851 return -EOPNOTSUPP;
12853 if (!hclge_module_existed(hdev))
12856 while (read_len < len) {
12857 data_len = hclge_get_sfp_eeprom_info(hdev,
12864 read_len += data_len;
12870 static const struct hnae3_ae_ops hclge_ops = {
12871 .init_ae_dev = hclge_init_ae_dev,
12872 .uninit_ae_dev = hclge_uninit_ae_dev,
12873 .reset_prepare = hclge_reset_prepare_general,
12874 .reset_done = hclge_reset_done,
12875 .init_client_instance = hclge_init_client_instance,
12876 .uninit_client_instance = hclge_uninit_client_instance,
12877 .map_ring_to_vector = hclge_map_ring_to_vector,
12878 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12879 .get_vector = hclge_get_vector,
12880 .put_vector = hclge_put_vector,
12881 .set_promisc_mode = hclge_set_promisc_mode,
12882 .request_update_promisc_mode = hclge_request_update_promisc_mode,
12883 .set_loopback = hclge_set_loopback,
12884 .start = hclge_ae_start,
12885 .stop = hclge_ae_stop,
12886 .client_start = hclge_client_start,
12887 .client_stop = hclge_client_stop,
12888 .get_status = hclge_get_status,
12889 .get_ksettings_an_result = hclge_get_ksettings_an_result,
12890 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12891 .get_media_type = hclge_get_media_type,
12892 .check_port_speed = hclge_check_port_speed,
12893 .get_fec = hclge_get_fec,
12894 .set_fec = hclge_set_fec,
12895 .get_rss_key_size = hclge_get_rss_key_size,
12896 .get_rss = hclge_get_rss,
12897 .set_rss = hclge_set_rss,
12898 .set_rss_tuple = hclge_set_rss_tuple,
12899 .get_rss_tuple = hclge_get_rss_tuple,
12900 .get_tc_size = hclge_get_tc_size,
12901 .get_mac_addr = hclge_get_mac_addr,
12902 .set_mac_addr = hclge_set_mac_addr,
12903 .do_ioctl = hclge_do_ioctl,
12904 .add_uc_addr = hclge_add_uc_addr,
12905 .rm_uc_addr = hclge_rm_uc_addr,
12906 .add_mc_addr = hclge_add_mc_addr,
12907 .rm_mc_addr = hclge_rm_mc_addr,
12908 .set_autoneg = hclge_set_autoneg,
12909 .get_autoneg = hclge_get_autoneg,
12910 .restart_autoneg = hclge_restart_autoneg,
12911 .halt_autoneg = hclge_halt_autoneg,
12912 .get_pauseparam = hclge_get_pauseparam,
12913 .set_pauseparam = hclge_set_pauseparam,
12914 .set_mtu = hclge_set_mtu,
12915 .reset_queue = hclge_reset_tqp,
12916 .get_stats = hclge_get_stats,
12917 .get_mac_stats = hclge_get_mac_stat,
12918 .update_stats = hclge_update_stats,
12919 .get_strings = hclge_get_strings,
12920 .get_sset_count = hclge_get_sset_count,
12921 .get_fw_version = hclge_get_fw_version,
12922 .get_mdix_mode = hclge_get_mdix_mode,
12923 .enable_vlan_filter = hclge_enable_vlan_filter,
12924 .set_vlan_filter = hclge_set_vlan_filter,
12925 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12926 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12927 .reset_event = hclge_reset_event,
12928 .get_reset_level = hclge_get_reset_level,
12929 .set_default_reset_request = hclge_set_def_reset_request,
12930 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12931 .set_channels = hclge_set_channels,
12932 .get_channels = hclge_get_channels,
12933 .get_regs_len = hclge_get_regs_len,
12934 .get_regs = hclge_get_regs,
12935 .set_led_id = hclge_set_led_id,
12936 .get_link_mode = hclge_get_link_mode,
12937 .add_fd_entry = hclge_add_fd_entry,
12938 .del_fd_entry = hclge_del_fd_entry,
12939 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12940 .get_fd_rule_info = hclge_get_fd_rule_info,
12941 .get_fd_all_rules = hclge_get_all_rules,
12942 .enable_fd = hclge_enable_fd,
12943 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12944 .dbg_read_cmd = hclge_dbg_read_cmd,
12945 .handle_hw_ras_error = hclge_handle_hw_ras_error,
12946 .get_hw_reset_stat = hclge_get_hw_reset_stat,
12947 .ae_dev_resetting = hclge_ae_dev_resetting,
12948 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12949 .set_gro_en = hclge_gro_en,
12950 .get_global_queue_id = hclge_covert_handle_qid_global,
12951 .set_timer_task = hclge_set_timer_task,
12952 .mac_connect_phy = hclge_mac_connect_phy,
12953 .mac_disconnect_phy = hclge_mac_disconnect_phy,
12954 .get_vf_config = hclge_get_vf_config,
12955 .set_vf_link_state = hclge_set_vf_link_state,
12956 .set_vf_spoofchk = hclge_set_vf_spoofchk,
12957 .set_vf_trust = hclge_set_vf_trust,
12958 .set_vf_rate = hclge_set_vf_rate,
12959 .set_vf_mac = hclge_set_vf_mac,
12960 .get_module_eeprom = hclge_get_module_eeprom,
12961 .get_cmdq_stat = hclge_get_cmdq_stat,
12962 .add_cls_flower = hclge_add_cls_flower,
12963 .del_cls_flower = hclge_del_cls_flower,
12964 .cls_flower_active = hclge_is_cls_flower_active,
12965 .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12966 .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12967 .set_tx_hwts_info = hclge_ptp_set_tx_info,
12968 .get_rx_hwts = hclge_ptp_get_rx_hwts,
12969 .get_ts_info = hclge_ptp_get_ts_info,
12972 static struct hnae3_ae_algo ae_algo = {
12974 .pdev_id_table = ae_algo_pci_tbl,
12977 static int hclge_init(void)
12979 pr_info("%s is initializing\n", HCLGE_NAME);
12981 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12983 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12987 hnae3_register_ae_algo(&ae_algo);
12992 static void hclge_exit(void)
12994 hnae3_unregister_ae_algo(&ae_algo);
12995 destroy_workqueue(hclge_wq);
12997 module_init(hclge_init);
12998 module_exit(hclge_exit);
13000 MODULE_LICENSE("GPL");
13001 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13002 MODULE_DESCRIPTION("HCLGE Driver");
13003 MODULE_VERSION(HCLGE_MOD_VERSION);