1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
24 #include "hclge_err.h"
27 #define HCLGE_NAME "hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
31 #define HCLGE_BUF_SIZE_UNIT 256U
32 #define HCLGE_BUF_MUL_BY 2
33 #define HCLGE_BUF_DIV_BY 2
34 #define NEED_RESERVE_TC_NUM 2
35 #define BUF_MAX_PERCENT 100
36 #define BUF_RESERVE_PERCENT 90
38 #define HCLGE_RESET_MAX_FAIL_CNT 5
39 #define HCLGE_RESET_SYNC_TIME 100
40 #define HCLGE_PF_RESET_SYNC_TIME 20
41 #define HCLGE_PF_RESET_SYNC_CNT 1500
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET 1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET 2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET 3
47 #define HCLGE_DFX_IGU_BD_OFFSET 4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET 5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET 6
50 #define HCLGE_DFX_NCSI_BD_OFFSET 7
51 #define HCLGE_DFX_RTC_BD_OFFSET 8
52 #define HCLGE_DFX_PPP_BD_OFFSET 9
53 #define HCLGE_DFX_RCB_BD_OFFSET 10
54 #define HCLGE_DFX_TQP_BD_OFFSET 11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET 12
57 #define HCLGE_LINK_STATUS_MS 10
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 static void hclge_sync_fd_table(struct hclge_dev *hdev);
75 static struct hnae3_ae_algo ae_algo;
77 static struct workqueue_struct *hclge_wq;
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 /* required last entry */
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
/* Command queue (CMDQ) register addresses: TX/RX descriptor ring base,
 * depth, head/tail pointers, plus the CMDQ interrupt status/enable/
 * generate registers.
 */
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 HCLGE_CMDQ_TX_ADDR_H_REG,
96 HCLGE_CMDQ_TX_DEPTH_REG,
97 HCLGE_CMDQ_TX_TAIL_REG,
98 HCLGE_CMDQ_TX_HEAD_REG,
99 HCLGE_CMDQ_RX_ADDR_L_REG,
100 HCLGE_CMDQ_RX_ADDR_H_REG,
101 HCLGE_CMDQ_RX_DEPTH_REG,
102 HCLGE_CMDQ_RX_TAIL_REG,
103 HCLGE_CMDQ_RX_HEAD_REG,
104 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 HCLGE_CMDQ_INTR_STS_REG,
106 HCLGE_CMDQ_INTR_EN_REG,
107 HCLGE_CMDQ_INTR_GEN_REG}
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 HCLGE_VECTOR0_OTER_EN_REG,
111 HCLGE_MISC_RESET_STS_REG,
112 HCLGE_MISC_VECTOR_INT_STS,
113 HCLGE_GLOBAL_RESET_REG,
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 HCLGE_RING_RX_ADDR_H_REG,
119 HCLGE_RING_RX_BD_NUM_REG,
120 HCLGE_RING_RX_BD_LENGTH_REG,
121 HCLGE_RING_RX_MERGE_EN_REG,
122 HCLGE_RING_RX_TAIL_REG,
123 HCLGE_RING_RX_HEAD_REG,
124 HCLGE_RING_RX_FBD_NUM_REG,
125 HCLGE_RING_RX_OFFSET_REG,
126 HCLGE_RING_RX_FBD_OFFSET_REG,
127 HCLGE_RING_RX_STASH_REG,
128 HCLGE_RING_RX_BD_ERR_REG,
129 HCLGE_RING_TX_ADDR_L_REG,
130 HCLGE_RING_TX_ADDR_H_REG,
131 HCLGE_RING_TX_BD_NUM_REG,
132 HCLGE_RING_TX_PRIORITY_REG,
133 HCLGE_RING_TX_TC_REG,
134 HCLGE_RING_TX_MERGE_EN_REG,
135 HCLGE_RING_TX_TAIL_REG,
136 HCLGE_RING_TX_HEAD_REG,
137 HCLGE_RING_TX_FBD_NUM_REG,
138 HCLGE_RING_TX_OFFSET_REG,
139 HCLGE_RING_TX_EBD_NUM_REG,
140 HCLGE_RING_TX_EBD_OFFSET_REG,
141 HCLGE_RING_TX_BD_ERR_REG,
/* Per-TQP interrupt register addresses: control, the three GL
 * (gap-limiting/coalescing) registers and the RL (rate-limiting) register.
 */
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 HCLGE_TQP_INTR_GL0_REG,
146 HCLGE_TQP_INTR_GL1_REG,
147 HCLGE_TQP_INTR_GL2_REG,
148 HCLGE_TQP_INTR_RL_REG}
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
152 "Serdes serial Loopback test",
153 "Serdes parallel Loopback test",
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 {"mac_tx_mac_pause_num",
159 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 {"mac_rx_mac_pause_num",
161 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 {"mac_tx_control_pkt_num",
163 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 {"mac_rx_control_pkt_num",
165 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 {"mac_tx_pfc_pkt_num",
167 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 {"mac_tx_pfc_pri0_pkt_num",
169 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 {"mac_tx_pfc_pri1_pkt_num",
171 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 {"mac_tx_pfc_pri2_pkt_num",
173 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 {"mac_tx_pfc_pri3_pkt_num",
175 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 {"mac_tx_pfc_pri4_pkt_num",
177 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 {"mac_tx_pfc_pri5_pkt_num",
179 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 {"mac_tx_pfc_pri6_pkt_num",
181 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 {"mac_tx_pfc_pri7_pkt_num",
183 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 {"mac_rx_pfc_pkt_num",
185 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 {"mac_rx_pfc_pri0_pkt_num",
187 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 {"mac_rx_pfc_pri1_pkt_num",
189 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 {"mac_rx_pfc_pri2_pkt_num",
191 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 {"mac_rx_pfc_pri3_pkt_num",
193 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 {"mac_rx_pfc_pri4_pkt_num",
195 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 {"mac_rx_pfc_pri5_pkt_num",
197 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 {"mac_rx_pfc_pri6_pkt_num",
199 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 {"mac_rx_pfc_pri7_pkt_num",
201 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 {"mac_tx_total_pkt_num",
203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 {"mac_tx_total_oct_num",
205 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 {"mac_tx_good_pkt_num",
207 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 {"mac_tx_bad_pkt_num",
209 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 {"mac_tx_good_oct_num",
211 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 {"mac_tx_bad_oct_num",
213 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 {"mac_tx_uni_pkt_num",
215 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 {"mac_tx_multi_pkt_num",
217 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 {"mac_tx_broad_pkt_num",
219 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 {"mac_tx_undersize_pkt_num",
221 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 {"mac_tx_oversize_pkt_num",
223 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 {"mac_tx_64_oct_pkt_num",
225 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 {"mac_tx_65_127_oct_pkt_num",
227 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 {"mac_tx_128_255_oct_pkt_num",
229 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 {"mac_tx_256_511_oct_pkt_num",
231 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 {"mac_tx_512_1023_oct_pkt_num",
233 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 {"mac_tx_1024_1518_oct_pkt_num",
235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 {"mac_tx_1519_2047_oct_pkt_num",
237 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 {"mac_tx_2048_4095_oct_pkt_num",
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 {"mac_tx_4096_8191_oct_pkt_num",
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 {"mac_tx_8192_9216_oct_pkt_num",
243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 {"mac_tx_9217_12287_oct_pkt_num",
245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 {"mac_tx_12288_16383_oct_pkt_num",
247 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 {"mac_tx_1519_max_good_pkt_num",
249 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 {"mac_tx_1519_max_bad_pkt_num",
251 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 {"mac_rx_total_pkt_num",
253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 {"mac_rx_total_oct_num",
255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 {"mac_rx_good_pkt_num",
257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 {"mac_rx_bad_pkt_num",
259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 {"mac_rx_good_oct_num",
261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 {"mac_rx_bad_oct_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 {"mac_rx_uni_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 {"mac_rx_multi_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 {"mac_rx_broad_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 {"mac_rx_undersize_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 {"mac_rx_oversize_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 {"mac_rx_64_oct_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 {"mac_rx_65_127_oct_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 {"mac_rx_128_255_oct_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 {"mac_rx_256_511_oct_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 {"mac_rx_512_1023_oct_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 {"mac_rx_1024_1518_oct_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 {"mac_rx_1519_2047_oct_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 {"mac_rx_2048_4095_oct_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 {"mac_rx_4096_8191_oct_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 {"mac_rx_8192_9216_oct_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 {"mac_rx_9217_12287_oct_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 {"mac_rx_12288_16383_oct_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 {"mac_rx_1519_max_good_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 {"mac_rx_1519_max_bad_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
303 {"mac_tx_fragment_pkt_num",
304 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 {"mac_tx_undermin_pkt_num",
306 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 {"mac_tx_jabber_pkt_num",
308 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 {"mac_tx_err_all_pkt_num",
310 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 {"mac_tx_from_app_good_pkt_num",
312 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 {"mac_tx_from_app_bad_pkt_num",
314 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 {"mac_rx_fragment_pkt_num",
316 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 {"mac_rx_undermin_pkt_num",
318 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 {"mac_rx_jabber_pkt_num",
320 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 {"mac_rx_fcs_err_pkt_num",
322 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 {"mac_rx_send_app_good_pkt_num",
324 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 {"mac_rx_send_app_bad_pkt_num",
326 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
331 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 .ethter_type = cpu_to_le16(ETH_P_LLDP),
333 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 .i_port_bitmap = 0x1,
338 static const u8 hclge_hash_key[] = {
339 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 HCLGE_DFX_BIOS_BD_OFFSET,
348 HCLGE_DFX_SSU_0_BD_OFFSET,
349 HCLGE_DFX_SSU_1_BD_OFFSET,
350 HCLGE_DFX_IGU_BD_OFFSET,
351 HCLGE_DFX_RPU_0_BD_OFFSET,
352 HCLGE_DFX_RPU_1_BD_OFFSET,
353 HCLGE_DFX_NCSI_BD_OFFSET,
354 HCLGE_DFX_RTC_BD_OFFSET,
355 HCLGE_DFX_PPP_BD_OFFSET,
356 HCLGE_DFX_RCB_BD_OFFSET,
357 HCLGE_DFX_TQP_BD_OFFSET,
358 HCLGE_DFX_SSU_2_BD_OFFSET
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 HCLGE_OPC_DFX_SSU_REG_0,
364 HCLGE_OPC_DFX_SSU_REG_1,
365 HCLGE_OPC_DFX_IGU_EGU_REG,
366 HCLGE_OPC_DFX_RPU_REG_0,
367 HCLGE_OPC_DFX_RPU_REG_1,
368 HCLGE_OPC_DFX_NCSI_REG,
369 HCLGE_OPC_DFX_RTC_REG,
370 HCLGE_OPC_DFX_PPP_REG,
371 HCLGE_OPC_DFX_RCB_REG,
372 HCLGE_OPC_DFX_TQP_REG,
373 HCLGE_OPC_DFX_SSU_REG_2
376 static const struct key_info meta_data_key_info[] = {
377 { PACKET_TYPE_ID, 6},
387 static const struct key_info tuple_key_info[] = {
388 { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
389 { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
391 { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
392 { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
393 { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
394 { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
395 { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
396 { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
397 { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
398 { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
399 { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
400 { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
402 { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
403 { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
404 { INNER_DST_MAC, 48, KEY_OPT_MAC,
405 offsetof(struct hclge_fd_rule, tuples.dst_mac),
406 offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
407 { INNER_SRC_MAC, 48, KEY_OPT_MAC,
408 offsetof(struct hclge_fd_rule, tuples.src_mac),
409 offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
410 { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
411 offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
412 offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
413 { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
414 { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
415 offsetof(struct hclge_fd_rule, tuples.ether_proto),
416 offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
417 { INNER_L2_RSV, 16, KEY_OPT_LE16,
418 offsetof(struct hclge_fd_rule, tuples.l2_user_def),
419 offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
420 { INNER_IP_TOS, 8, KEY_OPT_U8,
421 offsetof(struct hclge_fd_rule, tuples.ip_tos),
422 offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
423 { INNER_IP_PROTO, 8, KEY_OPT_U8,
424 offsetof(struct hclge_fd_rule, tuples.ip_proto),
425 offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
426 { INNER_SRC_IP, 32, KEY_OPT_IP,
427 offsetof(struct hclge_fd_rule, tuples.src_ip),
428 offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
429 { INNER_DST_IP, 32, KEY_OPT_IP,
430 offsetof(struct hclge_fd_rule, tuples.dst_ip),
431 offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
432 { INNER_L3_RSV, 16, KEY_OPT_LE16,
433 offsetof(struct hclge_fd_rule, tuples.l3_user_def),
434 offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
435 { INNER_SRC_PORT, 16, KEY_OPT_LE16,
436 offsetof(struct hclge_fd_rule, tuples.src_port),
437 offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
438 { INNER_DST_PORT, 16, KEY_OPT_LE16,
439 offsetof(struct hclge_fd_rule, tuples.dst_port),
440 offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
441 { INNER_L4_RSV, 32, KEY_OPT_LE32,
442 offsetof(struct hclge_fd_rule, tuples.l4_user_def),
443 offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
/* hclge_mac_update_stats_defective - fetch MAC statistics with the legacy
 * fixed-size command (HCLGE_OPC_STATS_MAC, 21 descriptors) and accumulate
 * the little-endian counters into hdev->mac_stats.  Only the first
 * descriptor carries a command head, so it holds fewer stats words than
 * the following descriptors.
 *
 * NOTE(review): this listing has dropped lines (local declarations,
 * error-return branches, loop-advance statements, closing braces) —
 * verify against the complete source before relying on it.
 */
446 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
448 #define HCLGE_MAC_CMD_NUM 21
450 u64 *data = (u64 *)(&hdev->mac_stats);
451 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
456 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
457 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
459 dev_err(&hdev->pdev->dev,
460 "Get MAC pkt stats fail, status = %d.\n", ret);
465 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
466 /* for special opcode 0032, only the first desc has the head */
467 if (unlikely(i == 0)) {
468 desc_data = (__le64 *)(&desc[i].data[0]);
469 n = HCLGE_RD_FIRST_STATS_NUM;
471 desc_data = (__le64 *)(&desc[i]);
472 n = HCLGE_RD_OTHER_STATS_NUM;
475 for (k = 0; k < n; k++) {
476 *data += le64_to_cpu(*desc_data);
/* hclge_mac_update_stats_complete - fetch MAC statistics with the newer
 * variable-length command (HCLGE_OPC_STATS_MAC_ALL).  @desc_num is the
 * descriptor count previously derived from the firmware-reported register
 * number; the descriptor array is heap-allocated because the count is not
 * known at compile time.
 *
 * NOTE(review): this listing has dropped lines (allocation-failure check,
 * error handling, kfree, closing braces) — verify against the complete
 * source.
 */
485 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
487 u64 *data = (u64 *)(&hdev->mac_stats);
488 struct hclge_desc *desc;
493 /* This may be called inside atomic sections,
494 * so GFP_ATOMIC is more suitable here
496 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
500 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
501 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
507 for (i = 0; i < desc_num; i++) {
508 /* for special opcode 0034, only the first desc has the head */
510 desc_data = (__le64 *)(&desc[i].data[0]);
511 n = HCLGE_RD_FIRST_STATS_NUM;
513 desc_data = (__le64 *)(&desc[i]);
514 n = HCLGE_RD_OTHER_STATS_NUM;
517 for (k = 0; k < n; k++) {
518 *data += le64_to_cpu(*desc_data);
/* hclge_mac_query_reg_num - ask firmware how many MAC statistic registers
 * exist (HCLGE_OPC_QUERY_MAC_REG_NUM) and convert that count into the
 * number of command descriptors needed to read them all: one head
 * descriptor plus ceil((reg_num - 3) / 4) data descriptors.
 *
 * NOTE(review): dropped lines here include the error-return path and the
 * final return — confirm against the complete source.
 */
529 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
531 struct hclge_desc desc;
536 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
537 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
541 desc_data = (__le32 *)(&desc.data[0]);
542 reg_num = le32_to_cpu(*desc_data);
544 *desc_num = 1 + ((reg_num - 3) >> 2) +
545 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
/* hclge_mac_update_stats - dispatch to the right statistics method:
 * if the firmware can report its register count, use the complete
 * (variable-length) read; if the query returns -EOPNOTSUPP, fall back to
 * the legacy fixed-size ("defective") read; any other error is logged.
 */
550 static int hclge_mac_update_stats(struct hclge_dev *hdev)
555 ret = hclge_mac_query_reg_num(hdev, &desc_num);
556 /* The firmware supports the new statistics acquisition method */
558 ret = hclge_mac_update_stats_complete(hdev, desc_num);
559 else if (ret == -EOPNOTSUPP)
560 ret = hclge_mac_update_stats_defective(hdev);
562 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
/* hclge_tqps_update_stats - refresh per-queue packet counters.  Two passes
 * over all TQPs of this handle: the first issues HCLGE_OPC_QUERY_RX_STATS
 * per queue and accumulates rcb_rx_ring_pktnum_rcd, the second issues
 * HCLGE_OPC_QUERY_TX_STATS and accumulates rcb_tx_ring_pktnum_rcd.
 *
 * NOTE(review): dropped lines include the error-return bodies and final
 * return — verify against the complete source.
 */
567 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
569 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
570 struct hclge_vport *vport = hclge_get_vport(handle);
571 struct hclge_dev *hdev = vport->back;
572 struct hnae3_queue *queue;
573 struct hclge_desc desc[1];
574 struct hclge_tqp *tqp;
577 for (i = 0; i < kinfo->num_tqps; i++) {
578 queue = handle->kinfo.tqp[i];
579 tqp = container_of(queue, struct hclge_tqp, q);
580 /* command : HCLGE_OPC_QUERY_IGU_STAT */
581 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
584 desc[0].data[0] = cpu_to_le32(tqp->index);
585 ret = hclge_cmd_send(&hdev->hw, desc, 1);
587 dev_err(&hdev->pdev->dev,
588 "Query tqp stat fail, status = %d,queue = %d\n",
592 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
593 le32_to_cpu(desc[0].data[1]);
596 for (i = 0; i < kinfo->num_tqps; i++) {
597 queue = handle->kinfo.tqp[i];
598 tqp = container_of(queue, struct hclge_tqp, q);
599 /* command : HCLGE_OPC_QUERY_IGU_STAT */
600 hclge_cmd_setup_basic_desc(&desc[0],
601 HCLGE_OPC_QUERY_TX_STATS,
604 desc[0].data[0] = cpu_to_le32(tqp->index);
605 ret = hclge_cmd_send(&hdev->hw, desc, 1);
607 dev_err(&hdev->pdev->dev,
608 "Query tqp stat fail, status = %d,queue = %d\n",
612 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
613 le32_to_cpu(desc[0].data[1]);
/* hclge_tqps_get_stats - copy the cached per-queue counters into an
 * ethtool stats buffer: all TX ring packet counts first, then all RX ring
 * packet counts.  Presumably returns the advanced buffer pointer (the
 * declaration of buff and the return were dropped from this listing —
 * confirm against the complete source).
 */
619 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
621 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 struct hclge_tqp *tqp;
626 for (i = 0; i < kinfo->num_tqps; i++) {
627 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
628 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
631 for (i = 0; i < kinfo->num_tqps; i++) {
632 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
633 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
/* hclge_tqps_get_sset_count - number of per-queue statistics entries:
 * one TX and one RX counter per TQP.  The stringset argument is unused
 * here.
 */
639 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
641 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
643 /* each tqp has TX & RX two queues */
644 return kinfo->num_tqps * (2);
/* hclge_tqps_get_strings - write the ethtool stat names for all TQPs into
 * @data ("txq%u_pktnum_rcd" entries first, then "rxq%u_pktnum_rcd"),
 * one ETH_GSTRING_LEN slot per counter.  Presumably returns the advanced
 * buffer pointer (declaration/return dropped from this listing — confirm
 * against the complete source).
 */
647 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
649 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
653 for (i = 0; i < kinfo->num_tqps; i++) {
654 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
655 struct hclge_tqp, q);
656 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
658 buff = buff + ETH_GSTRING_LEN;
661 for (i = 0; i < kinfo->num_tqps; i++) {
662 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
663 struct hclge_tqp, q);
664 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
666 buff = buff + ETH_GSTRING_LEN;
/* hclge_comm_get_stats - copy @size counters out of a stats structure
 * into an ethtool buffer, using each entry's byte offset from the strs[]
 * descriptor table (HCLGE_STATS_READ dereferences comm_stats + offset as
 * a u64).  The tail of the function (remaining parameters, buf setup and
 * return) was dropped from this listing — confirm against the complete
 * source.
 */
672 static u64 *hclge_comm_get_stats(const void *comm_stats,
673 const struct hclge_comm_stats_str strs[],
679 for (i = 0; i < size; i++)
680 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
/* hclge_comm_get_strings - emit the names from a stats descriptor table
 * into an ethtool string buffer, one ETH_GSTRING_LEN slot per entry;
 * only acts for the ETH_SS_STATS stringset.  The early-return value and
 * the final return were dropped from this listing — confirm against the
 * complete source.
 */
685 static u8 *hclge_comm_get_strings(u32 stringset,
686 const struct hclge_comm_stats_str strs[],
689 char *buff = (char *)data;
692 if (stringset != ETH_SS_STATS)
695 for (i = 0; i < size; i++) {
696 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
697 buff = buff + ETH_GSTRING_LEN;
/* hclge_update_stats_for_all - refresh both TQP stats (only when a client
 * is attached to vport[0]'s nic handle) and MAC stats, logging any
 * failure instead of propagating it.
 */
703 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
705 struct hnae3_handle *handle;
708 handle = &hdev->vport[0].nic;
709 if (handle->client) {
710 status = hclge_tqps_update_stats(handle);
712 dev_err(&hdev->pdev->dev,
713 "Update TQPS stats fail, status = %d.\n",
718 status = hclge_mac_update_stats(hdev);
720 dev_err(&hdev->pdev->dev,
721 "Update MAC stats fail, status = %d.\n", status);
/* hclge_update_stats - per-handle statistics refresh.  Uses the
 * HCLGE_STATE_STATISTICS_UPDATING state bit (test_and_set / clear) to
 * make sure only one updater runs at a time; MAC then TQP stats are
 * updated and failures are logged, not returned.  @net_stats is unused
 * in the visible code.
 */
724 static void hclge_update_stats(struct hnae3_handle *handle,
725 struct net_device_stats *net_stats)
727 struct hclge_vport *vport = hclge_get_vport(handle);
728 struct hclge_dev *hdev = vport->back;
731 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
734 status = hclge_mac_update_stats(hdev);
736 dev_err(&hdev->pdev->dev,
737 "Update MAC stats fail, status = %d.\n",
740 status = hclge_tqps_update_stats(handle);
742 dev_err(&hdev->pdev->dev,
743 "Update TQPS stats fail, status = %d.\n",
746 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
/* hclge_get_sset_count - ethtool string-set sizing.  For ETH_SS_TEST it
 * also (re)computes handle->flags loopback capability bits: app loopback
 * for device v2+ or GE speeds, serdes serial/parallel loopback always,
 * phy loopback when a phy driver with set_loopback exists or the
 * IMP-based phy is supported.  For ETH_SS_STATS it returns MAC stats
 * count plus per-TQP stats count.
 *
 * NOTE(review): several lines (count increments, final return) were
 * dropped from this listing — confirm against the complete source.
 */
749 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
751 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
752 HNAE3_SUPPORT_PHY_LOOPBACK |\
753 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
754 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
756 struct hclge_vport *vport = hclge_get_vport(handle);
757 struct hclge_dev *hdev = vport->back;
760 /* Loopback test support rules:
761 * mac: only GE mode support
762 * serdes: all mac mode will support include GE/XGE/LGE/CGE
763 * phy: only support when phy device exist on board
765 if (stringset == ETH_SS_TEST) {
766 /* clear loopback bit flags at first */
767 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
768 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
769 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
770 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
771 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
773 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
777 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
778 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
780 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
781 hdev->hw.mac.phydev->drv->set_loopback) ||
782 hnae3_dev_phy_imp_supported(hdev)) {
784 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
786 } else if (stringset == ETH_SS_STATS) {
787 count = ARRAY_SIZE(g_mac_stats_string) +
788 hclge_tqps_get_sset_count(handle, stringset);
/* hclge_get_strings - ethtool string provider.  ETH_SS_STATS: MAC stat
 * names followed by per-TQP names.  ETH_SS_TEST: one self-test name per
 * loopback capability bit currently set in handle->flags, copied from
 * hns3_nic_test_strs in fixed ETH_GSTRING_LEN slots.
 */
794 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
797 u8 *p = (char *)data;
800 if (stringset == ETH_SS_STATS) {
801 size = ARRAY_SIZE(g_mac_stats_string);
802 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
804 p = hclge_tqps_get_strings(handle, p);
805 } else if (stringset == ETH_SS_TEST) {
806 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
807 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
809 p += ETH_GSTRING_LEN;
811 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
812 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
814 p += ETH_GSTRING_LEN;
816 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
818 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
820 p += ETH_GSTRING_LEN;
822 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
823 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
825 p += ETH_GSTRING_LEN;
/* hclge_get_stats - fill the ethtool stats buffer: MAC counters from
 * hdev->mac_stats (laid out by g_mac_stats_string), then the per-TQP
 * counters.
 */
830 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
832 struct hclge_vport *vport = hclge_get_vport(handle);
833 struct hclge_dev *hdev = vport->back;
836 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
837 ARRAY_SIZE(g_mac_stats_string), data);
838 p = hclge_tqps_get_stats(handle, p);
/* hclge_get_mac_stat - report pause-frame counters: refresh stats first,
 * then copy the TX/RX MAC pause counts out of hdev->mac_stats.
 */
841 static void hclge_get_mac_stat(struct hnae3_handle *handle,
842 struct hns3_mac_stats *mac_stats)
844 struct hclge_vport *vport = hclge_get_vport(handle);
845 struct hclge_dev *hdev = vport->back;
847 hclge_update_stats(handle, NULL);
849 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
850 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
/* hclge_parse_func_status - interpret a function-status response: bail
 * out while PF reset is not DONE, record whether this PF is the main PF
 * in hdev->flag, and extract the low 4 bits of mac_id.  The early-return
 * value and the trailing 'else'/'return 0' lines were dropped from this
 * listing — confirm against the complete source.
 */
853 static int hclge_parse_func_status(struct hclge_dev *hdev,
854 struct hclge_func_status_cmd *status)
856 #define HCLGE_MAC_ID_MASK 0xF
858 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
861 /* Set the pf to main pf */
862 if (status->pf_state & HCLGE_PF_STATE_MAIN)
863 hdev->flag |= HCLGE_FLAG_MAIN;
865 hdev->flag &= ~HCLGE_FLAG_MAIN;
867 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
/* hclge_query_function_status - poll HCLGE_OPC_QUERY_FUNC_STATUS until
 * the PF reports reset done, retrying up to HCLGE_QUERY_MAX_CNT times
 * with a 1-2 ms sleep between attempts, then hand the response to
 * hclge_parse_func_status().  The do/while loop's opening and the
 * completion check were dropped from this listing — confirm against the
 * complete source.
 */
871 static int hclge_query_function_status(struct hclge_dev *hdev)
873 #define HCLGE_QUERY_MAX_CNT 5
875 struct hclge_func_status_cmd *req;
876 struct hclge_desc desc;
880 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
881 req = (struct hclge_func_status_cmd *)desc.data;
884 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
886 dev_err(&hdev->pdev->dev,
887 "query function status failed %d.\n", ret);
891 /* Check pf reset is done */
894 usleep_range(1000, 2000);
895 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
897 return hclge_parse_func_status(hdev, req);
/* hclge_query_pf_resource - read PF resource limits from firmware
 * (HCLGE_OPC_QUERY_PF_RSRC): total TQP count (base + extended), packet
 * buffer size, TX/DV buffer sizes (firmware value scaled by
 * HCLGE_BUF_UNIT_S, or defaults, rounded up to HCLGE_BUF_SIZE_UNIT), and
 * MSI-X budgets.  NIC MSI count below HNAE3_MIN_VECTOR_NUM is an error;
 * RoCE vectors are added after the NIC vectors when RoCE is supported.
 *
 * NOTE(review): several lines (assignment targets such as
 * hdev->tx_buf_size / dv_buf_size, error return, final return) were
 * dropped from this listing — confirm against the complete source.
 */
900 static int hclge_query_pf_resource(struct hclge_dev *hdev)
902 struct hclge_pf_res_cmd *req;
903 struct hclge_desc desc;
906 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
907 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
909 dev_err(&hdev->pdev->dev,
910 "query pf resource failed %d.\n", ret);
914 req = (struct hclge_pf_res_cmd *)desc.data;
915 hdev->num_tqps = le16_to_cpu(req->tqp_num) +
916 le16_to_cpu(req->ext_tqp_num);
917 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
919 if (req->tx_buf_size)
921 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
923 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
925 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
927 if (req->dv_buf_size)
929 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
931 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
933 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
935 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
936 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
937 dev_err(&hdev->pdev->dev,
938 "only %u msi resources available, not enough for pf(min:2).\n",
943 if (hnae3_dev_roce_supported(hdev)) {
945 le16_to_cpu(req->pf_intr_vector_number_roce);
947 /* PF should have NIC vectors and Roce vectors,
948 * NIC vectors are queued before Roce vectors.
950 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
952 hdev->num_msi = hdev->num_nic_msi;
/* hclge_parse_speed - translate a firmware speed code into one of the
 * HCLGE_MAC_SPEED_* values (10M through 200G) via *speed.  The switch
 * statement's case labels and default/return lines were dropped from
 * this listing — confirm against the complete source.
 */
958 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
962 *speed = HCLGE_MAC_SPEED_10M;
965 *speed = HCLGE_MAC_SPEED_100M;
968 *speed = HCLGE_MAC_SPEED_1G;
971 *speed = HCLGE_MAC_SPEED_10G;
974 *speed = HCLGE_MAC_SPEED_25G;
977 *speed = HCLGE_MAC_SPEED_40G;
980 *speed = HCLGE_MAC_SPEED_50G;
983 *speed = HCLGE_MAC_SPEED_100G;
986 *speed = HCLGE_MAC_SPEED_200G;
/* hclge_check_port_speed - map a requested speed to its
 * HCLGE_SUPPORT_*_BIT and test it against the MAC's advertised
 * speed_ability mask; succeeds only when the bit is set.  The switch
 * header, 'break' statements, default branch and return values were
 * dropped from this listing — confirm against the complete source.
 */
995 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
997 struct hclge_vport *vport = hclge_get_vport(handle);
998 struct hclge_dev *hdev = vport->back;
999 u32 speed_ability = hdev->hw.mac.speed_ability;
1003 case HCLGE_MAC_SPEED_10M:
1004 speed_bit = HCLGE_SUPPORT_10M_BIT;
1006 case HCLGE_MAC_SPEED_100M:
1007 speed_bit = HCLGE_SUPPORT_100M_BIT;
1009 case HCLGE_MAC_SPEED_1G:
1010 speed_bit = HCLGE_SUPPORT_1G_BIT;
1012 case HCLGE_MAC_SPEED_10G:
1013 speed_bit = HCLGE_SUPPORT_10G_BIT;
1015 case HCLGE_MAC_SPEED_25G:
1016 speed_bit = HCLGE_SUPPORT_25G_BIT;
1018 case HCLGE_MAC_SPEED_40G:
1019 speed_bit = HCLGE_SUPPORT_40G_BIT;
1021 case HCLGE_MAC_SPEED_50G:
1022 speed_bit = HCLGE_SUPPORT_50G_BIT;
1024 case HCLGE_MAC_SPEED_100G:
1025 speed_bit = HCLGE_SUPPORT_100G_BIT;
1027 case HCLGE_MAC_SPEED_200G:
1028 speed_bit = HCLGE_SUPPORT_200G_BIT;
1034 if (speed_bit & speed_ability)
/* hclge_convert_setting_sr - set the ethtool SR (short-reach fiber)
 * link-mode bits that correspond to each supported speed bit in
 * @speed_ability.  The linkmode bitmap argument on each call was dropped
 * from this listing (presumably mac->supported — confirm against the
 * complete source).
 */
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1042 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1045 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1048 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1051 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1054 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1057 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
/* hclge_convert_setting_lr - set the ethtool LR (long-reach fiber)
 * link-mode bits for each supported speed.  Note 25G uses the SR mode
 * bit here (as written in the visible code); the bitmap argument on each
 * call was dropped from this listing — confirm against the complete
 * source.
 */
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1064 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1067 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1070 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1073 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1076 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1079 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1081 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
/* hclge_convert_setting_cr - set the ethtool CR (copper/direct-attach)
 * link-mode bits for each supported speed bit in @speed_ability.  The
 * bitmap argument on each call was dropped from this listing — confirm
 * against the complete source.
 */
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1087 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1090 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1093 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1096 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1099 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1102 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1109 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1112 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1115 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1118 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1121 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1124 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1127 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1134 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1137 switch (mac->speed) {
1138 case HCLGE_MAC_SPEED_10G:
1139 case HCLGE_MAC_SPEED_40G:
1140 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1143 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1145 case HCLGE_MAC_SPEED_25G:
1146 case HCLGE_MAC_SPEED_50G:
1147 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1150 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 BIT(HNAE3_FEC_AUTO);
1153 case HCLGE_MAC_SPEED_100G:
1154 case HCLGE_MAC_SPEED_200G:
1155 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1159 mac->fec_ability = 0;
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1167 struct hclge_mac *mac = &hdev->hw.mac;
1169 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1173 hclge_convert_setting_sr(mac, speed_ability);
1174 hclge_convert_setting_lr(mac, speed_ability);
1175 hclge_convert_setting_cr(mac, speed_ability);
1176 if (hnae3_dev_fec_supported(hdev))
1177 hclge_convert_setting_fec(mac);
1179 if (hnae3_dev_pause_supported(hdev))
1180 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1182 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1189 struct hclge_mac *mac = &hdev->hw.mac;
1191 hclge_convert_setting_kr(mac, speed_ability);
1192 if (hnae3_dev_fec_supported(hdev))
1193 hclge_convert_setting_fec(mac);
1195 if (hnae3_dev_pause_supported(hdev))
1196 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1198 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1205 unsigned long *supported = hdev->hw.mac.supported;
1207 /* default to support all speed for GE port */
1209 speed_ability = HCLGE_SUPPORT_GE;
1211 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1215 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1218 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1222 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1227 if (hnae3_dev_pause_supported(hdev)) {
1228 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1232 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1238 u8 media_type = hdev->hw.mac.media_type;
1240 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 hclge_parse_copper_link_mode(hdev, speed_ability);
1244 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 hclge_parse_backplane_link_mode(hdev, speed_ability);
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1250 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 return HCLGE_MAC_SPEED_200G;
1253 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 return HCLGE_MAC_SPEED_100G;
1256 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 return HCLGE_MAC_SPEED_50G;
1259 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 return HCLGE_MAC_SPEED_40G;
1262 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 return HCLGE_MAC_SPEED_25G;
1265 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 return HCLGE_MAC_SPEED_10G;
1268 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 return HCLGE_MAC_SPEED_1G;
1271 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 return HCLGE_MAC_SPEED_100M;
1274 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 return HCLGE_MAC_SPEED_10M;
1277 return HCLGE_MAC_SPEED_1G;
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1282 #define SPEED_ABILITY_EXT_SHIFT 8
1284 struct hclge_cfg_param_cmd *req;
1285 u64 mac_addr_tmp_high;
1286 u16 speed_ability_ext;
1290 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1292 /* get the configuration */
1293 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1294 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1295 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1296 HCLGE_CFG_TQP_DESC_N_M,
1297 HCLGE_CFG_TQP_DESC_N_S);
1299 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1300 HCLGE_CFG_PHY_ADDR_M,
1301 HCLGE_CFG_PHY_ADDR_S);
1302 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1303 HCLGE_CFG_MEDIA_TP_M,
1304 HCLGE_CFG_MEDIA_TP_S);
1305 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1306 HCLGE_CFG_RX_BUF_LEN_M,
1307 HCLGE_CFG_RX_BUF_LEN_S);
1308 /* get mac_address */
1309 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1310 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1311 HCLGE_CFG_MAC_ADDR_H_M,
1312 HCLGE_CFG_MAC_ADDR_H_S);
1314 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1316 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1317 HCLGE_CFG_DEFAULT_SPEED_M,
1318 HCLGE_CFG_DEFAULT_SPEED_S);
1319 cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1320 HCLGE_CFG_RSS_SIZE_M,
1321 HCLGE_CFG_RSS_SIZE_S);
1323 for (i = 0; i < ETH_ALEN; i++)
1324 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1326 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1327 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1329 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1330 HCLGE_CFG_SPEED_ABILITY_M,
1331 HCLGE_CFG_SPEED_ABILITY_S);
1332 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1333 HCLGE_CFG_SPEED_ABILITY_EXT_M,
1334 HCLGE_CFG_SPEED_ABILITY_EXT_S);
1335 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1337 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1338 HCLGE_CFG_UMV_TBL_SPACE_M,
1339 HCLGE_CFG_UMV_TBL_SPACE_S);
1340 if (!cfg->umv_space)
1341 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1343 cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1344 HCLGE_CFG_PF_RSS_SIZE_M,
1345 HCLGE_CFG_PF_RSS_SIZE_S);
1347 /* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a
1348 * power of 2, instead of reading out directly. This would
1349 * be more flexible for future changes and expansions.
1350 * When VF max rss size field is HCLGE_CFG_RSS_SIZE_S,
1351 * it does not make sense if PF's field is 0. In this case, PF and VF
1352 * has the same max rss size filed: HCLGE_CFG_RSS_SIZE_S.
1354 cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1355 1U << cfg->pf_rss_size_max :
1356 cfg->vf_rss_size_max;
1359 /* hclge_get_cfg: query the static parameter from flash
1360 * @hdev: pointer to struct hclge_dev
1361 * @hcfg: the config structure to be getted
1363 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1365 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1366 struct hclge_cfg_param_cmd *req;
1370 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1373 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1374 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1376 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1377 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1378 /* Len should be united by 4 bytes when send to hardware */
1379 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1380 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1381 req->offset = cpu_to_le32(offset);
1384 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1386 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1390 hclge_parse_cfg(hcfg, desc);
1395 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1397 #define HCLGE_MAX_NON_TSO_BD_NUM 8U
1399 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1401 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1402 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1403 ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1404 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1405 ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1406 ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1407 ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1410 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1411 struct hclge_desc *desc)
1413 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1414 struct hclge_dev_specs_0_cmd *req0;
1415 struct hclge_dev_specs_1_cmd *req1;
1417 req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1418 req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1420 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1421 ae_dev->dev_specs.rss_ind_tbl_size =
1422 le16_to_cpu(req0->rss_ind_tbl_size);
1423 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1424 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1425 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1426 ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1427 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1428 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1431 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1433 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1435 if (!dev_specs->max_non_tso_bd_num)
1436 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1437 if (!dev_specs->rss_ind_tbl_size)
1438 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1439 if (!dev_specs->rss_key_size)
1440 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1441 if (!dev_specs->max_tm_rate)
1442 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1443 if (!dev_specs->max_qset_num)
1444 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1445 if (!dev_specs->max_int_gl)
1446 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1447 if (!dev_specs->max_frm_size)
1448 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1451 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1453 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1457 /* set default specifications as devices lower than version V3 do not
1458 * support querying specifications from firmware.
1460 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1461 hclge_set_default_dev_specs(hdev);
1465 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1466 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1468 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1470 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1472 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1476 hclge_parse_dev_specs(hdev, desc);
1477 hclge_check_dev_specs(hdev);
1482 static int hclge_get_cap(struct hclge_dev *hdev)
1486 ret = hclge_query_function_status(hdev);
1488 dev_err(&hdev->pdev->dev,
1489 "query function status error %d.\n", ret);
1493 /* get pf resource */
1494 return hclge_query_pf_resource(hdev);
1497 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1499 #define HCLGE_MIN_TX_DESC 64
1500 #define HCLGE_MIN_RX_DESC 64
1502 if (!is_kdump_kernel())
1505 dev_info(&hdev->pdev->dev,
1506 "Running kdump kernel. Using minimal resources\n");
1508 /* minimal queue pairs equals to the number of vports */
1509 hdev->num_tqps = hdev->num_req_vfs + 1;
1510 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1511 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1514 static int hclge_configure(struct hclge_dev *hdev)
1516 struct hclge_cfg cfg;
1520 ret = hclge_get_cfg(hdev, &cfg);
1524 hdev->base_tqp_pid = 0;
1525 hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1526 hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1527 hdev->rx_buf_len = cfg.rx_buf_len;
1528 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1529 hdev->hw.mac.media_type = cfg.media_type;
1530 hdev->hw.mac.phy_addr = cfg.phy_addr;
1531 hdev->num_tx_desc = cfg.tqp_desc_num;
1532 hdev->num_rx_desc = cfg.tqp_desc_num;
1533 hdev->tm_info.num_pg = 1;
1534 hdev->tc_max = cfg.tc_num;
1535 hdev->tm_info.hw_pfc_map = 0;
1536 hdev->wanted_umv_size = cfg.umv_space;
1538 if (hnae3_dev_fd_supported(hdev)) {
1540 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1543 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1545 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1546 cfg.default_speed, ret);
1550 hclge_parse_link_mode(hdev, cfg.speed_ability);
1552 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1554 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1555 (hdev->tc_max < 1)) {
1556 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1561 /* Dev does not support DCB */
1562 if (!hnae3_dev_dcb_supported(hdev)) {
1566 hdev->pfc_max = hdev->tc_max;
1569 hdev->tm_info.num_tc = 1;
1571 /* Currently not support uncontiuous tc */
1572 for (i = 0; i < hdev->tm_info.num_tc; i++)
1573 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1575 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1577 hclge_init_kdump_kernel_config(hdev);
1579 /* Set the init affinity based on pci func number */
1580 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1581 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1582 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1583 &hdev->affinity_mask);
1588 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1591 struct hclge_cfg_tso_status_cmd *req;
1592 struct hclge_desc desc;
1594 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1596 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1597 req->tso_mss_min = cpu_to_le16(tso_mss_min);
1598 req->tso_mss_max = cpu_to_le16(tso_mss_max);
1600 return hclge_cmd_send(&hdev->hw, &desc, 1);
1603 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1605 struct hclge_cfg_gro_status_cmd *req;
1606 struct hclge_desc desc;
1609 if (!hnae3_dev_gro_supported(hdev))
1612 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1613 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1615 req->gro_en = en ? 1 : 0;
1617 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1619 dev_err(&hdev->pdev->dev,
1620 "GRO hardware config cmd failed, ret = %d\n", ret);
1625 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1627 struct hclge_tqp *tqp;
1630 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1631 sizeof(struct hclge_tqp), GFP_KERNEL);
1637 for (i = 0; i < hdev->num_tqps; i++) {
1638 tqp->dev = &hdev->pdev->dev;
1641 tqp->q.ae_algo = &ae_algo;
1642 tqp->q.buf_size = hdev->rx_buf_len;
1643 tqp->q.tx_desc_num = hdev->num_tx_desc;
1644 tqp->q.rx_desc_num = hdev->num_rx_desc;
1646 /* need an extended offset to configure queues >=
1647 * HCLGE_TQP_MAX_SIZE_DEV_V2
1649 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1650 tqp->q.io_base = hdev->hw.io_base +
1651 HCLGE_TQP_REG_OFFSET +
1652 i * HCLGE_TQP_REG_SIZE;
1654 tqp->q.io_base = hdev->hw.io_base +
1655 HCLGE_TQP_REG_OFFSET +
1656 HCLGE_TQP_EXT_REG_OFFSET +
1657 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1666 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1667 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1669 struct hclge_tqp_map_cmd *req;
1670 struct hclge_desc desc;
1673 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1675 req = (struct hclge_tqp_map_cmd *)desc.data;
1676 req->tqp_id = cpu_to_le16(tqp_pid);
1677 req->tqp_vf = func_id;
1678 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1680 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1681 req->tqp_vid = cpu_to_le16(tqp_vid);
1683 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1685 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1690 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1692 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1693 struct hclge_dev *hdev = vport->back;
1696 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1697 alloced < num_tqps; i++) {
1698 if (!hdev->htqp[i].alloced) {
1699 hdev->htqp[i].q.handle = &vport->nic;
1700 hdev->htqp[i].q.tqp_index = alloced;
1701 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1702 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1703 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1704 hdev->htqp[i].alloced = true;
1708 vport->alloc_tqps = alloced;
1709 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1710 vport->alloc_tqps / hdev->tm_info.num_tc);
1712 /* ensure one to one mapping between irq and queue at default */
1713 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1714 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1719 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1720 u16 num_tx_desc, u16 num_rx_desc)
1723 struct hnae3_handle *nic = &vport->nic;
1724 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1725 struct hclge_dev *hdev = vport->back;
1728 kinfo->num_tx_desc = num_tx_desc;
1729 kinfo->num_rx_desc = num_rx_desc;
1731 kinfo->rx_buf_len = hdev->rx_buf_len;
1733 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1734 sizeof(struct hnae3_queue *), GFP_KERNEL);
1738 ret = hclge_assign_tqp(vport, num_tqps);
1740 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1745 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1746 struct hclge_vport *vport)
1748 struct hnae3_handle *nic = &vport->nic;
1749 struct hnae3_knic_private_info *kinfo;
1752 kinfo = &nic->kinfo;
1753 for (i = 0; i < vport->alloc_tqps; i++) {
1754 struct hclge_tqp *q =
1755 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1759 is_pf = !(vport->vport_id);
1760 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1769 static int hclge_map_tqp(struct hclge_dev *hdev)
1771 struct hclge_vport *vport = hdev->vport;
1774 num_vport = hdev->num_req_vfs + 1;
1775 for (i = 0; i < num_vport; i++) {
1778 ret = hclge_map_tqp_to_vport(hdev, vport);
1788 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1790 struct hnae3_handle *nic = &vport->nic;
1791 struct hclge_dev *hdev = vport->back;
1794 nic->pdev = hdev->pdev;
1795 nic->ae_algo = &ae_algo;
1796 nic->numa_node_mask = hdev->numa_node_mask;
1798 ret = hclge_knic_setup(vport, num_tqps,
1799 hdev->num_tx_desc, hdev->num_rx_desc);
1801 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1806 static int hclge_alloc_vport(struct hclge_dev *hdev)
1808 struct pci_dev *pdev = hdev->pdev;
1809 struct hclge_vport *vport;
1815 /* We need to alloc a vport for main NIC of PF */
1816 num_vport = hdev->num_req_vfs + 1;
1818 if (hdev->num_tqps < num_vport) {
1819 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1820 hdev->num_tqps, num_vport);
1824 /* Alloc the same number of TQPs for every vport */
1825 tqp_per_vport = hdev->num_tqps / num_vport;
1826 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1828 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1833 hdev->vport = vport;
1834 hdev->num_alloc_vport = num_vport;
1836 if (IS_ENABLED(CONFIG_PCI_IOV))
1837 hdev->num_alloc_vfs = hdev->num_req_vfs;
1839 for (i = 0; i < num_vport; i++) {
1841 vport->vport_id = i;
1842 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1843 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1844 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1845 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1846 INIT_LIST_HEAD(&vport->vlan_list);
1847 INIT_LIST_HEAD(&vport->uc_mac_list);
1848 INIT_LIST_HEAD(&vport->mc_mac_list);
1849 spin_lock_init(&vport->mac_list_lock);
1852 ret = hclge_vport_setup(vport, tqp_main_vport);
1854 ret = hclge_vport_setup(vport, tqp_per_vport);
1857 "vport setup failed for vport %d, %d\n",
1868 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1869 struct hclge_pkt_buf_alloc *buf_alloc)
1871 /* TX buffer size is unit by 128 byte */
1872 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1873 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1874 struct hclge_tx_buff_alloc_cmd *req;
1875 struct hclge_desc desc;
1879 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1881 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1882 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1883 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1885 req->tx_pkt_buff[i] =
1886 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1887 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1890 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1892 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1898 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1899 struct hclge_pkt_buf_alloc *buf_alloc)
1901 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1904 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1909 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1914 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1915 if (hdev->hw_tc_map & BIT(i))
1920 /* Get the number of pfc enabled TCs, which have private buffer */
1921 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1922 struct hclge_pkt_buf_alloc *buf_alloc)
1924 struct hclge_priv_buf *priv;
1928 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1929 priv = &buf_alloc->priv_buf[i];
1930 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1938 /* Get the number of pfc disabled TCs, which have private buffer */
1939 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1940 struct hclge_pkt_buf_alloc *buf_alloc)
1942 struct hclge_priv_buf *priv;
1946 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1947 priv = &buf_alloc->priv_buf[i];
1948 if (hdev->hw_tc_map & BIT(i) &&
1949 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1957 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1959 struct hclge_priv_buf *priv;
1963 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1964 priv = &buf_alloc->priv_buf[i];
1966 rx_priv += priv->buf_size;
1971 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1973 u32 i, total_tx_size = 0;
1975 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1976 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1978 return total_tx_size;
1981 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1982 struct hclge_pkt_buf_alloc *buf_alloc,
1985 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1986 u32 tc_num = hclge_get_tc_num(hdev);
1987 u32 shared_buf, aligned_mps;
1991 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1993 if (hnae3_dev_dcb_supported(hdev))
1994 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1997 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1998 + hdev->dv_buf_size;
2000 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2001 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2002 HCLGE_BUF_SIZE_UNIT);
2004 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2005 if (rx_all < rx_priv + shared_std)
2008 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2009 buf_alloc->s_buf.buf_size = shared_buf;
2010 if (hnae3_dev_dcb_supported(hdev)) {
2011 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2012 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2013 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2014 HCLGE_BUF_SIZE_UNIT);
2016 buf_alloc->s_buf.self.high = aligned_mps +
2017 HCLGE_NON_DCB_ADDITIONAL_BUF;
2018 buf_alloc->s_buf.self.low = aligned_mps;
2021 if (hnae3_dev_dcb_supported(hdev)) {
2022 hi_thrd = shared_buf - hdev->dv_buf_size;
2024 if (tc_num <= NEED_RESERVE_TC_NUM)
2025 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2029 hi_thrd = hi_thrd / tc_num;
2031 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2032 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2033 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2035 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2036 lo_thrd = aligned_mps;
2039 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2040 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2041 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2047 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2048 struct hclge_pkt_buf_alloc *buf_alloc)
2052 total_size = hdev->pkt_buf_size;
2054 /* alloc tx buffer for all enabled tc */
2055 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2056 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2058 if (hdev->hw_tc_map & BIT(i)) {
2059 if (total_size < hdev->tx_buf_size)
2062 priv->tx_buf_size = hdev->tx_buf_size;
2064 priv->tx_buf_size = 0;
2067 total_size -= priv->tx_buf_size;
2073 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2074 struct hclge_pkt_buf_alloc *buf_alloc)
2076 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2077 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2080 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2081 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2088 if (!(hdev->hw_tc_map & BIT(i)))
2093 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2094 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2095 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2096 HCLGE_BUF_SIZE_UNIT);
2099 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2103 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2106 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2109 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2110 struct hclge_pkt_buf_alloc *buf_alloc)
2112 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2113 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2116 /* let the last to be cleared first */
2117 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2118 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2119 unsigned int mask = BIT((unsigned int)i);
2121 if (hdev->hw_tc_map & mask &&
2122 !(hdev->tm_info.hw_pfc_map & mask)) {
2123 /* Clear the no pfc TC private buffer */
2131 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2132 no_pfc_priv_num == 0)
2136 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2139 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2140 struct hclge_pkt_buf_alloc *buf_alloc)
2142 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2143 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2146 /* let the last to be cleared first */
2147 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2148 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2149 unsigned int mask = BIT((unsigned int)i);
2151 if (hdev->hw_tc_map & mask &&
2152 hdev->tm_info.hw_pfc_map & mask) {
2153 /* Reduce the number of pfc TC with private buffer */
2161 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2166 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2169 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2170 struct hclge_pkt_buf_alloc *buf_alloc)
2172 #define COMPENSATE_BUFFER 0x3C00
2173 #define COMPENSATE_HALF_MPS_NUM 5
2174 #define PRIV_WL_GAP 0x1800
2176 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2177 u32 tc_num = hclge_get_tc_num(hdev);
2178 u32 half_mps = hdev->mps >> 1;
2183 rx_priv = rx_priv / tc_num;
2185 if (tc_num <= NEED_RESERVE_TC_NUM)
2186 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2188 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2189 COMPENSATE_HALF_MPS_NUM * half_mps;
2190 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2191 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2192 if (rx_priv < min_rx_priv)
2195 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2196 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2203 if (!(hdev->hw_tc_map & BIT(i)))
2207 priv->buf_size = rx_priv;
2208 priv->wl.high = rx_priv - hdev->dv_buf_size;
2209 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2212 buf_alloc->s_buf.buf_size = 0;
2217 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2218 * @hdev: pointer to struct hclge_dev
2219 * @buf_alloc: pointer to buffer calculation data
2220 * @return: 0: calculate successful, negative: fail
2222 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2223 struct hclge_pkt_buf_alloc *buf_alloc)
2225 /* When DCB is not supported, rx private buffer is not allocated. */
2226 if (!hnae3_dev_dcb_supported(hdev)) {
2227 u32 rx_all = hdev->pkt_buf_size;
2229 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2230 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2236 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2239 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2242 /* try to decrease the buffer size */
2243 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2246 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2249 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2255 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2256 struct hclge_pkt_buf_alloc *buf_alloc)
2258 struct hclge_rx_priv_buff_cmd *req;
2259 struct hclge_desc desc;
2263 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2264 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2266 /* Alloc private buffer TCs */
2267 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2268 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2271 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2273 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2277 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2278 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2280 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2282 dev_err(&hdev->pdev->dev,
2283 "rx private buffer alloc cmd failed %d\n", ret);
2288 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2289 struct hclge_pkt_buf_alloc *buf_alloc)
2291 struct hclge_rx_priv_wl_buf *req;
2292 struct hclge_priv_buf *priv;
2293 struct hclge_desc desc[2];
2297 for (i = 0; i < 2; i++) {
2298 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2300 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2302 /* The first descriptor set the NEXT bit to 1 */
2304 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2306 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2308 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2309 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2311 priv = &buf_alloc->priv_buf[idx];
2312 req->tc_wl[j].high =
2313 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2314 req->tc_wl[j].high |=
2315 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2317 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2318 req->tc_wl[j].low |=
2319 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2323 /* Send 2 descriptor at one time */
2324 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2326 dev_err(&hdev->pdev->dev,
2327 "rx private waterline config cmd failed %d\n",
/* Write the per-TC shared-buffer (common) high/low thresholds to hardware
 * via two chained HCLGE_OPC_RX_COM_THRD_ALLOC descriptors. Thresholds come
 * from buf_alloc->s_buf.tc_thrd[], scaled by HCLGE_BUF_UNIT_S and marked
 * valid with HCLGE_RX_PRIV_EN_B.
 */
2332 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2333 struct hclge_pkt_buf_alloc *buf_alloc)
2335 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2336 struct hclge_rx_com_thrd *req;
2337 struct hclge_desc desc[2];
2338 struct hclge_tc_thrd *tc;
2342 for (i = 0; i < 2; i++) {
2343 hclge_cmd_setup_basic_desc(&desc[i],
2344 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2345 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2347 /* The first descriptor set the NEXT bit to 1 */
2349 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2351 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2353 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2354 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2356 req->com_thrd[j].high =
2357 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2358 req->com_thrd[j].high |=
2359 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2360 req->com_thrd[j].low =
2361 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2362 req->com_thrd[j].low |=
2363 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2367 /* Send 2 descriptors at one time */
2368 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2370 dev_err(&hdev->pdev->dev,
2371 "common threshold config cmd failed %d\n", ret);
/* Configure the shared RX buffer's own high/low waterline (single
 * HCLGE_OPC_RX_COM_WL_ALLOC descriptor), scaled by HCLGE_BUF_UNIT_S.
 */
2375 static int hclge_common_wl_config(struct hclge_dev *hdev,
2376 struct hclge_pkt_buf_alloc *buf_alloc)
2378 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2379 struct hclge_rx_com_wl *req;
2380 struct hclge_desc desc;
2383 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2385 req = (struct hclge_rx_com_wl *)desc.data;
2386 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2387 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2389 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2390 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2392 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2394 dev_err(&hdev->pdev->dev,
2395 "common waterline config cmd failed %d\n", ret);
/* Top-level packet-buffer partitioning: compute and commit TX buffer sizes,
 * then RX private buffers; waterline and threshold programming is only done
 * when the device supports DCB. Works on a temporary kzalloc'd
 * hclge_pkt_buf_alloc scratch structure.
 * NOTE(review): error-path gotos / kfree of pkt_buf are elided in this view.
 */
2400 int hclge_buffer_alloc(struct hclge_dev *hdev)
2402 struct hclge_pkt_buf_alloc *pkt_buf;
2405 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2409 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2411 dev_err(&hdev->pdev->dev,
2412 "could not calc tx buffer size for all TCs %d\n", ret);
2416 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2418 dev_err(&hdev->pdev->dev,
2419 "could not alloc tx buffers %d\n", ret);
2423 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2425 dev_err(&hdev->pdev->dev,
2426 "could not calc rx priv buffer size for all TCs %d\n",
2431 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2433 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
/* waterlines/thresholds only matter when per-TC (DCB) buffering is on */
2438 if (hnae3_dev_dcb_supported(hdev)) {
2439 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2441 dev_err(&hdev->pdev->dev,
2442 "could not configure rx private waterline %d\n",
2447 ret = hclge_common_thrd_config(hdev, pkt_buf);
2449 dev_err(&hdev->pdev->dev,
2450 "could not configure common threshold %d\n",
2456 ret = hclge_common_wl_config(hdev, pkt_buf);
2458 dev_err(&hdev->pdev->dev,
2459 "could not configure common waterline %d\n", ret);
/* Fill the vport's RoCE handle with vector, netdev and BAR info taken from
 * the PF device and the NIC handle. Fails when the allocated MSI vectors
 * cannot cover both NIC and RoCE demands.
 */
2466 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2468 struct hnae3_handle *roce = &vport->roce;
2469 struct hnae3_handle *nic = &vport->nic;
2470 struct hclge_dev *hdev = vport->back;
2472 roce->rinfo.num_vectors = vport->back->num_roce_msi;
/* not enough vectors for nic + roce together */
2474 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2477 roce->rinfo.base_vector = hdev->roce_base_vector;
2479 roce->rinfo.netdev = nic->kinfo.netdev;
2480 roce->rinfo.roce_io_base = hdev->hw.io_base;
2481 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2483 roce->pdev = nic->pdev;
2484 roce->ae_algo = nic->ae_algo;
2485 roce->numa_node_mask = nic->numa_node_mask;
/* Allocate MSI/MSI-X interrupt vectors for the PF and set up the
 * vector_status/vector_irq bookkeeping arrays. Accepts fewer vectors than
 * requested (warns); records base vectors for NIC and RoCE.
 */
2490 static int hclge_init_msi(struct hclge_dev *hdev)
2492 struct pci_dev *pdev = hdev->pdev;
2496 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2498 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2501 "failed(%d) to allocate MSI/MSI-X vectors\n",
2505 if (vectors < hdev->num_msi)
2506 dev_warn(&hdev->pdev->dev,
2507 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2508 hdev->num_msi, vectors);
2510 hdev->num_msi = vectors;
2511 hdev->num_msi_left = vectors;
2513 hdev->base_msi_vector = pdev->irq;
2514 hdev->roce_base_vector = hdev->base_msi_vector +
/* all vectors start out unassigned to any vport */
2517 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2518 sizeof(u16), GFP_KERNEL);
2519 if (!hdev->vector_status) {
2520 pci_free_irq_vectors(pdev);
2524 for (i = 0; i < hdev->num_msi; i++)
2525 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2527 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2528 sizeof(int), GFP_KERNEL);
2529 if (!hdev->vector_irq) {
2530 pci_free_irq_vectors(pdev);
/* Sanitize a requested duplex: hardware only supports half duplex at
 * 10M/100M, so any other speed is forced to full duplex.
 */
2537 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2539 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2540 duplex = HCLGE_MAC_FULL;
/* Program MAC speed and duplex into firmware via CONFIG_SPEED_DUP.
 * Each supported link speed maps to a firmware encoding (10M->6, 100M->7,
 * 1G->0, 10G->1, 25G->2, 40G->3, 50G->4, 100G->5, 200G->8); an unknown
 * speed is rejected with an error log.
 */
2545 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2548 struct hclge_config_mac_speed_dup_cmd *req;
2549 struct hclge_desc desc;
2552 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2554 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2557 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2560 case HCLGE_MAC_SPEED_10M:
2561 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2562 HCLGE_CFG_SPEED_S, 6);
2564 case HCLGE_MAC_SPEED_100M:
2565 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2566 HCLGE_CFG_SPEED_S, 7);
2568 case HCLGE_MAC_SPEED_1G:
2569 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570 HCLGE_CFG_SPEED_S, 0);
2572 case HCLGE_MAC_SPEED_10G:
2573 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574 HCLGE_CFG_SPEED_S, 1);
2576 case HCLGE_MAC_SPEED_25G:
2577 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2578 HCLGE_CFG_SPEED_S, 2);
2580 case HCLGE_MAC_SPEED_40G:
2581 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2582 HCLGE_CFG_SPEED_S, 3);
2584 case HCLGE_MAC_SPEED_50G:
2585 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586 HCLGE_CFG_SPEED_S, 4);
2588 case HCLGE_MAC_SPEED_100G:
2589 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590 HCLGE_CFG_SPEED_S, 5);
2592 case HCLGE_MAC_SPEED_200G:
2593 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594 HCLGE_CFG_SPEED_S, 8);
2597 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2601 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2604 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2606 dev_err(&hdev->pdev->dev,
2607 "mac speed/duplex config cmd failed %d.\n", ret);
/* Set MAC speed/duplex: sanitize duplex, skip the firmware command when the
 * (non-autoneg) values are already current, and cache the new values on
 * success.
 */
2614 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2616 struct hclge_mac *mac = &hdev->hw.mac;
2619 duplex = hclge_check_speed_dup(duplex, speed);
/* nothing to do if HW already matches and autoneg is not in play */
2620 if (!mac->support_autoneg && mac->speed == speed &&
2621 mac->duplex == duplex)
2624 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2628 hdev->hw.mac.speed = speed;
2629 hdev->hw.mac.duplex = duplex;
/* hnae3 ops wrapper: resolve the vport behind the handle and delegate to
 * hclge_cfg_mac_speed_dup().
 */
2634 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2637 struct hclge_vport *vport = hclge_get_vport(handle);
2638 struct hclge_dev *hdev = vport->back;
2640 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
/* Enable/disable MAC autonegotiation in firmware via CONFIG_AN_MODE. */
2643 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2645 struct hclge_config_auto_neg_cmd *req;
2646 struct hclge_desc desc;
2650 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2652 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2654 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2655 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2657 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2659 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
/* hnae3 op: set autoneg state; rejected with an error when the port does
 * not support MAC autonegotiation.
 */
2665 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2667 struct hclge_vport *vport = hclge_get_vport(handle);
2668 struct hclge_dev *hdev = vport->back;
2670 if (!hdev->hw.mac.support_autoneg) {
2672 dev_err(&hdev->pdev->dev,
2673 "autoneg is not supported by current port\n");
2680 return hclge_set_autoneg_en(hdev, enable);
/* hnae3 op: report autoneg state; prefer the external PHY's state when a
 * phydev is attached, otherwise fall back to the cached MAC state.
 */
2683 static int hclge_get_autoneg(struct hnae3_handle *handle)
2685 struct hclge_vport *vport = hclge_get_vport(handle);
2686 struct hclge_dev *hdev = vport->back;
2687 struct phy_device *phydev = hdev->hw.mac.phydev;
2690 return phydev->autoneg;
2692 return hdev->hw.mac.autoneg;
/* hnae3 op: restart autonegotiation by cycling the NIC client down and
 * back up (DOWN then UP notifications).
 */
2695 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2697 struct hclge_vport *vport = hclge_get_vport(handle);
2698 struct hclge_dev *hdev = vport->back;
2701 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2703 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2706 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
/* hnae3 op: temporarily pause (halt=true) or resume (halt=false) autoneg,
 * but only when the MAC both supports and currently has autoneg enabled.
 */
2709 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2711 struct hclge_vport *vport = hclge_get_vport(handle);
2712 struct hclge_dev *hdev = vport->back;
2714 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2715 return hclge_set_autoneg_en(hdev, !halt);
/* Translate an HNAE3 FEC mode bitmap (AUTO/RS/BASER) into the firmware
 * CONFIG_FEC_MODE command and send it.
 */
2720 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2722 struct hclge_config_fec_cmd *req;
2723 struct hclge_desc desc;
2726 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2728 req = (struct hclge_config_fec_cmd *)desc.data;
2729 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2730 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2731 if (fec_mode & BIT(HNAE3_FEC_RS))
2732 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2733 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2734 if (fec_mode & BIT(HNAE3_FEC_BASER))
2735 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2736 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2738 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2740 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
/* hnae3 op: apply a user-requested FEC mode after validating it against
 * the MAC's advertised FEC ability; remember it as a user-defined setting.
 */
2745 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2747 struct hclge_vport *vport = hclge_get_vport(handle);
2748 struct hclge_dev *hdev = vport->back;
2749 struct hclge_mac *mac = &hdev->hw.mac;
2752 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2753 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2757 ret = hclge_set_fec_hw(hdev, fec_mode);
/* flag that this FEC choice came from the user, not autodetection */
2761 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
/* hnae3 op: report the MAC's FEC ability and current FEC mode.
 * NOTE(review): output-pointer NULL checks appear elided in this view.
 */
2765 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2768 struct hclge_vport *vport = hclge_get_vport(handle);
2769 struct hclge_dev *hdev = vport->back;
2770 struct hclge_mac *mac = &hdev->hw.mac;
2773 *fec_ability = mac->fec_ability;
2775 *fec_mode = mac->fec_mode;
/* Initialize the MAC: program speed/duplex, autoneg (if supported),
 * user-defined FEC (if previously set), MTU, default loopback, and finally
 * the packet buffer allocation. Each step logs and aborts on failure.
 */
2778 static int hclge_mac_init(struct hclge_dev *hdev)
2780 struct hclge_mac *mac = &hdev->hw.mac;
2783 hdev->support_sfp_query = true;
2784 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2785 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2786 hdev->hw.mac.duplex);
2790 if (hdev->hw.mac.support_autoneg) {
2791 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
/* re-apply FEC only when the user explicitly configured one */
2798 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2799 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2804 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2806 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2810 ret = hclge_set_default_loopback(hdev);
2814 ret = hclge_buffer_alloc(hdev);
2816 dev_err(&hdev->pdev->dev,
2817 "allocate buffer fail, ret=%d\n", ret);
/* Schedule the mailbox service work on the preferred (affinity) CPU,
 * unless the device is being removed or mailbox work is already pending.
 */
2822 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2824 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2825 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2826 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2827 hclge_wq, &hdev->service_task, 0);
/* Schedule the reset service work immediately on the affinity CPU, unless
 * the device is being removed or a reset task is already scheduled.
 */
2830 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2832 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2833 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2834 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2835 hclge_wq, &hdev->service_task, 0);
/* (Re)arm the periodic service task after delay_time, skipped while the
 * device is being removed or a previous reset has failed.
 */
2838 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2840 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2841 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2842 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2843 hclge_wq, &hdev->service_task,
/* Query firmware (QUERY_LINK_STATUS) for the MAC link state and normalize
 * the reply into HCLGE_LINK_STATUS_UP/DOWN through *link_status.
 */
2847 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2849 struct hclge_link_status_cmd *req;
2850 struct hclge_desc desc;
2853 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2854 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2856 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2861 req = (struct hclge_link_status_cmd *)desc.data;
2862 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2863 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
/* Combined link query: report DOWN early when the device is administratively
 * down or the attached PHY is not running/linked; otherwise ask the MAC.
 */
2868 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2870 struct phy_device *phydev = hdev->hw.mac.phydev;
2872 *link_status = HCLGE_LINK_STATUS_DOWN;
2874 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2877 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2880 return hclge_get_mac_link_status(hdev, link_status);
/* Poll the link state and, on a change, notify the NIC client (and the RoCE
 * client if registered), update the MAC TNL interrupt config, and cache the
 * new state. HCLGE_STATE_LINK_UPDATING serializes concurrent updates.
 */
2883 static void hclge_update_link_status(struct hclge_dev *hdev)
2885 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2886 struct hnae3_handle *handle = &hdev->vport[0].nic;
2887 struct hnae3_client *rclient = hdev->roce_client;
2888 struct hnae3_client *client = hdev->nic_client;
2895 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2898 ret = hclge_get_mac_phy_link(hdev, &state);
2900 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2904 if (state != hdev->hw.mac.link) {
2905 client->ops->link_status_change(handle, state);
2906 hclge_config_mac_tnl_int(hdev, state);
2907 if (rclient && rclient->ops->link_status_change)
2908 rclient->ops->link_status_change(rhandle, state);
2910 hdev->hw.mac.link = state;
2913 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
/* Refresh port capabilities after a media/speed change: recompute FEC
 * ability, infer the module type for backplane/copper media (firmware cannot
 * identify backplane), and sync the Autoneg bit and advertising mask with
 * the supported modes.
 */
2916 static void hclge_update_port_capability(struct hclge_dev *hdev,
2917 struct hclge_mac *mac)
2919 if (hnae3_dev_fec_supported(hdev))
2920 /* update fec ability by speed */
2921 hclge_convert_setting_fec(mac);
2923 /* firmware can not identify back plane type, the media type
2924 * read from configuration can help deal it
2926 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2927 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2928 mac->module_type = HNAE3_MODULE_TYPE_KR;
2929 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2930 mac->module_type = HNAE3_MODULE_TYPE_TP;
2932 if (mac->support_autoneg) {
2933 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2934 linkmode_copy(mac->advertising, mac->supported);
2936 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2938 linkmode_zero(mac->advertising);
/* Query the SFP module speed from firmware (GET_SFP_INFO). -EOPNOTSUPP is
 * only warned about so callers can detect old firmware and stop querying.
 */
2942 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2944 struct hclge_sfp_info_cmd *resp;
2945 struct hclge_desc desc;
2948 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2949 resp = (struct hclge_sfp_info_cmd *)desc.data;
2950 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2951 if (ret == -EOPNOTSUPP) {
2952 dev_warn(&hdev->pdev->dev,
2953 "IMP do not support get SFP speed %d\n", ret);
2956 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2960 *speed = le32_to_cpu(resp->speed);
/* Query full SFP/port info (active speed query type) from firmware and
 * update the MAC: speed (ignoring a bogus 0), and — when the firmware is new
 * enough to fill speed_ability — module type, autoneg capability/state and
 * the active FEC mode.
 */
2965 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2967 struct hclge_sfp_info_cmd *resp;
2968 struct hclge_desc desc;
2971 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2972 resp = (struct hclge_sfp_info_cmd *)desc.data;
2974 resp->query_type = QUERY_ACTIVE_SPEED;
2976 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2977 if (ret == -EOPNOTSUPP) {
2978 dev_warn(&hdev->pdev->dev,
2979 "IMP does not support get SFP info %d\n", ret);
2982 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2986 /* In some case, mac speed get from IMP may be 0, it shouldn't be
2987 * set to mac->speed.
2989 if (!le32_to_cpu(resp->speed))
2992 mac->speed = le32_to_cpu(resp->speed);
2993 /* if resp->speed_ability is 0, it means it's an old version
2994 * firmware, do not update these params
2996 if (resp->speed_ability) {
2997 mac->module_type = le32_to_cpu(resp->module_type);
2998 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2999 mac->autoneg = resp->autoneg;
3000 mac->support_autoneg = resp->autoneg_ability;
3001 mac->speed_type = QUERY_ACTIVE_SPEED;
3002 if (!resp->active_fec)
3005 mac->fec_mode = BIT(resp->active_fec);
3007 mac->speed_type = QUERY_SFP_SPEED;
/* Read the IMP-managed PHY's link ksettings via two chained
 * HCLGE_OPC_PHY_LINK_KSETTING descriptors and translate them into the
 * ethtool_link_ksettings structure (base fields plus legacy-u32 link mode
 * bitmaps for supported/advertising/lp_advertising).
 */
3013 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3014 struct ethtool_link_ksettings *cmd)
3016 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3017 struct hclge_vport *vport = hclge_get_vport(handle);
3018 struct hclge_phy_link_ksetting_0_cmd *req0;
3019 struct hclge_phy_link_ksetting_1_cmd *req1;
3020 u32 supported, advertising, lp_advertising;
3021 struct hclge_dev *hdev = vport->back;
3024 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3026 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3027 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3030 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3032 dev_err(&hdev->pdev->dev,
3033 "failed to get phy link ksetting, ret = %d.\n", ret);
3037 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3038 cmd->base.autoneg = req0->autoneg;
3039 cmd->base.speed = le32_to_cpu(req0->speed);
3040 cmd->base.duplex = req0->duplex;
3041 cmd->base.port = req0->port;
3042 cmd->base.transceiver = req0->transceiver;
3043 cmd->base.phy_address = req0->phy_address;
3044 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3045 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3046 supported = le32_to_cpu(req0->supported);
3047 advertising = le32_to_cpu(req0->advertising);
3048 lp_advertising = le32_to_cpu(req0->lp_advertising);
3049 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3051 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3053 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3056 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3057 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3058 cmd->base.master_slave_state = req1->master_slave_state;
/* Apply ethtool link ksettings to the IMP-managed PHY. With autoneg
 * disabled only 10/100 Mb/s with half/full duplex are accepted. On success
 * the requested autoneg/speed/duplex/advertising are cached in hw.mac.
 */
3064 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3065 const struct ethtool_link_ksettings *cmd)
3067 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3068 struct hclge_vport *vport = hclge_get_vport(handle);
3069 struct hclge_phy_link_ksetting_0_cmd *req0;
3070 struct hclge_phy_link_ksetting_1_cmd *req1;
3071 struct hclge_dev *hdev = vport->back;
/* forced mode: only 10M/100M half/full are valid on this PHY */
3075 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3076 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3077 (cmd->base.duplex != DUPLEX_HALF &&
3078 cmd->base.duplex != DUPLEX_FULL)))
3081 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3083 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3084 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3087 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3088 req0->autoneg = cmd->base.autoneg;
3089 req0->speed = cpu_to_le32(cmd->base.speed);
3090 req0->duplex = cmd->base.duplex;
3091 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3092 cmd->link_modes.advertising);
3093 req0->advertising = cpu_to_le32(advertising);
3094 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3096 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3097 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3099 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3101 dev_err(&hdev->pdev->dev,
3102 "failed to set phy link ksettings, ret = %d.\n", ret);
3106 hdev->hw.mac.autoneg = cmd->base.autoneg;
3107 hdev->hw.mac.speed = cmd->base.speed;
3108 hdev->hw.mac.duplex = cmd->base.duplex;
3109 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
/* Refresh cached MAC autoneg/speed/duplex from the IMP-managed PHY.
 * No-op (returns early) when the device has no IMP PHY support.
 */
3114 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3116 struct ethtool_link_ksettings cmd;
3119 if (!hnae3_dev_phy_imp_supported(hdev))
3122 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3126 hdev->hw.mac.autoneg = cmd.base.autoneg;
3127 hdev->hw.mac.speed = cmd.base.speed;
3128 hdev->hw.mac.duplex = cmd.base.duplex;
/* Push the cached MAC autoneg/speed/duplex/advertising back to the
 * IMP-managed PHY (e.g. after reset). No-op without IMP PHY support.
 */
3133 static int hclge_tp_port_init(struct hclge_dev *hdev)
3135 struct ethtool_link_ksettings cmd;
3137 if (!hnae3_dev_phy_imp_supported(hdev))
3140 cmd.base.autoneg = hdev->hw.mac.autoneg;
3141 cmd.base.speed = hdev->hw.mac.speed;
3142 cmd.base.duplex = hdev->hw.mac.duplex;
3143 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3145 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
/* Periodic port info refresh. Copper ports are handled via the IMP PHY
 * path; otherwise query SFP info (V2+ firmware) or just SFP speed (older
 * firmware). -EOPNOTSUPP permanently disables further SFP queries. For V2+
 * active-speed results the port capability is refreshed instead of forcing
 * a speed; SFP speeds are always configured as full duplex.
 */
3148 static int hclge_update_port_info(struct hclge_dev *hdev)
3150 struct hclge_mac *mac = &hdev->hw.mac;
3151 int speed = HCLGE_MAC_SPEED_UNKNOWN;
3154 /* get the port info from SFP cmd if not copper port */
3155 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3156 return hclge_update_tp_port_info(hdev);
3158 /* if IMP does not support get SFP/qSFP info, return directly */
3159 if (!hdev->support_sfp_query)
3162 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3163 ret = hclge_get_sfp_info(hdev, mac);
3165 ret = hclge_get_sfp_speed(hdev, &speed);
3167 if (ret == -EOPNOTSUPP) {
3168 hdev->support_sfp_query = false;
3174 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3175 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3176 hclge_update_port_capability(hdev, mac);
3179 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3182 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3183 return 0; /* do nothing if no SFP */
3185 /* must config full duplex for SFP */
3186 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
/* hnae3 op: force a link-status refresh and return the cached MAC link. */
3190 static int hclge_get_status(struct hnae3_handle *handle)
3192 struct hclge_vport *vport = hclge_get_vport(handle);
3193 struct hclge_dev *hdev = vport->back;
3195 hclge_update_link_status(hdev);
3197 return hdev->hw.mac.link;
/* Resolve a VF index into its vport. Validates that SR-IOV is enabled and
 * the index is in [0, num_vf); VF vports start at index
 * HCLGE_VF_VPORT_START_NUM in hdev->vport[] (vport 0 is the PF).
 */
3200 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3202 if (!pci_num_vf(hdev->pdev)) {
3203 dev_err(&hdev->pdev->dev,
3204 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3208 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3209 dev_err(&hdev->pdev->dev,
3210 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3211 vf, pci_num_vf(hdev->pdev));
3215 /* VF start from 1 in vport */
3216 vf += HCLGE_VF_VPORT_START_NUM;
3217 return &hdev->vport[vf];
/* hnae3 op (ndo_get_vf_config backend): fill ifla_vf_info from the VF
 * vport's cached state (link state, spoofchk, trust, rate, port-base VLAN
 * and MAC address).
 */
3220 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3221 struct ifla_vf_info *ivf)
3223 struct hclge_vport *vport = hclge_get_vport(handle);
3224 struct hclge_dev *hdev = vport->back;
3226 vport = hclge_get_vf_vport(hdev, vf);
3231 ivf->linkstate = vport->vf_info.link_state;
3232 ivf->spoofchk = vport->vf_info.spoofchk;
3233 ivf->trusted = vport->vf_info.trusted;
3234 ivf->min_tx_rate = 0;
3235 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3236 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3237 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3238 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3239 ether_addr_copy(ivf->mac, vport->vf_info.mac);
/* hnae3 op: record the administratively requested link state for a VF. */
3244 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3247 struct hclge_vport *vport = hclge_get_vport(handle);
3248 struct hclge_dev *hdev = vport->back;
3250 vport = hclge_get_vf_vport(hdev, vf);
3254 vport->vf_info.link_state = link_state;
/* Decode the vector0 interrupt cause from the CMDQ source and MISC MSI-X
 * status registers, in priority order: IMP reset, global reset, MSI-X
 * error, mailbox (CMDQ RX), then "other". *clearval receives the bits the
 * caller must write back to acknowledge the event.
 */
3259 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3261 u32 cmdq_src_reg, msix_src_reg;
3263 /* fetch the events from their corresponding regs */
3264 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3265 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3267 /* Assumption: If by any chance reset and mailbox events are reported
3268 * together then we will only process reset event in this go and will
3269 * defer the processing of the mailbox events. Since, we would have not
3270 * cleared RX CMDQ event this time we would receive again another
3271 * interrupt from H/W just for the mailbox.
3273 * check for vector0 reset event sources
3275 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3276 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3277 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3278 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3279 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3280 hdev->rst_stats.imp_rst_cnt++;
3281 return HCLGE_VECTOR0_EVENT_RST;
3284 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3285 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3286 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3287 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3288 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3289 hdev->rst_stats.global_rst_cnt++;
3290 return HCLGE_VECTOR0_EVENT_RST;
3293 /* check for vector0 msix event source */
3294 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3295 *clearval = msix_src_reg;
3296 return HCLGE_VECTOR0_EVENT_ERR;
3299 /* check for vector0 mailbox(=CMDQ RX) event source */
3300 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3301 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3302 *clearval = cmdq_src_reg;
3303 return HCLGE_VECTOR0_EVENT_MBX;
3306 /* print other vector0 event source */
3307 dev_info(&hdev->pdev->dev,
3308 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3309 cmdq_src_reg, msix_src_reg);
3310 *clearval = msix_src_reg;
3312 return HCLGE_VECTOR0_EVENT_OTHER;
/* Acknowledge a vector0 event by writing regclr to the register matching
 * the event type (reset status reg or CMDQ source reg).
 */
3315 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3318 switch (event_type) {
3319 case HCLGE_VECTOR0_EVENT_RST:
3320 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3322 case HCLGE_VECTOR0_EVENT_MBX:
3323 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
/* Clear every possible pending vector0 event: all reset sources (global,
 * core, IMP) plus any pending mailbox event.
 */
3330 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3332 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3333 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3334 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3335 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3336 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
/* Mask (0) or unmask (1) the misc interrupt vector via its MMIO address. */
3339 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3341 writel(enable ? 1 : 0, vector->addr);
/* Misc (vector0) interrupt handler: mask the vector, decode the cause, and
 * dispatch — errors request an HNAE3_UNKNOWN_RESET (refined later by the
 * reset task), resets schedule the reset task, mailbox events schedule the
 * mbx task. The vector is re-enabled only for mailbox/cleared causes; reset
 * paths leave it masked until reset handling re-enables it.
 */
3344 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3346 struct hclge_dev *hdev = data;
3350 hclge_enable_vector(&hdev->misc_vector, false);
3351 event_cause = hclge_check_event_cause(hdev, &clearval);
3353 /* vector 0 interrupt is shared with reset and mailbox source events.*/
3354 switch (event_cause) {
3355 case HCLGE_VECTOR0_EVENT_ERR:
3356 /* we do not know what type of reset is required now. This could
3357 * only be decided after we fetch the type of errors which
3358 * caused this event. Therefore, we will do below for now:
3359 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3360 * have defered type of reset to be used.
3361 * 2. Schedule the reset service task.
3362 * 3. When service task receives  HNAE3_UNKNOWN_RESET type it
3363 * will fetch the correct type of reset.  This would be done
3364 * by first decoding the types of errors.
3366 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3368 case HCLGE_VECTOR0_EVENT_RST:
3369 hclge_reset_task_schedule(hdev);
3371 case HCLGE_VECTOR0_EVENT_MBX:
3372 /* If we are here then,
3373 * 1. Either we are not handling any mbx task and we are not
3376 * 2. We could be handling a mbx task but nothing more is
3378 * In both cases, we should schedule mbx task as there are more
3379 * mbx messages reported by this interrupt.
3381 hclge_mbx_task_schedule(hdev);
3384 dev_warn(&hdev->pdev->dev,
3385 "received unknown or unhandled event of vector0\n");
3389 hclge_clear_event_cause(hdev, event_cause, clearval);
3391 /* Enable interrupt if it is not cause by reset. And when
3392 * clearval equal to 0, it means interrupt status may be
3393 * cleared by hardware before driver reads status register.
3394 * For this case, vector0 interrupt also should be enabled.
3397 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3398 hclge_enable_vector(&hdev->misc_vector, true);
/* Return an MSI vector to the free pool; warns on double-free (vector
 * already marked HCLGE_INVALID_VPORT).
 */
3404 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3406 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3407 dev_warn(&hdev->pdev->dev,
3408 "vector(vector_id %d) has been freed.\n", vector_id);
3412 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3413 hdev->num_msi_left += 1;
3414 hdev->num_msi_used -= 1;
/* Claim MSI vector 0 as the misc (vector0) interrupt: record its irq and
 * MMIO enable-register address, and account it as used.
 */
3417 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3419 struct hclge_misc_vector *vector = &hdev->misc_vector;
3421 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3423 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3424 hdev->vector_status[0] = 0;
3426 hdev->num_msi_left -= 1;
3427 hdev->num_msi_used += 1;
/* IRQ affinity-change callback: mirror the new CPU mask into
 * hdev->affinity_mask so service tasks follow the misc IRQ's CPU.
 */
3430 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3431 const cpumask_t *mask)
3433 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3436 cpumask_copy(&hdev->affinity_mask, mask);
/* Intentionally empty kref release hook required by the irq affinity
 * notifier API; the notifier is embedded in hclge_dev, nothing to free.
 */
3439 static void hclge_irq_affinity_release(struct kref *ref)
/* Pin the misc IRQ to the preferred CPU mask and register an affinity
 * notifier so the driver tracks subsequent affinity changes.
 */
3443 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3445 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3446 &hdev->affinity_mask);
3448 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3449 hdev->affinity_notify.release = hclge_irq_affinity_release;
3450 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3451 &hdev->affinity_notify);
/* Undo hclge_misc_affinity_setup(): drop the notifier and affinity hint. */
3454 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3456 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3457 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
/* Claim vector 0 and request the misc IRQ ("hclge-misc-<pci name>").
 * On request_irq failure the vector is returned to the pool.
 */
3460 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3464 hclge_get_misc_vector(hdev);
3466 /* this would be explicitly freed in the end */
3467 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3468 HCLGE_NAME, pci_name(hdev->pdev));
3469 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3470 0, hdev->misc_vector.name, hdev);
3472 hclge_free_vector(hdev, 0);
3473 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3474 hdev->misc_vector.vector_irq);
/* Release the misc IRQ and return vector 0 to the free pool. */
3480 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3482 free_irq(hdev->misc_vector.vector_irq, hdev);
3483 hclge_free_vector(hdev, 0);
/* Deliver a reset notification to the NIC client, if one is registered and
 * implements reset_notify; failures are logged with type and error code.
 */
3486 int hclge_notify_client(struct hclge_dev *hdev,
3487 enum hnae3_reset_notify_type type)
3489 struct hnae3_handle *handle = &hdev->vport[0].nic;
3490 struct hnae3_client *client = hdev->nic_client;
3493 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3496 if (!client->ops->reset_notify)
3499 ret = client->ops->reset_notify(handle, type);
3501 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
/* Same as hclge_notify_client() but for the RoCE client/handle. */
3507 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3508 enum hnae3_reset_notify_type type)
3510 struct hnae3_handle *handle = &hdev->vport[0].roce;
3511 struct hnae3_client *client = hdev->roce_client;
3514 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3517 if (!client->ops->reset_notify)
3520 ret = client->ops->reset_notify(handle, type);
3522 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
/* Poll the reset-status register until the hardware clears the bit matching
 * the pending reset type (IMP/global/function), sleeping 100 ms per try up
 * to 350 tries (~35 s); warns and fails on timeout or unknown reset type.
 */
3528 static int hclge_reset_wait(struct hclge_dev *hdev)
3530 #define HCLGE_RESET_WATI_MS 100
3531 #define HCLGE_RESET_WAIT_CNT 350
3533 u32 val, reg, reg_bit;
3536 switch (hdev->reset_type) {
3537 case HNAE3_IMP_RESET:
3538 reg = HCLGE_GLOBAL_RESET_REG;
3539 reg_bit = HCLGE_IMP_RESET_BIT;
3541 case HNAE3_GLOBAL_RESET:
3542 reg = HCLGE_GLOBAL_RESET_REG;
3543 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3545 case HNAE3_FUNC_RESET:
3546 reg = HCLGE_FUN_RST_ING;
3547 reg_bit = HCLGE_FUN_RST_ING_B;
3550 dev_err(&hdev->pdev->dev,
3551 "Wait for unsupported reset type: %d\n",
3556 val = hclge_read_dev(&hdev->hw, reg);
3557 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3558 msleep(HCLGE_RESET_WATI_MS);
3559 val = hclge_read_dev(&hdev->hw, reg);
3563 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3564 dev_warn(&hdev->pdev->dev,
3565 "Wait for reset timeout: %d\n", hdev->reset_type);
/* Tell firmware to set or clear a VF's FUNC_RST_ING flag
 * (HCLGE_OPC_GBL_RST_STATUS command targeted at func_id).
 */
3572 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3574 struct hclge_vf_rst_cmd *req;
3575 struct hclge_desc desc;
3577 req = (struct hclge_vf_rst_cmd *)desc.data;
3578 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3579 req->dest_vfid = func_id;
3584 return hclge_cmd_send(&hdev->hw, &desc, 1);
3587 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3591 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3592 struct hclge_vport *vport = &hdev->vport[i];
3595 /* Send cmd to set/clear VF's FUNC_RST_ING */
3596 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3598 dev_err(&hdev->pdev->dev,
3599 "set vf(%u) rst failed %d!\n",
3600 vport->vport_id, ret);
3604 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3607 /* Inform VF to process the reset.
3608 * hclge_inform_reset_assert_to_vf may fail if VF
3609 * driver is not loaded.
3611 ret = hclge_inform_reset_assert_to_vf(vport);
3613 dev_warn(&hdev->pdev->dev,
3614 "inform reset to vf(%u) failed %d!\n",
3615 vport->vport_id, ret);
3621 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3623 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3624 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3625 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3628 hclge_mbx_handler(hdev);
3630 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3633 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3635 struct hclge_pf_rst_sync_cmd *req;
3636 struct hclge_desc desc;
3640 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3641 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3644 /* vf need to down netdev by mbx during PF or FLR reset */
3645 hclge_mailbox_service_task(hdev);
3647 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3648 /* for compatible with old firmware, wait
3649 * 100 ms for VF to stop IO
3651 if (ret == -EOPNOTSUPP) {
3652 msleep(HCLGE_RESET_SYNC_TIME);
3655 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3658 } else if (req->all_vf_ready) {
3661 msleep(HCLGE_PF_RESET_SYNC_TIME);
3662 hclge_cmd_reuse_desc(&desc, true);
3663 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3665 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3668 void hclge_report_hw_error(struct hclge_dev *hdev,
3669 enum hnae3_hw_error_type type)
3671 struct hnae3_client *client = hdev->nic_client;
3673 if (!client || !client->ops->process_hw_error ||
3674 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3677 client->ops->process_hw_error(&hdev->vport[0].nic, type);
3680 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3684 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3685 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3686 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3687 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3688 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3691 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3692 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3693 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3694 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3698 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3700 struct hclge_desc desc;
3701 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3704 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3705 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3706 req->fun_reset_vfid = func_id;
3708 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3710 dev_err(&hdev->pdev->dev,
3711 "send function reset cmd fail, status =%d\n", ret);
3716 static void hclge_do_reset(struct hclge_dev *hdev)
3718 struct hnae3_handle *handle = &hdev->vport[0].nic;
3719 struct pci_dev *pdev = hdev->pdev;
3722 if (hclge_get_hw_reset_stat(handle)) {
3723 dev_info(&pdev->dev, "hardware reset not finish\n");
3724 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3725 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3726 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3730 switch (hdev->reset_type) {
3731 case HNAE3_GLOBAL_RESET:
3732 dev_info(&pdev->dev, "global reset requested\n");
3733 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3734 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3735 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3737 case HNAE3_FUNC_RESET:
3738 dev_info(&pdev->dev, "PF reset requested\n");
3739 /* schedule again to check later */
3740 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3741 hclge_reset_task_schedule(hdev);
3744 dev_warn(&pdev->dev,
3745 "unsupported reset type: %d\n", hdev->reset_type);
3750 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3751 unsigned long *addr)
3753 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3754 struct hclge_dev *hdev = ae_dev->priv;
3756 /* first, resolve any unknown reset type to the known type(s) */
3757 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3758 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3759 HCLGE_MISC_VECTOR_INT_STS);
3760 /* we will intentionally ignore any errors from this function
3761 * as we will end up in *some* reset request in any case
3763 if (hclge_handle_hw_msix_error(hdev, addr))
3764 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3767 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3768 /* We defered the clearing of the error event which caused
3769 * interrupt since it was not posssible to do that in
3770 * interrupt context (and this is the reason we introduced
3771 * new UNKNOWN reset type). Now, the errors have been
3772 * handled and cleared in hardware we can safely enable
3773 * interrupts. This is an exception to the norm.
3775 hclge_enable_vector(&hdev->misc_vector, true);
3778 /* return the highest priority reset level amongst all */
3779 if (test_bit(HNAE3_IMP_RESET, addr)) {
3780 rst_level = HNAE3_IMP_RESET;
3781 clear_bit(HNAE3_IMP_RESET, addr);
3782 clear_bit(HNAE3_GLOBAL_RESET, addr);
3783 clear_bit(HNAE3_FUNC_RESET, addr);
3784 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3785 rst_level = HNAE3_GLOBAL_RESET;
3786 clear_bit(HNAE3_GLOBAL_RESET, addr);
3787 clear_bit(HNAE3_FUNC_RESET, addr);
3788 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3789 rst_level = HNAE3_FUNC_RESET;
3790 clear_bit(HNAE3_FUNC_RESET, addr);
3791 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3792 rst_level = HNAE3_FLR_RESET;
3793 clear_bit(HNAE3_FLR_RESET, addr);
3796 if (hdev->reset_type != HNAE3_NONE_RESET &&
3797 rst_level < hdev->reset_type)
3798 return HNAE3_NONE_RESET;
3803 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3807 switch (hdev->reset_type) {
3808 case HNAE3_IMP_RESET:
3809 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3811 case HNAE3_GLOBAL_RESET:
3812 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3821 /* For revision 0x20, the reset interrupt source
3822 * can only be cleared after hardware reset done
3824 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3825 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3828 hclge_enable_vector(&hdev->misc_vector, true);
3831 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3835 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3837 reg_val |= HCLGE_NIC_SW_RST_RDY;
3839 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3841 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3844 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3848 ret = hclge_set_all_vf_rst(hdev, true);
3852 hclge_func_reset_sync_vf(hdev);
3857 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3862 switch (hdev->reset_type) {
3863 case HNAE3_FUNC_RESET:
3864 ret = hclge_func_reset_notify_vf(hdev);
3868 ret = hclge_func_reset_cmd(hdev, 0);
3870 dev_err(&hdev->pdev->dev,
3871 "asserting function reset fail %d!\n", ret);
3875 /* After performaning pf reset, it is not necessary to do the
3876 * mailbox handling or send any command to firmware, because
3877 * any mailbox handling or command to firmware is only valid
3878 * after hclge_cmd_init is called.
3880 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3881 hdev->rst_stats.pf_rst_cnt++;
3883 case HNAE3_FLR_RESET:
3884 ret = hclge_func_reset_notify_vf(hdev);
3888 case HNAE3_IMP_RESET:
3889 hclge_handle_imp_error(hdev);
3890 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3891 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3892 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3898 /* inform hardware that preparatory work is done */
3899 msleep(HCLGE_RESET_SYNC_TIME);
3900 hclge_reset_handshake(hdev, true);
3901 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3906 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3908 #define MAX_RESET_FAIL_CNT 5
3910 if (hdev->reset_pending) {
3911 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3912 hdev->reset_pending);
3914 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3915 HCLGE_RESET_INT_M) {
3916 dev_info(&hdev->pdev->dev,
3917 "reset failed because new reset interrupt\n");
3918 hclge_clear_reset_cause(hdev);
3920 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3921 hdev->rst_stats.reset_fail_cnt++;
3922 set_bit(hdev->reset_type, &hdev->reset_pending);
3923 dev_info(&hdev->pdev->dev,
3924 "re-schedule reset task(%u)\n",
3925 hdev->rst_stats.reset_fail_cnt);
3929 hclge_clear_reset_cause(hdev);
3931 /* recover the handshake status when reset fail */
3932 hclge_reset_handshake(hdev, true);
3934 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3936 hclge_dbg_dump_rst_info(hdev);
3938 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3943 static void hclge_update_reset_level(struct hclge_dev *hdev)
3945 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3946 enum hnae3_reset_type reset_level;
3948 /* if default_reset_request has a higher level reset request,
3949 * it should be handled as soon as possible. since some errors
3950 * need this kind of reset to fix.
3952 reset_level = hclge_get_reset_level(ae_dev,
3953 &hdev->default_reset_request);
3954 if (reset_level != HNAE3_NONE_RESET)
3955 set_bit(reset_level, &hdev->reset_request);
3958 static int hclge_set_rst_done(struct hclge_dev *hdev)
3960 struct hclge_pf_rst_done_cmd *req;
3961 struct hclge_desc desc;
3964 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3965 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3966 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3968 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3969 /* To be compatible with the old firmware, which does not support
3970 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3973 if (ret == -EOPNOTSUPP) {
3974 dev_warn(&hdev->pdev->dev,
3975 "current firmware does not support command(0x%x)!\n",
3976 HCLGE_OPC_PF_RST_DONE);
3979 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3986 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3990 switch (hdev->reset_type) {
3991 case HNAE3_FUNC_RESET:
3992 case HNAE3_FLR_RESET:
3993 ret = hclge_set_all_vf_rst(hdev, false);
3995 case HNAE3_GLOBAL_RESET:
3996 case HNAE3_IMP_RESET:
3997 ret = hclge_set_rst_done(hdev);
4003 /* clear up the handshake status after re-initialize done */
4004 hclge_reset_handshake(hdev, false);
4009 static int hclge_reset_stack(struct hclge_dev *hdev)
4013 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4017 ret = hclge_reset_ae_dev(hdev->ae_dev);
4021 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4024 static int hclge_reset_prepare(struct hclge_dev *hdev)
4028 hdev->rst_stats.reset_cnt++;
4029 /* perform reset of the stack & ae device for a client */
4030 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4035 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4040 return hclge_reset_prepare_wait(hdev);
4043 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4047 hdev->rst_stats.hw_reset_done_cnt++;
4049 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4054 ret = hclge_reset_stack(hdev);
4059 hclge_clear_reset_cause(hdev);
4061 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4062 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4066 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4069 ret = hclge_reset_prepare_up(hdev);
4074 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4079 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4083 hdev->last_reset_time = jiffies;
4084 hdev->rst_stats.reset_fail_cnt = 0;
4085 hdev->rst_stats.reset_done_cnt++;
4086 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4088 hclge_update_reset_level(hdev);
/* Top-level reset sequence: prepare, wait for hardware, rebuild. On any
 * failure, fall through to the error handler which decides whether to
 * re-schedule the reset task.
 */
static void hclge_reset(struct hclge_dev *hdev)
{
	if (hclge_reset_prepare(hdev))
		goto err_reset;

	if (hclge_reset_wait(hdev))
		goto err_reset;

	if (hclge_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	if (hclge_reset_err_handle(hdev))
		hclge_reset_task_schedule(hdev);
}
4111 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4113 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4114 struct hclge_dev *hdev = ae_dev->priv;
4116 /* We might end up getting called broadly because of 2 below cases:
4117 * 1. Recoverable error was conveyed through APEI and only way to bring
4118 * normalcy is to reset.
4119 * 2. A new reset request from the stack due to timeout
4121 * For the first case,error event might not have ae handle available.
4122 * check if this is a new reset request and we are not here just because
4123 * last reset attempt did not succeed and watchdog hit us again. We will
4124 * know this if last reset request did not occur very recently (watchdog
4125 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
4126 * In case of new request we reset the "reset level" to PF reset.
4127 * And if it is a repeat reset request of the most recent one then we
4128 * want to make sure we throttle the reset request. Therefore, we will
4129 * not allow it again before 3*HZ times.
4132 handle = &hdev->vport[0].nic;
4134 if (time_before(jiffies, (hdev->last_reset_time +
4135 HCLGE_RESET_INTERVAL))) {
4136 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4138 } else if (hdev->default_reset_request) {
4140 hclge_get_reset_level(ae_dev,
4141 &hdev->default_reset_request);
4142 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4143 hdev->reset_level = HNAE3_FUNC_RESET;
4146 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4149 /* request reset & schedule reset task */
4150 set_bit(hdev->reset_level, &hdev->reset_request);
4151 hclge_reset_task_schedule(hdev);
4153 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4154 hdev->reset_level++;
4157 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4158 enum hnae3_reset_type rst_type)
4160 struct hclge_dev *hdev = ae_dev->priv;
4162 set_bit(rst_type, &hdev->default_reset_request);
4165 static void hclge_reset_timer(struct timer_list *t)
4167 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4169 /* if default_reset_request has no value, it means that this reset
4170 * request has already be handled, so just return here
4172 if (!hdev->default_reset_request)
4175 dev_info(&hdev->pdev->dev,
4176 "triggering reset in reset timer\n");
4177 hclge_reset_event(hdev->pdev, NULL);
4180 static void hclge_reset_subtask(struct hclge_dev *hdev)
4182 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4184 /* check if there is any ongoing reset in the hardware. This status can
4185 * be checked from reset_pending. If there is then, we need to wait for
4186 * hardware to complete reset.
4187 * a. If we are able to figure out in reasonable time that hardware
4188 * has fully resetted then, we can proceed with driver, client
4190 * b. else, we can come back later to check this status so re-sched
4193 hdev->last_reset_time = jiffies;
4194 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4195 if (hdev->reset_type != HNAE3_NONE_RESET)
4198 /* check if we got any *new* reset requests to be honored */
4199 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4200 if (hdev->reset_type != HNAE3_NONE_RESET)
4201 hclge_do_reset(hdev);
4203 hdev->reset_type = HNAE3_NONE_RESET;
4206 static void hclge_reset_service_task(struct hclge_dev *hdev)
4208 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4211 down(&hdev->reset_sem);
4212 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4214 hclge_reset_subtask(hdev);
4216 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4217 up(&hdev->reset_sem);
4220 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4224 /* start from vport 1 for PF is always alive */
4225 for (i = 1; i < hdev->num_alloc_vport; i++) {
4226 struct hclge_vport *vport = &hdev->vport[i];
4228 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4229 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4231 /* If vf is not alive, set to default value */
4232 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4233 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4237 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4239 unsigned long delta = round_jiffies_relative(HZ);
4241 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4244 /* Always handle the link updating to make sure link state is
4245 * updated when it is triggered by mbx.
4247 hclge_update_link_status(hdev);
4248 hclge_sync_mac_table(hdev);
4249 hclge_sync_promisc_mode(hdev);
4250 hclge_sync_fd_table(hdev);
4252 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4253 delta = jiffies - hdev->last_serv_processed;
4255 if (delta < round_jiffies_relative(HZ)) {
4256 delta = round_jiffies_relative(HZ) - delta;
4261 hdev->serv_processed_cnt++;
4262 hclge_update_vport_alive(hdev);
4264 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4265 hdev->last_serv_processed = jiffies;
4269 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4270 hclge_update_stats_for_all(hdev);
4272 hclge_update_port_info(hdev);
4273 hclge_sync_vlan_filter(hdev);
4275 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4276 hclge_rfs_filter_expire(hdev);
4278 hdev->last_serv_processed = jiffies;
4281 hclge_task_schedule(hdev, delta);
4284 static void hclge_service_task(struct work_struct *work)
4286 struct hclge_dev *hdev =
4287 container_of(work, struct hclge_dev, service_task.work);
4289 hclge_reset_service_task(hdev);
4290 hclge_mailbox_service_task(hdev);
4291 hclge_periodic_service_task(hdev);
4293 /* Handle reset and mbx again in case periodical task delays the
4294 * handling by calling hclge_task_schedule() in
4295 * hclge_periodic_service_task().
4297 hclge_reset_service_task(hdev);
4298 hclge_mailbox_service_task(hdev);
4301 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4303 /* VF handle has no client */
4304 if (!handle->client)
4305 return container_of(handle, struct hclge_vport, nic);
4306 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4307 return container_of(handle, struct hclge_vport, roce);
4309 return container_of(handle, struct hclge_vport, nic);
4312 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4313 struct hnae3_vector_info *vector_info)
4315 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4317 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4319 /* need an extend offset to config vector >= 64 */
4320 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4321 vector_info->io_addr = hdev->hw.io_base +
4322 HCLGE_VECTOR_REG_BASE +
4323 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4325 vector_info->io_addr = hdev->hw.io_base +
4326 HCLGE_VECTOR_EXT_REG_BASE +
4327 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4328 HCLGE_VECTOR_REG_OFFSET_H +
4329 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4330 HCLGE_VECTOR_REG_OFFSET;
4332 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4333 hdev->vector_irq[idx] = vector_info->vector;
4336 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4337 struct hnae3_vector_info *vector_info)
4339 struct hclge_vport *vport = hclge_get_vport(handle);
4340 struct hnae3_vector_info *vector = vector_info;
4341 struct hclge_dev *hdev = vport->back;
4346 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4347 vector_num = min(hdev->num_msi_left, vector_num);
4349 for (j = 0; j < vector_num; j++) {
4350 while (++i < hdev->num_nic_msi) {
4351 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4352 hclge_get_vector_info(hdev, i, vector);
4360 hdev->num_msi_left -= alloc;
4361 hdev->num_msi_used += alloc;
4366 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4370 for (i = 0; i < hdev->num_msi; i++)
4371 if (vector == hdev->vector_irq[i])
4377 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4379 struct hclge_vport *vport = hclge_get_vport(handle);
4380 struct hclge_dev *hdev = vport->back;
4383 vector_id = hclge_get_vector_index(hdev, vector);
4384 if (vector_id < 0) {
4385 dev_err(&hdev->pdev->dev,
4386 "Get vector index fail. vector = %d\n", vector);
4390 hclge_free_vector(hdev, vector_id);
4395 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4397 return HCLGE_RSS_KEY_SIZE;
4400 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4401 const u8 hfunc, const u8 *key)
4403 struct hclge_rss_config_cmd *req;
4404 unsigned int key_offset = 0;
4405 struct hclge_desc desc;
4410 key_counts = HCLGE_RSS_KEY_SIZE;
4411 req = (struct hclge_rss_config_cmd *)desc.data;
4413 while (key_counts) {
4414 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4417 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4418 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4420 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4421 memcpy(req->hash_key,
4422 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4424 key_counts -= key_size;
4426 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4428 dev_err(&hdev->pdev->dev,
4429 "Configure RSS config fail, status = %d\n",
4437 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4439 struct hclge_rss_indirection_table_cmd *req;
4440 struct hclge_desc desc;
4441 int rss_cfg_tbl_num;
4449 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4450 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4451 HCLGE_RSS_CFG_TBL_SIZE;
4453 for (i = 0; i < rss_cfg_tbl_num; i++) {
4454 hclge_cmd_setup_basic_desc
4455 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4457 req->start_table_index =
4458 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4459 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4460 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4461 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4462 req->rss_qid_l[j] = qid & 0xff;
4464 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4465 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4466 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4467 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4469 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4471 dev_err(&hdev->pdev->dev,
4472 "Configure rss indir table fail,status = %d\n",
4480 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4481 u16 *tc_size, u16 *tc_offset)
4483 struct hclge_rss_tc_mode_cmd *req;
4484 struct hclge_desc desc;
4488 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4489 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4491 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4494 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4495 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4496 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4497 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4498 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4499 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4500 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4502 req->rss_tc_mode[i] = cpu_to_le16(mode);
4505 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4507 dev_err(&hdev->pdev->dev,
4508 "Configure rss tc mode fail, status = %d\n", ret);
4513 static void hclge_get_rss_type(struct hclge_vport *vport)
4515 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4516 vport->rss_tuple_sets.ipv4_udp_en ||
4517 vport->rss_tuple_sets.ipv4_sctp_en ||
4518 vport->rss_tuple_sets.ipv6_tcp_en ||
4519 vport->rss_tuple_sets.ipv6_udp_en ||
4520 vport->rss_tuple_sets.ipv6_sctp_en)
4521 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4522 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4523 vport->rss_tuple_sets.ipv6_fragment_en)
4524 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4526 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4529 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4531 struct hclge_rss_input_tuple_cmd *req;
4532 struct hclge_desc desc;
4535 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4537 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4539 /* Get the tuple cfg from pf */
4540 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4541 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4542 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4543 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4544 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4545 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4546 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4547 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4548 hclge_get_rss_type(&hdev->vport[0]);
4549 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4551 dev_err(&hdev->pdev->dev,
4552 "Configure rss input fail, status = %d\n", ret);
4556 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4559 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4560 struct hclge_vport *vport = hclge_get_vport(handle);
4563 /* Get hash algorithm */
4565 switch (vport->rss_algo) {
4566 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4567 *hfunc = ETH_RSS_HASH_TOP;
4569 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4570 *hfunc = ETH_RSS_HASH_XOR;
4573 *hfunc = ETH_RSS_HASH_UNKNOWN;
4578 /* Get the RSS Key required by the user */
4580 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4582 /* Get indirect table */
4584 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4585 indir[i] = vport->rss_indirection_tbl[i];
4590 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4591 const u8 *key, const u8 hfunc)
4593 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4594 struct hclge_vport *vport = hclge_get_vport(handle);
4595 struct hclge_dev *hdev = vport->back;
4599 /* Set the RSS Hash Key if specififed by the user */
4602 case ETH_RSS_HASH_TOP:
4603 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4605 case ETH_RSS_HASH_XOR:
4606 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4608 case ETH_RSS_HASH_NO_CHANGE:
4609 hash_algo = vport->rss_algo;
4615 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4619 /* Update the shadow RSS key with user specified qids */
4620 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4621 vport->rss_algo = hash_algo;
4624 /* Update the shadow RSS table with user specified qids */
4625 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4626 vport->rss_indirection_tbl[i] = indir[i];
4628 /* Update the hardware */
4629 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4632 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4634 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4636 if (nfc->data & RXH_L4_B_2_3)
4637 hash_sets |= HCLGE_D_PORT_BIT;
4639 hash_sets &= ~HCLGE_D_PORT_BIT;
4641 if (nfc->data & RXH_IP_SRC)
4642 hash_sets |= HCLGE_S_IP_BIT;
4644 hash_sets &= ~HCLGE_S_IP_BIT;
4646 if (nfc->data & RXH_IP_DST)
4647 hash_sets |= HCLGE_D_IP_BIT;
4649 hash_sets &= ~HCLGE_D_IP_BIT;
4651 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4652 hash_sets |= HCLGE_V_TAG_BIT;
4657 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4658 struct ethtool_rxnfc *nfc,
4659 struct hclge_rss_input_tuple_cmd *req)
4661 struct hclge_dev *hdev = vport->back;
4664 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4665 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4666 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4667 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4668 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4669 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4670 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4671 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4673 tuple_sets = hclge_get_rss_hash_bits(nfc);
4674 switch (nfc->flow_type) {
4676 req->ipv4_tcp_en = tuple_sets;
4679 req->ipv6_tcp_en = tuple_sets;
4682 req->ipv4_udp_en = tuple_sets;
4685 req->ipv6_udp_en = tuple_sets;
4688 req->ipv4_sctp_en = tuple_sets;
4691 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4692 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4695 req->ipv6_sctp_en = tuple_sets;
4698 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4701 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4710 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4711 struct ethtool_rxnfc *nfc)
4713 struct hclge_vport *vport = hclge_get_vport(handle);
4714 struct hclge_dev *hdev = vport->back;
4715 struct hclge_rss_input_tuple_cmd *req;
4716 struct hclge_desc desc;
4719 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4720 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4723 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4724 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4726 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4728 dev_err(&hdev->pdev->dev,
4729 "failed to init rss tuple cmd, ret = %d\n", ret);
4733 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4735 dev_err(&hdev->pdev->dev,
4736 "Set rss tuple fail, status = %d\n", ret);
4740 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4741 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4742 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4743 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4744 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4745 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4746 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4747 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4748 hclge_get_rss_type(vport);
4752 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4755 switch (flow_type) {
4757 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4760 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4763 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4766 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4769 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4772 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4776 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4785 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4789 if (tuple_sets & HCLGE_D_PORT_BIT)
4790 tuple_data |= RXH_L4_B_2_3;
4791 if (tuple_sets & HCLGE_S_PORT_BIT)
4792 tuple_data |= RXH_L4_B_0_1;
4793 if (tuple_sets & HCLGE_D_IP_BIT)
4794 tuple_data |= RXH_IP_DST;
4795 if (tuple_sets & HCLGE_S_IP_BIT)
4796 tuple_data |= RXH_IP_SRC;
4801 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4802 struct ethtool_rxnfc *nfc)
4804 struct hclge_vport *vport = hclge_get_vport(handle);
4810 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4811 if (ret || !tuple_sets)
4814 nfc->data = hclge_convert_rss_tuple(tuple_sets);
4819 static int hclge_get_tc_size(struct hnae3_handle *handle)
4821 struct hclge_vport *vport = hclge_get_vport(handle);
4822 struct hclge_dev *hdev = vport->back;
4824 return hdev->pf_rss_size_max;
/* Program the RSS TC mode: for each enabled TC, compute the log2
 * rounded-up queue count (tc_size) and queue offset, then push the
 * tables to hardware via hclge_set_rss_tc_mode().
 */
4827 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4829 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4830 struct hclge_vport *vport = hdev->vport;
4831 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4832 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4833 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4834 struct hnae3_tc_info *tc_info;
4839 tc_info = &vport->nic.kinfo.tc_info;
4840 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4841 rss_size = tc_info->tqp_count[i];
/* skip TCs not present in the hardware TC map */
4844 if (!(hdev->hw_tc_map & BIT(i)))
4847 /* tc_size set to hardware is the log2 of roundup power of two
4848 * of rss_size, the acutal queue size is limited by indirection
4851 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4853 dev_err(&hdev->pdev->dev,
4854 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4859 roundup_size = roundup_pow_of_two(rss_size);
4860 roundup_size = ilog2(roundup_size);
4863 tc_size[i] = roundup_size;
4864 tc_offset[i] = tc_info->tqp_offset[i];
4867 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
/* Push the PF vport's full RSS state to hardware in order:
 * indirection table, hash algorithm + key, input tuples, TC mode.
 * Each step aborts the sequence on failure.
 */
4870 int hclge_rss_init_hw(struct hclge_dev *hdev)
4872 struct hclge_vport *vport = hdev->vport;
4873 u16 *rss_indir = vport[0].rss_indirection_tbl;
4874 u8 *key = vport[0].rss_hash_key;
4875 u8 hfunc = vport[0].rss_algo;
4878 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4882 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4886 ret = hclge_set_rss_input_tuple(hdev);
4890 return hclge_init_rss_tc_mode(hdev);
/* Fill the PF vport's RSS indirection table with a round-robin
 * mapping over alloc_rss_size queues.
 */
4893 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4895 struct hclge_vport *vport = &hdev->vport[0];
4898 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4899 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
/* Initialize software RSS defaults for the PF vport: choose the hash
 * algorithm by device version, set default tuple sets per flow type,
 * allocate the indirection table (devm-managed) and copy in the
 * default hash key.
 */
4902 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4904 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4905 int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4906 struct hclge_vport *vport = &hdev->vport[0];
4909 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4910 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4912 vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4913 vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4914 vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
4915 vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4916 vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4917 vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
/* older devices cannot hash on the SCTP port for IPv6 */
4918 vport->rss_tuple_sets.ipv6_sctp_en =
4919 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4920 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4921 HCLGE_RSS_INPUT_TUPLE_SCTP;
4922 vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4924 vport->rss_algo = rss_algo;
4926 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4927 sizeof(*rss_ind_tbl), GFP_KERNEL);
4931 vport->rss_indirection_tbl = rss_ind_tbl;
4932 memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
4934 hclge_rss_indir_init_cfg(hdev);
/* Map (en=true) or unmap (en=false) a chain of TQP rings to an
 * interrupt vector. Rings are batched HCLGE_VECTOR_ELEMENTS_PER_CMD
 * at a time; when a command fills up it is sent and a fresh descriptor
 * is prepared for the remaining rings.
 */
4939 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4940 int vector_id, bool en,
4941 struct hnae3_ring_chain_node *ring_chain)
4943 struct hclge_dev *hdev = vport->back;
4944 struct hnae3_ring_chain_node *node;
4945 struct hclge_desc desc;
4946 struct hclge_ctrl_vector_chain_cmd *req =
4947 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4948 enum hclge_cmd_status status;
4949 enum hclge_opcode_type op;
4950 u16 tqp_type_and_id;
4953 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4954 hclge_cmd_setup_basic_desc(&desc, op, false);
/* vector id is split into low/high fields in the command */
4955 req->int_vector_id_l = hnae3_get_field(vector_id,
4956 HCLGE_VECTOR_ID_L_M,
4957 HCLGE_VECTOR_ID_L_S);
4958 req->int_vector_id_h = hnae3_get_field(vector_id,
4959 HCLGE_VECTOR_ID_H_M,
4960 HCLGE_VECTOR_ID_H_S);
4963 for (node = ring_chain; node; node = node->next) {
4964 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4965 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4967 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4968 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4969 HCLGE_TQP_ID_S, node->tqp_index);
4970 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4972 hnae3_get_field(node->int_gl_idx,
4973 HNAE3_RING_GL_IDX_M,
4974 HNAE3_RING_GL_IDX_S));
4975 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
/* descriptor full: flush it and start a new one */
4976 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4977 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4978 req->vfid = vport->vport_id;
4980 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4982 dev_err(&hdev->pdev->dev,
4983 "Map TQP fail, status is %d.\n",
4989 hclge_cmd_setup_basic_desc(&desc,
4992 req->int_vector_id_l =
4993 hnae3_get_field(vector_id,
4994 HCLGE_VECTOR_ID_L_M,
4995 HCLGE_VECTOR_ID_L_S);
4996 req->int_vector_id_h =
4997 hnae3_get_field(vector_id,
4998 HCLGE_VECTOR_ID_H_M,
4999 HCLGE_VECTOR_ID_H_S);
/* send the final, partially-filled descriptor (i entries) */
5004 req->int_cause_num = i;
5005 req->vfid = vport->vport_id;
5006 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5008 dev_err(&hdev->pdev->dev,
5009 "Map TQP fail, status is %d.\n", status);
/* ae_ops hook: resolve the driver-local vector index from the global
 * vector number, then bind the ring chain to it.
 */
5017 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5018 struct hnae3_ring_chain_node *ring_chain)
5020 struct hclge_vport *vport = hclge_get_vport(handle);
5021 struct hclge_dev *hdev = vport->back;
5024 vector_id = hclge_get_vector_index(hdev, vector);
5025 if (vector_id < 0) {
5026 dev_err(&hdev->pdev->dev,
5027 "failed to get vector index. vector=%d\n", vector);
5031 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
/* ae_ops hook: unbind a ring chain from its vector. Skipped while a
 * reset is in progress (hardware will be reinitialized anyway).
 */
5034 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5035 struct hnae3_ring_chain_node *ring_chain)
5037 struct hclge_vport *vport = hclge_get_vport(handle);
5038 struct hclge_dev *hdev = vport->back;
5041 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5044 vector_id = hclge_get_vector_index(hdev, vector);
5045 if (vector_id < 0) {
5046 dev_err(&handle->pdev->dev,
5047 "Get vector index fail. ret =%d\n", vector_id);
5051 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5053 dev_err(&handle->pdev->dev,
5054 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
/* Send the promiscuous-mode config command for one VF/vport.
 * Fills both the extended (V3) and legacy (V1/V2) promisc fields so a
 * single command works across device generations.
 */
5060 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5061 bool en_uc, bool en_mc, bool en_bc)
5063 struct hclge_vport *vport = &hdev->vport[vf_id];
5064 struct hnae3_handle *handle = &vport->nic;
5065 struct hclge_promisc_cfg_cmd *req;
5066 struct hclge_desc desc;
5067 bool uc_tx_en = en_uc;
5071 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5073 req = (struct hclge_promisc_cfg_cmd *)desc.data;
/* "limit promisc" privilege flag restricts unicast TX promisc */
5076 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5079 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5080 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5081 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5082 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5083 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5084 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5085 req->extend_promisc = promisc_cfg;
5087 /* to be compatible with DEVICE_VERSION_V1/2 */
5089 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5090 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5091 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5092 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5093 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5094 req->promisc = promisc_cfg;
5096 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5098 dev_err(&hdev->pdev->dev,
5099 "failed to set vport %u promisc mode, ret = %d.\n",
/* Thin wrapper: set promisc mode for a vport by its vport_id. */
5105 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5106 bool en_mc_pmc, bool en_bc_pmc)
5108 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5109 en_uc_pmc, en_mc_pmc, en_bc_pmc);
/* ae_ops hook: set unicast/multicast promisc. Broadcast promisc is
 * forced on, except on pre-V2 devices where it must follow the BPE
 * netdev flag (see comment below).
 */
5112 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5115 struct hclge_vport *vport = hclge_get_vport(handle);
5116 struct hclge_dev *hdev = vport->back;
5117 bool en_bc_pmc = true;
5119 /* For device whose version below V2, if broadcast promisc enabled,
5120 * vlan filter is always bypassed. So broadcast promisc should be
5121 * disabled until user enable promisc mode
5123 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5124 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5126 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
/* Flag that promisc settings changed; the periodic service task will
 * apply them asynchronously.
 */
5130 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5132 struct hclge_vport *vport = hclge_get_vport(handle);
5133 struct hclge_dev *hdev = vport->back;
5135 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
/* Reset the FD active rule type once the rule list becomes empty. */
5138 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5140 if (hlist_empty(&hdev->fd_rule_list))
5141 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
/* Mark an FD location as used and bump the rule counter (idempotent:
 * only counts if the bitmap bit was clear).
 */
5144 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5146 if (!test_bit(location, hdev->fd_bmap)) {
5147 set_bit(location, hdev->fd_bmap);
5148 hdev->hclge_fd_rule_num++;
/* Clear an FD location's bitmap bit and decrement the rule counter
 * (idempotent: only counts if the bit was set).
 */
5152 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5154 if (test_bit(location, hdev->fd_bmap)) {
5155 clear_bit(location, hdev->fd_bmap);
5156 hdev->hclge_fd_rule_num--;
/* Unlink an FD rule node from the list, free it, and refresh the FD
 * active-type state.
 */
5160 static void hclge_fd_free_node(struct hclge_dev *hdev,
5161 struct hclge_fd_rule *rule)
5163 hlist_del(&rule->rule_node);
5165 hclge_sync_fd_state(hdev);
/* State machine for an existing FD rule node when a new request with
 * the same location arrives: replace-in-place for TO_ADD/ACTIVE,
 * free for DELETED, and for TO_DEL either free (never written to HW)
 * or defer deletion to the periodic task.
 */
5168 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5169 struct hclge_fd_rule *old_rule,
5170 struct hclge_fd_rule *new_rule,
5171 enum HCLGE_FD_NODE_STATE state)
5174 case HCLGE_FD_TO_ADD:
5175 case HCLGE_FD_ACTIVE:
5176 /* 1) if the new state is TO_ADD, just replace the old rule
5177 * with the same location, no matter its state, because the
5178 * new rule will be configured to the hardware.
5179 * 2) if the new state is ACTIVE, it means the new rule
5180 * has been configured to the hardware, so just replace
5181 * the old rule node with the same location.
5182 * 3) for it doesn't add a new node to the list, so it's
5183 * unnecessary to update the rule number and fd_bmap.
5185 new_rule->rule_node.next = old_rule->rule_node.next;
5186 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5187 memcpy(old_rule, new_rule, sizeof(*old_rule));
5190 case HCLGE_FD_DELETED:
5191 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5192 hclge_fd_free_node(hdev, old_rule);
5194 case HCLGE_FD_TO_DEL:
5195 /* if new request is TO_DEL, and old rule is existent
5196 * 1) the state of old rule is TO_DEL, we need do nothing,
5197 * because we delete rule by location, other rule content
5199 * 2) the state of old rule is ACTIVE, we need to change its
5200 * state to TO_DEL, so the rule will be deleted when periodic
5201 * task being scheduled.
5202 * 3) the state of old rule is TO_ADD, it means the rule hasn't
5203 * been added to hardware, so we just delete the rule node from
5204 * fd_rule_list directly.
5206 if (old_rule->state == HCLGE_FD_TO_ADD) {
5207 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5208 hclge_fd_free_node(hdev, old_rule);
5211 old_rule->state = HCLGE_FD_TO_DEL;
/* Find the FD rule at @location in the location-sorted list; stop
 * early once a larger location is seen. *parent records the last node
 * with a smaller location so callers can insert in order.
 */
5216 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5218 struct hclge_fd_rule **parent)
5220 struct hclge_fd_rule *rule;
5221 struct hlist_node *node;
5223 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5224 if (rule->location == location)
5226 else if (rule->location > location)
5228 /* record the parent node, use to keep the nodes in fd_rule_list
5237 /* insert fd rule node in ascend order according to rule->location */
5238 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5239 struct hclge_fd_rule *rule,
5240 struct hclge_fd_rule *parent)
5242 INIT_HLIST_NODE(&rule->rule_node);
/* with a parent, link after it; otherwise insert at the list head */
5245 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5247 hlist_add_head(&rule->rule_node, hlist);
/* Write the three user-defined field configs (outer L2/L3/L4) to
 * hardware. Each layer is enabled only while its ref_cnt is non-zero.
 */
5250 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5251 struct hclge_fd_user_def_cfg *cfg)
5253 struct hclge_fd_user_def_cfg_cmd *req;
5254 struct hclge_desc desc;
5258 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5260 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5262 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5263 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5264 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5265 req->ol2_cfg = cpu_to_le16(data);
5268 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5269 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5270 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5271 req->ol3_cfg = cpu_to_le16(data);
5274 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5275 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5276 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5277 req->ol4_cfg = cpu_to_le16(data);
5279 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5281 dev_err(&hdev->pdev->dev,
5282 "failed to set fd user def data, ret= %d\n", ret);
/* If the user-def config changed, push it to hardware. @locked tells
 * whether the caller already holds fd_rule_lock; otherwise it is taken
 * here. On failure the CHANGED bit is re-set so a later sync retries.
 */
5286 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5290 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5294 spin_lock_bh(&hdev->fd_rule_lock);
5296 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5298 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5301 spin_unlock_bh(&hdev->fd_rule_lock);
/* Validate that a new rule's user-def offset is compatible with the
 * layer's current config: each layer supports only one offset, shared
 * by all rules referencing it. A conflicting offset is allowed only
 * when the sole existing reference is the rule being replaced.
 */
5304 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5305 struct hclge_fd_rule *rule)
5307 struct hlist_head *hlist = &hdev->fd_rule_list;
5308 struct hclge_fd_rule *fd_rule, *parent = NULL;
5309 struct hclge_fd_user_def_info *info, *old_info;
5310 struct hclge_fd_user_def_cfg *cfg;
5312 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5313 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5316 /* for valid layer is start from 1, so need minus 1 to get the cfg */
5317 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5318 info = &rule->ep.user_def;
5320 if (!cfg->ref_cnt || cfg->offset == info->offset)
5323 if (cfg->ref_cnt > 1)
5326 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5328 old_info = &fd_rule->ep.user_def;
5329 if (info->layer == old_info->layer)
5334 dev_err(&hdev->pdev->dev,
5335 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
/* Take a reference on the rule's user-def layer; the first reference
 * records the offset and flags the config for a hardware sync.
 */
5340 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5341 struct hclge_fd_rule *rule)
5343 struct hclge_fd_user_def_cfg *cfg;
5345 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5346 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
/* layer numbering starts at 1; index 0 is layer 1 */
5349 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5350 if (!cfg->ref_cnt) {
5351 cfg->offset = rule->ep.user_def.offset;
5352 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
/* Drop a reference on the rule's user-def layer; when the count hits
 * zero, flag the config for a hardware sync (disables the layer).
 */
5357 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5358 struct hclge_fd_rule *rule)
5360 struct hclge_fd_user_def_cfg *cfg;
5362 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5363 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5366 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5371 if (!cfg->ref_cnt) {
5373 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
/* Apply a state change for the FD rule at @location: update the
 * existing node if found, otherwise insert @new_rule (warning if the
 * request was a delete of a nonexistent rule). User-def refcounts are
 * adjusted and synced, and a TO_ADD schedules the service task to
 * program the hardware.
 */
5377 static void hclge_update_fd_list(struct hclge_dev *hdev,
5378 enum HCLGE_FD_NODE_STATE state, u16 location,
5379 struct hclge_fd_rule *new_rule)
5381 struct hlist_head *hlist = &hdev->fd_rule_list;
5382 struct hclge_fd_rule *fd_rule, *parent = NULL;
5384 fd_rule = hclge_find_fd_rule(hlist, location, &parent);
/* existing rule: swap user-def refcounts from old to new */
5386 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5387 if (state == HCLGE_FD_ACTIVE)
5388 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5389 hclge_sync_fd_user_def_cfg(hdev, true);
5391 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5395 /* it's unlikely to fail here, because we have checked the rule
5398 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5399 dev_warn(&hdev->pdev->dev,
5400 "failed to delete fd rule %u, it's inexistent\n",
5405 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5406 hclge_sync_fd_user_def_cfg(hdev, true);
5408 hclge_fd_insert_rule_node(hlist, new_rule, parent);
5409 hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5411 if (state == HCLGE_FD_TO_ADD) {
5412 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5413 hclge_task_schedule(hdev, 0);
/* Query the flow-director mode from firmware into *fd_mode. */
5417 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5419 struct hclge_get_fd_mode_cmd *req;
5420 struct hclge_desc desc;
5423 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5425 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5427 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5429 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5433 *fd_mode = req->mode;
/* Query how many FD entries and counters firmware allocated for
 * stage 1 and stage 2; results are converted from little-endian.
 */
5438 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5439 u32 *stage1_entry_num,
5440 u32 *stage2_entry_num,
5441 u16 *stage1_counter_num,
5442 u16 *stage2_counter_num)
5444 struct hclge_get_fd_allocation_cmd *req;
5445 struct hclge_desc desc;
5448 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5450 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5452 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5454 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5459 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5460 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5461 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5462 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
/* Program the FD key configuration for one stage: key selection,
 * IPv6 word enables, and the tuple/meta-data masks (hardware expects
 * inverted masks, hence the ~).
 */
5467 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5468 enum HCLGE_FD_STAGE stage_num)
5470 struct hclge_set_fd_key_config_cmd *req;
5471 struct hclge_fd_key_cfg *stage;
5472 struct hclge_desc desc;
5475 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5477 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5478 stage = &hdev->fd_cfg.key_cfg[stage_num];
5479 req->stage = stage_num;
5480 req->key_select = stage->key_sel;
5481 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5482 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5483 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5484 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5485 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5486 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5488 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5490 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
/* Clear all user-def field configs under the rule lock, then write the
 * zeroed (disabled) config to hardware.
 */
5495 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5497 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5499 spin_lock_bh(&hdev->fd_rule_lock);
5500 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5501 spin_unlock_bh(&hdev->fd_rule_lock);
5503 hclge_fd_set_user_def_cmd(hdev, cfg);
/* Initialize flow-director configuration: read the FD mode, derive the
 * max key length, set up the stage-1 key layout (active tuples and
 * meta data), query entry/counter allocation, and program the key
 * config to hardware.
 */
5506 static int hclge_init_fd_config(struct hclge_dev *hdev)
5508 #define LOW_2_WORDS 0x03
5509 struct hclge_fd_key_cfg *key_cfg;
5512 if (!hnae3_dev_fd_supported(hdev))
5515 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5519 switch (hdev->fd_cfg.fd_mode) {
5520 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5521 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5523 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5524 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5527 dev_err(&hdev->pdev->dev,
5528 "Unsupported flow director mode %u\n",
5529 hdev->fd_cfg.fd_mode);
5533 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5534 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
/* only the low two words of IPv6 addresses enter the key */
5535 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5536 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5537 key_cfg->outer_sipv6_word_en = 0;
5538 key_cfg->outer_dipv6_word_en = 0;
5540 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5541 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5542 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5543 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5545 /* If use max 400bit key, we can support tuples for ether type */
5546 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5547 key_cfg->tuple_active |=
5548 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5549 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5550 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5553 /* roce_type is used to filter roce frames
5554 * dst_vport is used to specify the rule
5556 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5558 ret = hclge_get_fd_allocation(hdev,
5559 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5560 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5561 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5562 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5566 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
/* Write one TCAM entry (x or y half, selected by sel_x) at @loc using
 * a chained 3-descriptor command; @key bytes are split across the
 * three descriptors' tcam_data areas. entry_vld is set only on the x
 * write of an add.
 */
5569 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5570 int loc, u8 *key, bool is_add)
5572 struct hclge_fd_tcam_config_1_cmd *req1;
5573 struct hclge_fd_tcam_config_2_cmd *req2;
5574 struct hclge_fd_tcam_config_3_cmd *req3;
5575 struct hclge_desc desc[3];
5578 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5579 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5580 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5581 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5582 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5584 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5585 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5586 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5588 req1->stage = stage;
5589 req1->xy_sel = sel_x ? 1 : 0;
5590 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5591 req1->index = cpu_to_le32(loc);
5592 req1->entry_vld = sel_x ? is_add : 0;
/* scatter the key across the three descriptors in order */
5595 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5596 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5597 sizeof(req2->tcam_data));
5598 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5599 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5602 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5604 dev_err(&hdev->pdev->dev,
5605 "config tcam key fail, ret=%d\n",
/* Program the action data (AD) for the FD entry at @loc: drop/queue
 * forwarding, optional TC override (when the device supports
 * FD_FORWARD_TC), counter and next-stage fields, packed into a 64-bit
 * word.
 */
5611 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5612 struct hclge_fd_ad_data *action)
5614 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5615 struct hclge_fd_ad_config_cmd *req;
5616 struct hclge_desc desc;
5620 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5622 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5623 req->index = cpu_to_le32(loc);
5626 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5627 action->write_rule_id_to_bd);
5628 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5630 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5631 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5632 action->override_tc);
5633 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5634 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5637 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5638 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5639 action->forward_to_direct_queue);
5640 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5642 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5643 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5644 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5645 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
/* NOTE(review): next-key field is filled from counter_id here —
 * looks intentional in upstream but worth confirming.
 */
5646 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5647 action->counter_id);
5649 req->ad_data = cpu_to_le64(ad_data);
5650 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5652 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
/* Convert one rule tuple into TCAM x/y key bytes using calc_x/calc_y
 * on (data, mask) pairs. Dispatches on the tuple's key_opt width:
 * byte, le16, le32, MAC (byte-reversed), or IPv4-in-IPv6 slot.
 * Returns false when the tuple is in the rule's unused set.
 */
5657 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5658 struct hclge_fd_rule *rule)
5660 int offset, moffset, ip_offset;
5661 enum HCLGE_FD_KEY_OPT key_opt;
5662 u16 tmp_x_s, tmp_y_s;
5663 u32 tmp_x_l, tmp_y_l;
5667 if (rule->unused_tuple & BIT(tuple_bit))
5670 key_opt = tuple_key_info[tuple_bit].key_opt;
5671 offset = tuple_key_info[tuple_bit].offset;
5672 moffset = tuple_key_info[tuple_bit].moffset;
5676 calc_x(*key_x, p[offset], p[moffset]);
5677 calc_y(*key_y, p[offset], p[moffset]);
5681 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5682 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5683 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5684 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5688 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5689 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5690 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5691 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
/* MAC addresses are written into the key in reverse byte order */
5695 for (i = 0; i < ETH_ALEN; i++) {
5696 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5698 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5704 ip_offset = IPV4_INDEX * sizeof(u32);
5705 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5706 *(u32 *)(&p[moffset + ip_offset]));
5707 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5708 *(u32 *)(&p[moffset + ip_offset]));
5709 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5710 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
/* Encode a port identifier: host ports pack PF id + VF id, network
 * ports pack the physical port id; the port-type bit selects which.
 */
5718 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5719 u8 vf_id, u8 network_port_id)
5721 u32 port_number = 0;
5723 if (port_type == HOST_PORT) {
5724 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5726 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5728 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5730 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5731 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5732 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
/* Pack the active meta-data fields (ROCE type, destination vport)
 * into a 32-bit word, then convert it to TCAM x/y form and left-align
 * it (shift into the MSBs) before storing as little-endian.
 */
5738 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5739 __le32 *key_x, __le32 *key_y,
5740 struct hclge_fd_rule *rule)
5742 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5743 u8 cur_pos = 0, tuple_size, shift_bits;
5746 for (i = 0; i < MAX_META_DATA; i++) {
5747 tuple_size = meta_data_key_info[i].key_length;
5748 tuple_bit = key_cfg->meta_data_active & BIT(i);
5750 switch (tuple_bit) {
5751 case BIT(ROCE_TYPE):
5752 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5753 cur_pos += tuple_size;
5755 case BIT(DST_VPORT):
5756 port_number = hclge_get_port_number(HOST_PORT, 0,
5758 hnae3_set_field(meta_data,
5759 GENMASK(cur_pos + tuple_size, cur_pos),
5760 cur_pos, port_number);
5761 cur_pos += tuple_size;
5768 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5769 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
/* left-justify: unused low bits are shifted out of the key */
5770 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5772 *key_x = cpu_to_le32(tmp_x << shift_bits);
5773 *key_y = cpu_to_le32(tmp_y << shift_bits);
5776 /* A complete key is combined with meta data key and tuple key.
5777 * Meta data key is stored at the MSB region, and tuple key is stored at
5778 * the LSB region, unused bits will be filled 0.
5780 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5781 struct hclge_fd_rule *rule)
5783 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5784 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5785 u8 *cur_key_x, *cur_key_y;
5786 u8 meta_data_region;
5791 memset(key_x, 0, sizeof(key_x));
5792 memset(key_y, 0, sizeof(key_y));
/* append each active tuple's x/y bytes in tuple order */
5796 for (i = 0 ; i < MAX_TUPLE; i++) {
5799 tuple_size = tuple_key_info[i].key_length / 8;
5800 if (!(key_cfg->tuple_active & BIT(i)))
5803 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5806 cur_key_x += tuple_size;
5807 cur_key_y += tuple_size;
/* meta data occupies the tail (MSB) region of the key buffer */
5811 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5812 MAX_META_DATA_LENGTH / 8;
5814 hclge_fd_convert_meta_data(key_cfg,
5815 (__le32 *)(key_x + meta_data_region),
5816 (__le32 *)(key_y + meta_data_region),
/* program y half first, then x (x write sets entry valid) */
5819 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5822 dev_err(&hdev->pdev->dev,
5823 "fd key_y config fail, loc=%u, ret=%d\n",
5824 rule->queue_id, ret);
5828 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5831 dev_err(&hdev->pdev->dev,
5832 "fd key_x config fail, loc=%u, ret=%d\n",
5833 rule->queue_id, ret);
/* Build the action data for a rule — drop, TC redirect (queue offset
 * plus log2 queue count), or direct queue — and program it at the
 * rule's location.
 */
5837 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5838 struct hclge_fd_rule *rule)
5840 struct hclge_vport *vport = hdev->vport;
5841 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5842 struct hclge_fd_ad_data ad_data;
5844 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5845 ad_data.ad_id = rule->location;
5847 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5848 ad_data.drop_packet = true;
5849 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5850 ad_data.override_tc = true;
5852 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5854 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5856 ad_data.forward_to_direct_queue = true;
5857 ad_data.queue_id = rule->queue_id;
5860 ad_data.use_counter = false;
5861 ad_data.counter_id = 0;
5863 ad_data.use_next_stage = false;
5864 ad_data.next_input_key = 0;
/* write the rule id into the RX BD so the stack can identify hits */
5866 ad_data.write_rule_id_to_bd = true;
5867 ad_data.rule_id = rule->location;
5869 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
/* Build the unused-tuple mask for a TCP/UDP/SCTP-over-IPv4 spec:
 * MACs never apply; each zero field is marked unused.
 */
5872 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5875 if (!spec || !unused_tuple)
5878 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5881 *unused_tuple |= BIT(INNER_SRC_IP);
5884 *unused_tuple |= BIT(INNER_DST_IP);
5887 *unused_tuple |= BIT(INNER_SRC_PORT);
5890 *unused_tuple |= BIT(INNER_DST_PORT);
5893 *unused_tuple |= BIT(INNER_IP_TOS);
/* Build the unused-tuple mask for a raw IPv4 user spec; rejects
 * l4_4_bytes matching and non-IPv4 ip_ver values.
 */
5898 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5901 if (!spec || !unused_tuple)
5904 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5905 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5908 *unused_tuple |= BIT(INNER_SRC_IP);
5911 *unused_tuple |= BIT(INNER_DST_IP);
5914 *unused_tuple |= BIT(INNER_IP_TOS);
5917 *unused_tuple |= BIT(INNER_IP_PROTO);
5919 if (spec->l4_4_bytes)
5922 if (spec->ip_ver != ETH_RX_NFC_IP4)
/* Build the unused-tuple mask for a TCP/UDP/SCTP-over-IPv6 spec:
 * any-address IPv6 src/dst and zero ports/TOS are marked unused.
 */
5928 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5931 if (!spec || !unused_tuple)
5934 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5936 /* check whether src/dst ip address used */
5937 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5938 *unused_tuple |= BIT(INNER_SRC_IP);
5940 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5941 *unused_tuple |= BIT(INNER_DST_IP);
5944 *unused_tuple |= BIT(INNER_SRC_PORT);
5947 *unused_tuple |= BIT(INNER_DST_PORT);
5950 *unused_tuple |= BIT(INNER_IP_TOS);
/* Build the unused-tuple mask for a raw IPv6 user spec; rejects
 * l4_4_bytes matching.
 */
5955 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5958 if (!spec || !unused_tuple)
5961 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5962 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5964 /* check whether src/dst ip address used */
5965 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5966 *unused_tuple |= BIT(INNER_SRC_IP);
5968 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5969 *unused_tuple |= BIT(INNER_DST_IP);
5971 if (!spec->l4_proto)
5972 *unused_tuple |= BIT(INNER_IP_PROTO);
5975 *unused_tuple |= BIT(INNER_IP_TOS);
5977 if (spec->l4_4_bytes)
/* Build the unused-tuple mask for an ether-flow spec: IP/port/TOS/
 * proto tuples never apply; zero MACs and ether type are unused.
 */
5983 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5985 if (!spec || !unused_tuple)
5988 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5989 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5990 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5992 if (is_zero_ether_addr(spec->h_source))
5993 *unused_tuple |= BIT(INNER_SRC_MAC);
5995 if (is_zero_ether_addr(spec->h_dest))
5996 *unused_tuple |= BIT(INNER_DST_MAC);
5999 *unused_tuple |= BIT(INNER_ETH_TYPE);
/* Validate FLOW_EXT / FLOW_MAC_EXT extensions of an ethtool rule:
 * vlan-etype is unsupported, vlan_tci must be a valid VID, and MAC
 * extension needs the wide (400-bit) key mode.
 */
6004 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6005 struct ethtool_rx_flow_spec *fs,
6008 if (fs->flow_type & FLOW_EXT) {
6009 if (fs->h_ext.vlan_etype) {
6010 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6014 if (!fs->h_ext.vlan_tci)
6015 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6017 if (fs->m_ext.vlan_tci &&
6018 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6019 dev_err(&hdev->pdev->dev,
6020 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6021 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6025 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6028 if (fs->flow_type & FLOW_MAC_EXT) {
6029 if (hdev->fd_cfg.fd_mode !=
6030 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6031 dev_err(&hdev->pdev->dev,
6032 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6036 if (is_zero_ether_addr(fs->h_ext.h_dest))
6037 *unused_tuple |= BIT(INNER_DST_MAC);
6039 *unused_tuple &= ~BIT(INNER_DST_MAC);
/* Map an ethtool flow type to the user-def layer (L2/L3/L4) and clear
 * the corresponding reserved-field bit from the unused-tuple mask.
 */
6045 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6046 struct hclge_fd_user_def_info *info)
6048 switch (flow_type) {
6050 info->layer = HCLGE_FD_USER_DEF_L2;
6051 *unused_tuple &= ~BIT(INNER_L2_RSV);
6054 case IPV6_USER_FLOW:
6055 info->layer = HCLGE_FD_USER_DEF_L3;
6056 *unused_tuple &= ~BIT(INNER_L3_RSV);
6062 info->layer = HCLGE_FD_USER_DEF_L4;
6063 *unused_tuple &= ~BIT(INNER_L4_RSV);
6072 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6074 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6077 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6078 struct ethtool_rx_flow_spec *fs,
6080 struct hclge_fd_user_def_info *info)
6082 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6083 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6084 u16 data, offset, data_mask, offset_mask;
6087 info->layer = HCLGE_FD_USER_DEF_NONE;
6088 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6090 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6093 /* user-def data from ethtool is 64 bit value, the bit0~15 is used
6094 * for data, and bit32~47 is used for offset.
6096 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6097 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6098 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6099 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6101 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6102 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6106 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6107 dev_err(&hdev->pdev->dev,
6108 "user-def offset[%u] should be no more than %u\n",
6109 offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6113 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6114 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6118 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6120 dev_err(&hdev->pdev->dev,
6121 "unsupported flow type for user-def bytes, ret = %d\n",
6127 info->data_mask = data_mask;
6128 info->offset = offset;
6133 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6134 struct ethtool_rx_flow_spec *fs,
6136 struct hclge_fd_user_def_info *info)
6141 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6142 dev_err(&hdev->pdev->dev,
6143 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6145 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6149 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6153 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6154 switch (flow_type) {
6158 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6162 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6168 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6171 case IPV6_USER_FLOW:
6172 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6176 if (hdev->fd_cfg.fd_mode !=
6177 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6178 dev_err(&hdev->pdev->dev,
6179 "ETHER_FLOW is not supported in current fd mode!\n");
6183 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6187 dev_err(&hdev->pdev->dev,
6188 "unsupported protocol type, protocol type = %#x\n",
6194 dev_err(&hdev->pdev->dev,
6195 "failed to check flow union tuple, ret = %d\n",
6200 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6203 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6204 struct ethtool_rx_flow_spec *fs,
6205 struct hclge_fd_rule *rule, u8 ip_proto)
6207 rule->tuples.src_ip[IPV4_INDEX] =
6208 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6209 rule->tuples_mask.src_ip[IPV4_INDEX] =
6210 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6212 rule->tuples.dst_ip[IPV4_INDEX] =
6213 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6214 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6215 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6217 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6218 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6220 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6221 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6223 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6224 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6226 rule->tuples.ether_proto = ETH_P_IP;
6227 rule->tuples_mask.ether_proto = 0xFFFF;
6229 rule->tuples.ip_proto = ip_proto;
6230 rule->tuples_mask.ip_proto = 0xFF;
6233 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6234 struct ethtool_rx_flow_spec *fs,
6235 struct hclge_fd_rule *rule)
6237 rule->tuples.src_ip[IPV4_INDEX] =
6238 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6239 rule->tuples_mask.src_ip[IPV4_INDEX] =
6240 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6242 rule->tuples.dst_ip[IPV4_INDEX] =
6243 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6244 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6245 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6247 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6248 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6250 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6251 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6253 rule->tuples.ether_proto = ETH_P_IP;
6254 rule->tuples_mask.ether_proto = 0xFFFF;
6257 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6258 struct ethtool_rx_flow_spec *fs,
6259 struct hclge_fd_rule *rule, u8 ip_proto)
6261 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6263 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6266 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6268 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6271 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6272 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6274 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6275 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6277 rule->tuples.ether_proto = ETH_P_IPV6;
6278 rule->tuples_mask.ether_proto = 0xFFFF;
6280 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6281 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6283 rule->tuples.ip_proto = ip_proto;
6284 rule->tuples_mask.ip_proto = 0xFF;
6287 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6288 struct ethtool_rx_flow_spec *fs,
6289 struct hclge_fd_rule *rule)
6291 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6293 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6296 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6298 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6301 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6302 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6304 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6305 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6307 rule->tuples.ether_proto = ETH_P_IPV6;
6308 rule->tuples_mask.ether_proto = 0xFFFF;
6311 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6312 struct ethtool_rx_flow_spec *fs,
6313 struct hclge_fd_rule *rule)
6315 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6316 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6318 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6319 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6321 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6322 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6325 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6326 struct hclge_fd_rule *rule)
6328 switch (info->layer) {
6329 case HCLGE_FD_USER_DEF_L2:
6330 rule->tuples.l2_user_def = info->data;
6331 rule->tuples_mask.l2_user_def = info->data_mask;
6333 case HCLGE_FD_USER_DEF_L3:
6334 rule->tuples.l3_user_def = info->data;
6335 rule->tuples_mask.l3_user_def = info->data_mask;
6337 case HCLGE_FD_USER_DEF_L4:
6338 rule->tuples.l4_user_def = (u32)info->data << 16;
6339 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6345 rule->ep.user_def = *info;
6348 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6349 struct ethtool_rx_flow_spec *fs,
6350 struct hclge_fd_rule *rule,
6351 struct hclge_fd_user_def_info *info)
6353 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6355 switch (flow_type) {
6357 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6360 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6363 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6366 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6369 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6372 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6375 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6377 case IPV6_USER_FLOW:
6378 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6381 hclge_fd_get_ether_tuple(hdev, fs, rule);
6387 if (fs->flow_type & FLOW_EXT) {
6388 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6389 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6390 hclge_fd_get_user_def_tuple(info, rule);
6393 if (fs->flow_type & FLOW_MAC_EXT) {
6394 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6395 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6401 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6402 struct hclge_fd_rule *rule)
6406 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6410 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6413 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6414 struct hclge_fd_rule *rule)
6418 spin_lock_bh(&hdev->fd_rule_lock);
6420 if (hdev->fd_active_type != rule->rule_type &&
6421 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6422 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6423 dev_err(&hdev->pdev->dev,
6424 "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6425 rule->rule_type, hdev->fd_active_type);
6426 spin_unlock_bh(&hdev->fd_rule_lock);
6430 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6434 ret = hclge_clear_arfs_rules(hdev);
6438 ret = hclge_fd_config_rule(hdev, rule);
6442 rule->state = HCLGE_FD_ACTIVE;
6443 hdev->fd_active_type = rule->rule_type;
6444 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6447 spin_unlock_bh(&hdev->fd_rule_lock);
6451 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6453 struct hclge_vport *vport = hclge_get_vport(handle);
6454 struct hclge_dev *hdev = vport->back;
6456 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6459 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6460 u16 *vport_id, u8 *action, u16 *queue_id)
6462 struct hclge_vport *vport = hdev->vport;
6464 if (ring_cookie == RX_CLS_FLOW_DISC) {
6465 *action = HCLGE_FD_ACTION_DROP_PACKET;
6467 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6468 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6471 if (vf > hdev->num_req_vfs) {
6472 dev_err(&hdev->pdev->dev,
6473 "Error: vf id (%u) > max vf num (%u)\n",
6474 vf, hdev->num_req_vfs);
6478 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6479 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6482 dev_err(&hdev->pdev->dev,
6483 "Error: queue id (%u) > max tqp num (%u)\n",
6488 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6495 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6496 struct ethtool_rxnfc *cmd)
6498 struct hclge_vport *vport = hclge_get_vport(handle);
6499 struct hclge_dev *hdev = vport->back;
6500 struct hclge_fd_user_def_info info;
6501 u16 dst_vport_id = 0, q_index = 0;
6502 struct ethtool_rx_flow_spec *fs;
6503 struct hclge_fd_rule *rule;
6508 if (!hnae3_dev_fd_supported(hdev)) {
6509 dev_err(&hdev->pdev->dev,
6510 "flow table director is not supported\n");
6515 dev_err(&hdev->pdev->dev,
6516 "please enable flow director first\n");
6520 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6522 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6526 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6531 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6535 ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6541 rule->flow_type = fs->flow_type;
6542 rule->location = fs->location;
6543 rule->unused_tuple = unused;
6544 rule->vf_id = dst_vport_id;
6545 rule->queue_id = q_index;
6546 rule->action = action;
6547 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6549 ret = hclge_add_fd_entry_common(hdev, rule);
6556 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6557 struct ethtool_rxnfc *cmd)
6559 struct hclge_vport *vport = hclge_get_vport(handle);
6560 struct hclge_dev *hdev = vport->back;
6561 struct ethtool_rx_flow_spec *fs;
6564 if (!hnae3_dev_fd_supported(hdev))
6567 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6569 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6572 spin_lock_bh(&hdev->fd_rule_lock);
6573 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6574 !test_bit(fs->location, hdev->fd_bmap)) {
6575 dev_err(&hdev->pdev->dev,
6576 "Delete fail, rule %u is inexistent\n", fs->location);
6577 spin_unlock_bh(&hdev->fd_rule_lock);
6581 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6586 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6589 spin_unlock_bh(&hdev->fd_rule_lock);
6593 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6596 struct hclge_fd_rule *rule;
6597 struct hlist_node *node;
6600 if (!hnae3_dev_fd_supported(hdev))
6603 spin_lock_bh(&hdev->fd_rule_lock);
6605 for_each_set_bit(location, hdev->fd_bmap,
6606 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6607 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6611 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6613 hlist_del(&rule->rule_node);
6616 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6617 hdev->hclge_fd_rule_num = 0;
6618 bitmap_zero(hdev->fd_bmap,
6619 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6622 spin_unlock_bh(&hdev->fd_rule_lock);
6625 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6627 hclge_clear_fd_rules_in_list(hdev, true);
6628 hclge_fd_disable_user_def(hdev);
6631 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6633 struct hclge_vport *vport = hclge_get_vport(handle);
6634 struct hclge_dev *hdev = vport->back;
6635 struct hclge_fd_rule *rule;
6636 struct hlist_node *node;
6638 /* Return ok here, because reset error handling will check this
6639 * return value. If error is returned here, the reset process will
6642 if (!hnae3_dev_fd_supported(hdev))
6645 /* if fd is disabled, should not restore it when reset */
6649 spin_lock_bh(&hdev->fd_rule_lock);
6650 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6651 if (rule->state == HCLGE_FD_ACTIVE)
6652 rule->state = HCLGE_FD_TO_ADD;
6654 spin_unlock_bh(&hdev->fd_rule_lock);
6655 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6660 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6661 struct ethtool_rxnfc *cmd)
6663 struct hclge_vport *vport = hclge_get_vport(handle);
6664 struct hclge_dev *hdev = vport->back;
6666 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6669 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6670 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6675 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6676 struct ethtool_tcpip4_spec *spec,
6677 struct ethtool_tcpip4_spec *spec_mask)
6679 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6680 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6681 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6683 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6684 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6685 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6687 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6688 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6689 0 : cpu_to_be16(rule->tuples_mask.src_port);
6691 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6692 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6693 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6695 spec->tos = rule->tuples.ip_tos;
6696 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6697 0 : rule->tuples_mask.ip_tos;
6700 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6701 struct ethtool_usrip4_spec *spec,
6702 struct ethtool_usrip4_spec *spec_mask)
6704 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6705 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6706 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6708 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6709 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6710 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6712 spec->tos = rule->tuples.ip_tos;
6713 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6714 0 : rule->tuples_mask.ip_tos;
6716 spec->proto = rule->tuples.ip_proto;
6717 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6718 0 : rule->tuples_mask.ip_proto;
6720 spec->ip_ver = ETH_RX_NFC_IP4;
6723 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6724 struct ethtool_tcpip6_spec *spec,
6725 struct ethtool_tcpip6_spec *spec_mask)
6727 cpu_to_be32_array(spec->ip6src,
6728 rule->tuples.src_ip, IPV6_SIZE);
6729 cpu_to_be32_array(spec->ip6dst,
6730 rule->tuples.dst_ip, IPV6_SIZE);
6731 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6732 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6734 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6737 if (rule->unused_tuple & BIT(INNER_DST_IP))
6738 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6740 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6743 spec->tclass = rule->tuples.ip_tos;
6744 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6745 0 : rule->tuples_mask.ip_tos;
6747 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6748 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6749 0 : cpu_to_be16(rule->tuples_mask.src_port);
6751 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6752 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6753 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6756 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6757 struct ethtool_usrip6_spec *spec,
6758 struct ethtool_usrip6_spec *spec_mask)
6760 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6761 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6762 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6763 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6765 cpu_to_be32_array(spec_mask->ip6src,
6766 rule->tuples_mask.src_ip, IPV6_SIZE);
6768 if (rule->unused_tuple & BIT(INNER_DST_IP))
6769 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6771 cpu_to_be32_array(spec_mask->ip6dst,
6772 rule->tuples_mask.dst_ip, IPV6_SIZE);
6774 spec->tclass = rule->tuples.ip_tos;
6775 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6776 0 : rule->tuples_mask.ip_tos;
6778 spec->l4_proto = rule->tuples.ip_proto;
6779 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6780 0 : rule->tuples_mask.ip_proto;
6783 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6784 struct ethhdr *spec,
6785 struct ethhdr *spec_mask)
6787 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6788 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6790 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6791 eth_zero_addr(spec_mask->h_source);
6793 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6795 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6796 eth_zero_addr(spec_mask->h_dest);
6798 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6800 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6801 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6802 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6805 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6806 struct hclge_fd_rule *rule)
6808 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6809 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6810 fs->h_ext.data[0] = 0;
6811 fs->h_ext.data[1] = 0;
6812 fs->m_ext.data[0] = 0;
6813 fs->m_ext.data[1] = 0;
6815 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6816 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6818 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6819 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6823 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6824 struct hclge_fd_rule *rule)
6826 if (fs->flow_type & FLOW_EXT) {
6827 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6828 fs->m_ext.vlan_tci =
6829 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6830 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6832 hclge_fd_get_user_def_info(fs, rule);
6835 if (fs->flow_type & FLOW_MAC_EXT) {
6836 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6837 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6838 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6840 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6841 rule->tuples_mask.dst_mac);
6845 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6846 struct ethtool_rxnfc *cmd)
6848 struct hclge_vport *vport = hclge_get_vport(handle);
6849 struct hclge_fd_rule *rule = NULL;
6850 struct hclge_dev *hdev = vport->back;
6851 struct ethtool_rx_flow_spec *fs;
6852 struct hlist_node *node2;
6854 if (!hnae3_dev_fd_supported(hdev))
6857 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6859 spin_lock_bh(&hdev->fd_rule_lock);
6861 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6862 if (rule->location >= fs->location)
6866 if (!rule || fs->location != rule->location) {
6867 spin_unlock_bh(&hdev->fd_rule_lock);
6872 fs->flow_type = rule->flow_type;
6873 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6877 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6878 &fs->m_u.tcp_ip4_spec);
6881 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6882 &fs->m_u.usr_ip4_spec);
6887 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6888 &fs->m_u.tcp_ip6_spec);
6890 case IPV6_USER_FLOW:
6891 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6892 &fs->m_u.usr_ip6_spec);
6894 /* The flow type of fd rule has been checked before adding in to rule
6895 * list. As other flow types have been handled, it must be ETHER_FLOW
6896 * for the default case
6899 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6900 &fs->m_u.ether_spec);
6904 hclge_fd_get_ext_info(fs, rule);
6906 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6907 fs->ring_cookie = RX_CLS_FLOW_DISC;
6911 fs->ring_cookie = rule->queue_id;
6912 vf_id = rule->vf_id;
6913 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6914 fs->ring_cookie |= vf_id;
6917 spin_unlock_bh(&hdev->fd_rule_lock);
6922 static int hclge_get_all_rules(struct hnae3_handle *handle,
6923 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6925 struct hclge_vport *vport = hclge_get_vport(handle);
6926 struct hclge_dev *hdev = vport->back;
6927 struct hclge_fd_rule *rule;
6928 struct hlist_node *node2;
6931 if (!hnae3_dev_fd_supported(hdev))
6934 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6936 spin_lock_bh(&hdev->fd_rule_lock);
6937 hlist_for_each_entry_safe(rule, node2,
6938 &hdev->fd_rule_list, rule_node) {
6939 if (cnt == cmd->rule_cnt) {
6940 spin_unlock_bh(&hdev->fd_rule_lock);
6944 if (rule->state == HCLGE_FD_TO_DEL)
6947 rule_locs[cnt] = rule->location;
6951 spin_unlock_bh(&hdev->fd_rule_lock);
6953 cmd->rule_cnt = cnt;
6958 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6959 struct hclge_fd_rule_tuples *tuples)
6961 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6962 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6964 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6965 tuples->ip_proto = fkeys->basic.ip_proto;
6966 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6968 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6969 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6970 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6974 for (i = 0; i < IPV6_SIZE; i++) {
6975 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6976 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6981 /* traverse all rules, check whether an existed rule has the same tuples */
6982 static struct hclge_fd_rule *
6983 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6984 const struct hclge_fd_rule_tuples *tuples)
6986 struct hclge_fd_rule *rule = NULL;
6987 struct hlist_node *node;
6989 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6990 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6997 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6998 struct hclge_fd_rule *rule)
7000 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7001 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7002 BIT(INNER_SRC_PORT);
7005 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7006 rule->state = HCLGE_FD_TO_ADD;
7007 if (tuples->ether_proto == ETH_P_IP) {
7008 if (tuples->ip_proto == IPPROTO_TCP)
7009 rule->flow_type = TCP_V4_FLOW;
7011 rule->flow_type = UDP_V4_FLOW;
7013 if (tuples->ip_proto == IPPROTO_TCP)
7014 rule->flow_type = TCP_V6_FLOW;
7016 rule->flow_type = UDP_V6_FLOW;
7018 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7019 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7022 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7023 u16 flow_id, struct flow_keys *fkeys)
7025 struct hclge_vport *vport = hclge_get_vport(handle);
7026 struct hclge_fd_rule_tuples new_tuples = {};
7027 struct hclge_dev *hdev = vport->back;
7028 struct hclge_fd_rule *rule;
7031 if (!hnae3_dev_fd_supported(hdev))
7034 /* when there is already fd rule existed add by user,
7035 * arfs should not work
7037 spin_lock_bh(&hdev->fd_rule_lock);
7038 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7039 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7040 spin_unlock_bh(&hdev->fd_rule_lock);
7044 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7046 /* check is there flow director filter existed for this flow,
7047 * if not, create a new filter for it;
7048 * if filter exist with different queue id, modify the filter;
7049 * if filter exist with same queue id, do nothing
7051 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7053 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7054 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7055 spin_unlock_bh(&hdev->fd_rule_lock);
7059 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7061 spin_unlock_bh(&hdev->fd_rule_lock);
7065 rule->location = bit_id;
7066 rule->arfs.flow_id = flow_id;
7067 rule->queue_id = queue_id;
7068 hclge_fd_build_arfs_rule(&new_tuples, rule);
7069 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7070 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7071 } else if (rule->queue_id != queue_id) {
7072 rule->queue_id = queue_id;
7073 rule->state = HCLGE_FD_TO_ADD;
7074 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7075 hclge_task_schedule(hdev, 0);
7077 spin_unlock_bh(&hdev->fd_rule_lock);
7078 return rule->location;
/* Periodically ask the RPS core whether active aRFS rules may expire, and
 * mark expirable ones for deletion by the service task.
 */
static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->state != HCLGE_FD_ACTIVE)
			continue;
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->arfs.flow_id, rule->location)) {
			rule->state = HCLGE_FD_TO_DEL;
			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
#endif
}
/* make sure being called after lock up with fd_rule_lock */
static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
		return 0;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		switch (rule->state) {
		case HCLGE_FD_TO_DEL:
		case HCLGE_FD_ACTIVE:
			/* remove the hardware entry first, then fall through
			 * to release the software rule
			 */
			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
						   rule->location, NULL, false);
			if (ret)
				return ret;
			fallthrough;
		case HCLGE_FD_TO_ADD:
			hclge_fd_dec_rule_cnt(hdev, rule->location);
			hlist_del(&rule->rule_node);
			kfree(rule);
			break;
		default:
			break;
		}
	}
	hclge_sync_fd_state(hdev);

#endif
	return 0;
}
7141 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7142 struct hclge_fd_rule *rule)
7144 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7145 struct flow_match_basic match;
7146 u16 ethtype_key, ethtype_mask;
7148 flow_rule_match_basic(flow, &match);
7149 ethtype_key = ntohs(match.key->n_proto);
7150 ethtype_mask = ntohs(match.mask->n_proto);
7152 if (ethtype_key == ETH_P_ALL) {
7156 rule->tuples.ether_proto = ethtype_key;
7157 rule->tuples_mask.ether_proto = ethtype_mask;
7158 rule->tuples.ip_proto = match.key->ip_proto;
7159 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7161 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7162 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7166 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7167 struct hclge_fd_rule *rule)
7169 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7170 struct flow_match_eth_addrs match;
7172 flow_rule_match_eth_addrs(flow, &match);
7173 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7174 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7175 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7176 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7178 rule->unused_tuple |= BIT(INNER_DST_MAC);
7179 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7183 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7184 struct hclge_fd_rule *rule)
7186 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7187 struct flow_match_vlan match;
7189 flow_rule_match_vlan(flow, &match);
7190 rule->tuples.vlan_tag1 = match.key->vlan_id |
7191 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7192 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7193 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7195 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7199 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7200 struct hclge_fd_rule *rule)
7204 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7205 struct flow_match_control match;
7207 flow_rule_match_control(flow, &match);
7208 addr_type = match.key->addr_type;
7211 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7212 struct flow_match_ipv4_addrs match;
7214 flow_rule_match_ipv4_addrs(flow, &match);
7215 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7216 rule->tuples_mask.src_ip[IPV4_INDEX] =
7217 be32_to_cpu(match.mask->src);
7218 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7219 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7220 be32_to_cpu(match.mask->dst);
7221 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7222 struct flow_match_ipv6_addrs match;
7224 flow_rule_match_ipv6_addrs(flow, &match);
7225 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7227 be32_to_cpu_array(rule->tuples_mask.src_ip,
7228 match.mask->src.s6_addr32, IPV6_SIZE);
7229 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7231 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7232 match.mask->dst.s6_addr32, IPV6_SIZE);
7234 rule->unused_tuple |= BIT(INNER_SRC_IP);
7235 rule->unused_tuple |= BIT(INNER_DST_IP);
/* Copy the L4 src/dst port key+mask from a tc-flower match into the FD
 * rule tuples (host order); otherwise mark both port tuples as unused.
 */
7239 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7240 struct hclge_fd_rule *rule)
7242 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7243 struct flow_match_ports match;
7245 flow_rule_match_ports(flow, &match);
7247 rule->tuples.src_port = be16_to_cpu(match.key->src);
7248 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7249 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7250 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7252 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7253 rule->unused_tuple |= BIT(INNER_DST_PORT);
/* Translate a tc cls_flower offload request into an FD rule.
 * Rejects any dissector key outside the supported set (control/basic/
 * eth-addrs/vlan/ipv4/ipv6/ports), then fills the rule tuples via the
 * per-key helpers. Returns 0 on success (error return lines not shown
 * in this excerpt).
 */
7257 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7258 struct flow_cls_offload *cls_flower,
7259 struct hclge_fd_rule *rule)
7261 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7262 struct flow_dissector *dissector = flow->match.dissector;
7264 if (dissector->used_keys &
7265 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7266 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7267 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7268 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7269 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7270 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7271 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7272 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7273 dissector->used_keys);
7277 hclge_get_cls_key_basic(flow, rule);
7278 hclge_get_cls_key_mac(flow, rule);
7279 hclge_get_cls_key_vlan(flow, rule);
7280 hclge_get_cls_key_ip(flow, rule);
7281 hclge_get_cls_key_port(flow, rule);
/* Validate a cls_flower request before adding it: the target traffic
 * class must be within [0, tc_max], the tc priority must be within the
 * stage-1 rule-number range, and the (prio - 1) FD location must not be
 * in use already. Error return lines are not shown in this excerpt.
 */
7286 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7287 struct flow_cls_offload *cls_flower, int tc)
7289 u32 prio = cls_flower->common.prio;
7291 if (tc < 0 || tc > hdev->tc_max) {
7292 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7297 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7298 dev_err(&hdev->pdev->dev,
7299 "prio %u should be in range[1, %u]\n",
7300 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
/* prio maps 1:1 onto the FD TCAM location (prio - 1) */
7304 if (test_bit(prio - 1, hdev->fd_bmap)) {
7305 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
/* hnae3 ops hook: add a tc-flower rule that steers matching packets to
 * traffic class @tc. Validates the request, allocates and parses the FD
 * rule, records the flower cookie for later deletion, and installs it
 * through the common FD-entry path.
 */
7311 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7312 struct flow_cls_offload *cls_flower,
7315 struct hclge_vport *vport = hclge_get_vport(handle);
7316 struct hclge_dev *hdev = vport->back;
7317 struct hclge_fd_rule *rule;
7320 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7322 dev_err(&hdev->pdev->dev,
7323 "failed to check cls flower params, ret = %d\n", ret);
7327 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7331 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7337 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7338 rule->cls_flower.tc = tc;
/* location is derived from the tc prio checked above */
7339 rule->location = cls_flower->common.prio - 1;
7341 rule->cls_flower.cookie = cls_flower->cookie;
7342 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7344 ret = hclge_add_fd_entry_common(hdev, rule);
/* Look up an FD rule by its tc-flower cookie; returns the matching rule
 * (NULL-return line not shown in this excerpt). Caller must hold
 * fd_rule_lock, since it walks hdev->fd_rule_list.
 */
7351 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7352 unsigned long cookie)
7354 struct hclge_fd_rule *rule;
7355 struct hlist_node *node;
7357 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7358 if (rule->cls_flower.cookie == cookie)
/* hnae3 ops hook: delete the tc-flower rule identified by its cookie.
 * Under fd_rule_lock: find the rule, invalidate its stage-1 TCAM entry,
 * then mark it deleted in the software FD list.
 */
7365 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7366 struct flow_cls_offload *cls_flower)
7368 struct hclge_vport *vport = hclge_get_vport(handle);
7369 struct hclge_dev *hdev = vport->back;
7370 struct hclge_fd_rule *rule;
7373 spin_lock_bh(&hdev->fd_rule_lock);
7375 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
/* rule not found: drop the lock and bail out (return line not shown) */
7377 spin_unlock_bh(&hdev->fd_rule_lock);
7381 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
/* TCAM config failed: drop the lock and bail out */
7384 spin_unlock_bh(&hdev->fd_rule_lock);
7388 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7389 spin_unlock_bh(&hdev->fd_rule_lock);
/* Push pending FD rule changes to hardware. Runs only when the
 * FD_TBL_CHANGED flag was set; for each rule in TO_ADD state it writes
 * the rule to hw, for TO_DEL it invalidates the TCAM entry and frees the
 * node. On failure the CHANGED flag is re-set so a later service-task
 * pass retries (the error/retry branch lines are not shown here).
 */
7394 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7396 struct hclge_fd_rule *rule;
7397 struct hlist_node *node;
7400 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7403 spin_lock_bh(&hdev->fd_rule_lock);
7405 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7406 switch (rule->state) {
7407 case HCLGE_FD_TO_ADD:
7408 ret = hclge_fd_config_rule(hdev, rule);
7411 rule->state = HCLGE_FD_ACTIVE;
7413 case HCLGE_FD_TO_DEL:
7414 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7415 rule->location, NULL, false);
7418 hclge_fd_dec_rule_cnt(hdev, rule->location);
7419 hclge_fd_free_node(hdev, rule);
/* failure path: re-arm the CHANGED flag so the sync is retried */
7428 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7430 spin_unlock_bh(&hdev->fd_rule_lock);
/* Periodic FD maintenance: honor a pending clear-all request (dropping
 * the sw list only for ARFS-type rules), refresh the user-defined field
 * config, then flush pending rule add/del operations to hardware.
 */
7433 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7435 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7436 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7438 hclge_clear_fd_rules_in_list(hdev, clear_list);
7441 hclge_sync_fd_user_def_cfg(hdev, false);
7443 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
/* Report whether a hardware reset is in progress: true if either the
 * global-reset or the function-reset status register reads non-zero.
 */
7446 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7448 struct hclge_vport *vport = hclge_get_vport(handle);
7449 struct hclge_dev *hdev = vport->back;
7451 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7452 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
/* Report whether the command queue is currently disabled for this PF. */
7455 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7457 struct hclge_vport *vport = hclge_get_vport(handle);
7458 struct hclge_dev *hdev = vport->back;
7460 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
/* Report whether the driver's reset handler is currently running. */
7463 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7465 struct hclge_vport *vport = hclge_get_vport(handle);
7466 struct hclge_dev *hdev = vport->back;
7468 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
/* Return the number of hardware resets completed so far on this device. */
7471 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7473 struct hclge_vport *vport = hclge_get_vport(handle);
7474 struct hclge_dev *hdev = vport->back;
7476 return hdev->rst_stats.hw_reset_done_cnt;
/* hnae3 ops hook: enable/disable the flow director. On disable a
 * clear-all is requested; on enable previously stored entries are
 * restored. The actual hw work is deferred to the service task.
 */
7479 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7481 struct hclge_vport *vport = hclge_get_vport(handle);
7482 struct hclge_dev *hdev = vport->back;
7484 hdev->fd_en = enable;
7487 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7489 hclge_restore_fd_entries(handle);
7491 hclge_task_schedule(hdev, 0);
/* Enable or disable the MAC. Builds the CONFIG_MAC_MODE command with
 * tx/rx enable, padding, FCS generation/strip and oversize/undersize
 * truncation bits (the bit-set lines shown are presumably guarded by
 * @enable on a line not visible here — TODO confirm) and sends it to fw;
 * failure is only logged since callers cannot recover.
 */
7494 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7496 struct hclge_desc desc;
7497 struct hclge_config_mac_mode_cmd *req =
7498 (struct hclge_config_mac_mode_cmd *)desc.data;
7502 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7505 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7506 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7507 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7508 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7509 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7510 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7511 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7512 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7513 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7514 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7517 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7519 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7521 dev_err(&hdev->pdev->dev,
7522 "mac enable fail, ret =%d.\n", ret);
/* Read-modify-write the per-function MAC/VLAN switch parameter for
 * @vfid: read the current value from fw, merge in @switch_param under
 * @param_mask, and write it back with the same descriptor reused.
 */
7525 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7526 u8 switch_param, u8 param_mask)
7528 struct hclge_mac_vlan_switch_cmd *req;
7529 struct hclge_desc desc;
7533 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7534 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7536 /* read current config parameter */
7537 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7539 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7540 req->func_id = cpu_to_le32(func_id);
7542 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7544 dev_err(&hdev->pdev->dev,
7545 "read mac vlan switch parameter fail, ret = %d\n", ret);
7549 /* modify and write new config parameter */
7550 hclge_cmd_reuse_desc(&desc, false);
7551 req->switch_param = (req->switch_param & param_mask) | switch_param;
7552 req->param_mask = param_mask;
7554 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7556 dev_err(&hdev->pdev->dev,
7557 "set mac vlan switch parameter fail, ret = %d\n", ret);
/* Poll the PHY until its link state equals the expected value, sleeping
 * HCLGE_LINK_STATUS_MS between reads, up to 200 attempts; read errors
 * are logged and abort the wait.
 */
7561 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7564 #define HCLGE_PHY_LINK_STATUS_NUM 200
7566 struct phy_device *phydev = hdev->hw.mac.phydev;
7571 ret = phy_read_status(phydev);
7573 dev_err(&hdev->pdev->dev,
7574 "phy update link status fail, ret = %d\n", ret);
7578 if (phydev->link == link_ret)
7581 msleep(HCLGE_LINK_STATUS_MS);
7582 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
/* Poll the MAC link status until it equals @link_ret, sleeping between
 * reads, up to 100 attempts (success/timeout return lines not shown in
 * this excerpt).
 */
7585 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7587 #define HCLGE_MAC_LINK_STATUS_NUM 100
7594 ret = hclge_get_mac_link_status(hdev, &link_status);
7597 if (link_status == link_ret)
7600 msleep(HCLGE_LINK_STATUS_MS);
7601 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
/* Wait for both PHY (when applicable) and MAC link status to reach the
 * up/down state implied by @en; returns the MAC wait result.
 */
7605 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7610 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7613 hclge_phy_link_status_wait(hdev, link_ret);
7615 return hclge_mac_link_status_wait(hdev, link_ret);
/* Enable/disable MAC app-level loopback via read-modify-write of the
 * CONFIG_MAC_MODE command: read current mode, flip the APP_LP bit per
 * @en, then write back reusing the same descriptor.
 */
7618 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7620 struct hclge_config_mac_mode_cmd *req;
7621 struct hclge_desc desc;
7625 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7626 /* 1 Read out the MAC mode config at first */
7627 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7628 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7630 dev_err(&hdev->pdev->dev,
7631 "mac loopback get fail, ret =%d.\n", ret);
7635 /* 2 Then setup the loopback flag */
7636 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7637 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7639 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7641 /* 3 Config mac work mode with loopback flag
7642 * and its original configure parameters
7644 hclge_cmd_reuse_desc(&desc, false);
7645 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7647 dev_err(&hdev->pdev->dev,
7648 "mac loopback set fail, ret =%d.\n", ret);
/* Configure serdes (serial/parallel) or internal GE-PHY loopback via the
 * COMMON_LOOPBACK firmware command, then poll (10 ms x 100) until the fw
 * reports DONE; fails with timeout or a fw SUCCESS-bit error.
 */
7652 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7653 enum hnae3_loop loop_mode)
7655 #define HCLGE_COMMON_LB_RETRY_MS 10
7656 #define HCLGE_COMMON_LB_RETRY_NUM 100
7658 struct hclge_common_lb_cmd *req;
7659 struct hclge_desc desc;
7663 req = (struct hclge_common_lb_cmd *)desc.data;
7664 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7666 switch (loop_mode) {
7667 case HNAE3_LOOP_SERIAL_SERDES:
7668 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7670 case HNAE3_LOOP_PARALLEL_SERDES:
7671 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7673 case HNAE3_LOOP_PHY:
7674 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7677 dev_err(&hdev->pdev->dev,
7678 "unsupported common loopback mode %d\n", loop_mode);
/* enable: set both enable and mask bits; disable: mask bit only */
7683 req->enable = loop_mode_b;
7684 req->mask = loop_mode_b;
7686 req->mask = loop_mode_b;
7689 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7691 dev_err(&hdev->pdev->dev,
7692 "common loopback set fail, ret = %d\n", ret);
/* poll firmware completion status */
7697 msleep(HCLGE_COMMON_LB_RETRY_MS);
7698 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7700 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7702 dev_err(&hdev->pdev->dev,
7703 "common loopback get, ret = %d\n", ret);
7706 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7707 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7709 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7710 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7712 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7713 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
/* Apply a common (serdes/GE-PHY) loopback config, then switch the MAC
 * on/off accordingly and wait for the link state to settle.
 */
7719 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7720 enum hnae3_loop loop_mode)
7724 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7728 hclge_cfg_mac_mode(hdev, en);
7730 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7732 dev_err(&hdev->pdev->dev,
7733 "serdes loopback config mac mode timeout\n");
/* Enable loopback on the external PHY: cycle it through suspend/resume
 * to reach a clean state, then turn loopback on.
 */
7738 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7739 struct phy_device *phydev)
7743 if (!phydev->suspended) {
7744 ret = phy_suspend(phydev);
7749 ret = phy_resume(phydev);
7753 return phy_loopback(phydev, true);
/* Disable PHY loopback and suspend the PHY again afterwards. */
7756 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7757 struct phy_device *phydev)
7761 ret = phy_loopback(phydev, false);
7765 return phy_suspend(phydev);
/* Enable/disable PHY loopback. Devices whose PHY is managed by the IMP
 * firmware go through the common-loopback path instead; otherwise drive
 * the external PHY directly, reconfigure the MAC, and wait for link.
 */
7768 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7770 struct phy_device *phydev = hdev->hw.mac.phydev;
7774 if (hnae3_dev_phy_imp_supported(hdev))
7775 return hclge_set_common_loopback(hdev, en,
7781 ret = hclge_enable_phy_loopback(hdev, phydev);
7783 ret = hclge_disable_phy_loopback(hdev, phydev);
7785 dev_err(&hdev->pdev->dev,
7786 "set phy loopback fail, ret = %d\n", ret);
7790 hclge_cfg_mac_mode(hdev, en);
7792 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7794 dev_err(&hdev->pdev->dev,
7795 "phy loopback config mac mode timeout\n");
/* Send the CFG_COM_TQP_QUEUE command to enable/disable one TQP
 * (@tqp_id) for the given @stream_id.
 */
7800 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7801 u16 stream_id, bool enable)
7803 struct hclge_desc desc;
7804 struct hclge_cfg_com_tqp_queue_cmd *req =
7805 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7807 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7808 req->tqp_id = cpu_to_le16(tqp_id);
7809 req->stream_id = cpu_to_le16(stream_id);
7811 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7813 return hclge_cmd_send(&hdev->hw, &desc, 1);
/* Enable/disable all TQPs belonging to this handle, one command per
 * queue; stops at the first failure.
 */
7816 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7818 struct hclge_vport *vport = hclge_get_vport(handle);
7819 struct hclge_dev *hdev = vport->back;
7823 for (i = 0; i < handle->kinfo.num_tqps; i++) {
7824 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
/* hnae3 ops hook for ethtool self-test loopback. First disables SSU
 * loopback (>= V2 hw) so same-SMAC/DMAC packets reach the MAC, then
 * dispatches to the app/serdes/phy loopback setter, and finally toggles
 * the TQPs to match.
 */
7831 static int hclge_set_loopback(struct hnae3_handle *handle,
7832 enum hnae3_loop loop_mode, bool en)
7834 struct hclge_vport *vport = hclge_get_vport(handle);
7835 struct hclge_dev *hdev = vport->back;
7838 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7839 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7840 * the same, the packets are looped back in the SSU. If SSU loopback
7841 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7843 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7844 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7846 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7847 HCLGE_SWITCH_ALW_LPBK_MASK);
7852 switch (loop_mode) {
7853 case HNAE3_LOOP_APP:
7854 ret = hclge_set_app_loopback(hdev, en);
7856 case HNAE3_LOOP_SERIAL_SERDES:
7857 case HNAE3_LOOP_PARALLEL_SERDES:
7858 ret = hclge_set_common_loopback(hdev, en, loop_mode);
7860 case HNAE3_LOOP_PHY:
7861 ret = hclge_set_phy_loopback(hdev, en);
7865 dev_err(&hdev->pdev->dev,
7866 "loop_mode %d is not supported\n", loop_mode);
7873 ret = hclge_tqp_enable(handle, en);
7875 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7876 en ? "enable" : "disable", ret);
/* Restore the default (all-off) loopback state: app, serial-serdes and
 * parallel-serdes loopback are each disabled in turn.
 */
7881 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7885 ret = hclge_set_app_loopback(hdev, false);
7889 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7893 return hclge_cfg_common_loopback(hdev, false,
7894 HNAE3_LOOP_PARALLEL_SERDES);
/* Zero the software per-queue statistics of every TQP on this handle. */
7897 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7899 struct hclge_vport *vport = hclge_get_vport(handle);
7900 struct hnae3_knic_private_info *kinfo;
7901 struct hnae3_queue *queue;
7902 struct hclge_tqp *tqp;
7905 kinfo = &vport->nic.kinfo;
7906 for (i = 0; i < kinfo->num_tqps; i++) {
7907 queue = handle->kinfo.tqp[i];
7908 tqp = container_of(queue, struct hclge_tqp, q);
7909 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
/* Busy-wait until any in-flight link update finishes: spins while the
 * LINK_UPDATING flag is set, bounded by an iteration cap and by the
 * service task having made progress (serv_processed_cnt changed).
 */
7913 static void hclge_flush_link_update(struct hclge_dev *hdev)
7915 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
7917 unsigned long last = hdev->serv_processed_cnt;
7920 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7921 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7922 last == hdev->serv_processed_cnt)
/* hnae3 ops hook: start or stop the periodic service task. On disable,
 * set the DOWN flag (with a memory barrier so the service task sees it)
 * and flush any link update already in flight.
 */
7926 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7928 struct hclge_vport *vport = hclge_get_vport(handle);
7929 struct hclge_dev *hdev = vport->back;
7932 hclge_task_schedule(hdev, 0);
7934 /* Set the DOWN flag here to disable link updating */
7935 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7937 /* flush memory to make sure DOWN is seen by service task */
7938 smp_mb__before_atomic();
7939 hclge_flush_link_update(hdev);
/* hnae3 ops hook: bring the device up — enable the MAC, clear the DOWN
 * flag, reset link state and queue stats, and start the PHY.
 */
7943 static int hclge_ae_start(struct hnae3_handle *handle)
7945 struct hclge_vport *vport = hclge_get_vport(handle);
7946 struct hclge_dev *hdev = vport->back;
7949 hclge_cfg_mac_mode(hdev, true);
7950 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7951 hdev->hw.mac.link = 0;
7953 /* reset tqp stats */
7954 hclge_reset_tqp_stats(handle);
7956 hclge_mac_start_phy(hdev);
/* hnae3 ops hook: bring the device down — set DOWN, drop aRFS rules,
 * stop PHY early when a non-FUNC reset is in progress (fw will disable
 * the MAC itself in that case), reset the TQPs, disable MAC tunnel
 * interrupts and the MAC, stop the PHY, and refresh link status.
 */
7961 static void hclge_ae_stop(struct hnae3_handle *handle)
7963 struct hclge_vport *vport = hclge_get_vport(handle);
7964 struct hclge_dev *hdev = vport->back;
7966 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7967 spin_lock_bh(&hdev->fd_rule_lock);
7968 hclge_clear_arfs_rules(hdev);
7969 spin_unlock_bh(&hdev->fd_rule_lock);
7971 /* If it is not PF reset, the firmware will disable the MAC,
7972 * so it only need to stop phy here.
7974 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7975 hdev->reset_type != HNAE3_FUNC_RESET) {
7976 hclge_mac_stop_phy(hdev);
7977 hclge_update_link_status(hdev);
7981 hclge_reset_tqp(handle);
7983 hclge_config_mac_tnl_int(hdev, false);
7986 hclge_cfg_mac_mode(hdev, false);
7988 hclge_mac_stop_phy(hdev);
7990 /* reset tqp stats */
7991 hclge_reset_tqp_stats(handle);
7992 hclge_update_link_status(hdev);
/* Mark a vport alive and, if its config was invalidated (e.g. by a
 * reset), restore its MAC/VLAN tables — per-vport tables for VFs,
 * the full hw table for the PF (vport_id 0).
 */
7995 int hclge_vport_start(struct hclge_vport *vport)
7997 struct hclge_dev *hdev = vport->back;
7999 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8000 vport->last_active_jiffies = jiffies;
8002 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8003 if (vport->vport_id) {
8004 hclge_restore_mac_table_common(vport);
8005 hclge_restore_vport_vlan_table(vport);
8007 hclge_restore_hw_table(hdev);
8011 clear_bit(vport->vport_id, hdev->vport_config_block);
/* Mark a vport as no longer alive. */
8016 void hclge_vport_stop(struct hclge_vport *vport)
8018 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
/* hnae3 ops hook: client start — delegates to hclge_vport_start(). */
8021 static int hclge_client_start(struct hnae3_handle *handle)
8023 struct hclge_vport *vport = hclge_get_vport(handle);
8025 return hclge_vport_start(vport);
/* hnae3 ops hook: client stop — delegates to hclge_vport_stop(). */
8028 static void hclge_client_stop(struct hnae3_handle *handle)
8030 struct hclge_vport *vport = hclge_get_vport(handle);
8032 hclge_vport_stop(vport);
/* Decode the firmware response of a MAC/VLAN table command into an
 * errno, per operation: ADD treats resp 0/1 as success and the overflow
 * codes as table-full; REMOVE/LOOKUP treat resp 1 as entry-miss; any
 * other code is an undefined fw error. (The actual return-value lines
 * are missing from this excerpt.)
 */
8035 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8036 u16 cmdq_resp, u8 resp_code,
8037 enum hclge_mac_vlan_tbl_opcode op)
8039 struct hclge_dev *hdev = vport->back;
8042 dev_err(&hdev->pdev->dev,
8043 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8048 if (op == HCLGE_MAC_VLAN_ADD) {
8049 if (!resp_code || resp_code == 1)
8051 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8052 resp_code == HCLGE_ADD_MC_OVERFLOW)
8055 dev_err(&hdev->pdev->dev,
8056 "add mac addr failed for undefined, code=%u.\n",
8059 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8062 } else if (resp_code == 1) {
8063 dev_dbg(&hdev->pdev->dev,
8064 "remove mac addr failed for miss.\n");
8068 dev_err(&hdev->pdev->dev,
8069 "remove mac addr failed for undefined, code=%u.\n",
8072 } else if (op == HCLGE_MAC_VLAN_LKUP) {
8075 } else if (resp_code == 1) {
8076 dev_dbg(&hdev->pdev->dev,
8077 "lookup mac addr failed for miss.\n");
8081 dev_err(&hdev->pdev->dev,
8082 "lookup mac addr failed for undefined, code=%u.\n",
8087 dev_err(&hdev->pdev->dev,
8088 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
/* Set or clear the bit for @vfid in the per-function bitmap that spans
 * descriptors 1 and 2 of a 3-descriptor MAC/VLAN command (first 192 VFs
 * in desc[1], the rest in desc[2]); rejects vfid outside [0, 255].
 */
8093 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8095 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8097 unsigned int word_num;
8098 unsigned int bit_num;
8100 if (vfid > 255 || vfid < 0)
8103 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8104 word_num = vfid / 32;
8105 bit_num = vfid % 32;
8107 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8109 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8111 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8112 bit_num = vfid % 32;
8114 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8116 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
/* Return whether the function-id bitmap in descriptors 1..2 is entirely
 * zero, i.e. no vport still references this MAC/VLAN entry.
 */
8122 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8124 #define HCLGE_DESC_NUMBER 3
8125 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8128 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8129 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8130 if (desc[i].data[j])
/* Pack a 6-byte MAC address into the MAC/VLAN table entry command:
 * bytes 0-3 into the little-endian high word, bytes 4-5 into the low
 * half-word, plus entry-valid and (for multicast) mc-entry flag bits.
 */
8136 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8137 const u8 *addr, bool is_mc)
8139 const unsigned char *mac_addr = addr;
8140 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8141 (mac_addr[0]) | (mac_addr[1] << 8);
8142 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8144 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8146 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8147 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8150 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8151 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
/* Send a MAC_VLAN_REMOVE command for the given table entry and decode
 * the firmware response code into an errno.
 */
8154 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8155 struct hclge_mac_vlan_tbl_entry_cmd *req)
8157 struct hclge_dev *hdev = vport->back;
8158 struct hclge_desc desc;
8163 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8165 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8167 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8169 dev_err(&hdev->pdev->dev,
8170 "del mac addr failed for cmd_send, ret =%d.\n",
/* resp code lives in byte 1 of the first data word */
8174 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8175 retval = le16_to_cpu(desc.retval);
8177 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8178 HCLGE_MAC_VLAN_REMOVE);
/* Look up a MAC/VLAN table entry: multicast entries use a chained
 * 3-descriptor read (to fetch the VF bitmap), unicast a single
 * descriptor; the fw response code is decoded via
 * hclge_get_mac_vlan_cmd_status() (e.g. -ENOENT for a miss).
 */
8181 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8182 struct hclge_mac_vlan_tbl_entry_cmd *req,
8183 struct hclge_desc *desc,
8186 struct hclge_dev *hdev = vport->back;
8191 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
/* multicast: 3 chained descriptors carry the per-VF bitmap */
8193 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8194 memcpy(desc[0].data,
8196 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8197 hclge_cmd_setup_basic_desc(&desc[1],
8198 HCLGE_OPC_MAC_VLAN_ADD,
8200 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8201 hclge_cmd_setup_basic_desc(&desc[2],
8202 HCLGE_OPC_MAC_VLAN_ADD,
8204 ret = hclge_cmd_send(&hdev->hw, desc, 3);
/* unicast: single descriptor lookup */
8206 memcpy(desc[0].data,
8208 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8209 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8212 dev_err(&hdev->pdev->dev,
8213 "lookup mac addr failed for cmd_send, ret =%d.\n",
8217 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8218 retval = le16_to_cpu(desc[0].retval);
8220 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8221 HCLGE_MAC_VLAN_LKUP);
/* Add (or update) a MAC/VLAN table entry. Without @mc_desc a single
 * unicast descriptor is sent; with @mc_desc the 3 descriptors from a
 * prior lookup are reused (chained via FLAG_NEXT) so the updated VF
 * bitmap is written back along with the entry.
 */
8224 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8225 struct hclge_mac_vlan_tbl_entry_cmd *req,
8226 struct hclge_desc *mc_desc)
8228 struct hclge_dev *hdev = vport->back;
/* unicast path: one fresh descriptor */
8235 struct hclge_desc desc;
8237 hclge_cmd_setup_basic_desc(&desc,
8238 HCLGE_OPC_MAC_VLAN_ADD,
8240 memcpy(desc.data, req,
8241 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8242 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8243 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8244 retval = le16_to_cpu(desc.retval);
8246 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8248 HCLGE_MAC_VLAN_ADD);
/* multicast path: reuse the lookup descriptors, re-chain them */
8250 hclge_cmd_reuse_desc(&mc_desc[0], false);
8251 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8252 hclge_cmd_reuse_desc(&mc_desc[1], false);
8253 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8254 hclge_cmd_reuse_desc(&mc_desc[2], false);
8255 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8256 memcpy(mc_desc[0].data, req,
8257 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8258 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8259 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8260 retval = le16_to_cpu(mc_desc[0].retval);
8262 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8264 HCLGE_MAC_VLAN_ADD);
8268 dev_err(&hdev->pdev->dev,
8269 "add mac addr failed for cmd_send, ret =%d.\n",
/* Request @space_size unicast-MAC-VLAN (UMV) entries from firmware and
 * report back how many were actually granted via @allocated_size.
 */
8277 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8278 u16 *allocated_size)
8280 struct hclge_umv_spc_alc_cmd *req;
8281 struct hclge_desc desc;
8284 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8285 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8287 req->space_size = cpu_to_le32(space_size);
8289 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8291 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
/* granted size is returned in data[1] of the response */
8296 *allocated_size = le32_to_cpu(desc.data[1]);
/* Allocate the UMV space at init: ask fw for the wanted size (warn if
 * less is granted), then split it into equal private quotas per vport
 * with the remainder kept as shared space.
 */
8301 static int hclge_init_umv_space(struct hclge_dev *hdev)
8303 u16 allocated_size = 0;
8306 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8310 if (allocated_size < hdev->wanted_umv_size)
8311 dev_warn(&hdev->pdev->dev,
8312 "failed to alloc umv space, want %u, get %u\n",
8313 hdev->wanted_umv_size, allocated_size);
8315 hdev->max_umv_size = allocated_size;
8316 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8317 hdev->share_umv_size = hdev->priv_umv_size +
8318 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
/* Reset UMV accounting (e.g. after a reset): zero every vport's used
 * count and recompute the shared pool under vport_lock.
 */
8323 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8325 struct hclge_vport *vport;
8328 for (i = 0; i < hdev->num_alloc_vport; i++) {
8329 vport = &hdev->vport[i];
8330 vport->used_umv_num = 0;
8333 mutex_lock(&hdev->vport_lock);
8334 hdev->share_umv_size = hdev->priv_umv_size +
8335 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8336 mutex_unlock(&hdev->vport_lock);
/* Return true when this vport has exhausted its private UMV quota and
 * the shared pool is empty; takes vport_lock unless the caller already
 * holds it (@need_lock false).
 */
8339 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8341 struct hclge_dev *hdev = vport->back;
8345 mutex_lock(&hdev->vport_lock);
8347 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8348 hdev->share_umv_size == 0);
8351 mutex_unlock(&hdev->vport_lock);
/* Account one UMV entry for @vport. On free, give back to the shared
 * pool first when the vport was over its private quota; on allocate,
 * draw from the shared pool once the private quota is used up. Caller
 * must hold vport_lock (the counters it mutates are lock-protected
 * elsewhere — NOTE(review): confirm, locking lines not visible here).
 */
8356 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8358 struct hclge_dev *hdev = vport->back;
8361 if (vport->used_umv_num > hdev->priv_umv_size)
8362 hdev->share_umv_size++;
8364 if (vport->used_umv_num > 0)
8365 vport->used_umv_num--;
8367 if (vport->used_umv_num >= hdev->priv_umv_size &&
8368 hdev->share_umv_size > 0)
8369 hdev->share_umv_size--;
8370 vport->used_umv_num++;
/* Find the node for @mac_addr in a vport MAC list; the NULL-return line
 * is not shown in this excerpt. Caller must hold mac_list_lock.
 */
8374 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8377 struct hclge_mac_node *mac_node, *tmp;
8379 list_for_each_entry_safe(mac_node, tmp, list, node)
8380 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
/* State machine for an existing MAC list node when the same address is
 * reported again with a new desired state: TO_ADD over TO_DEL means the
 * entry is still in hw, so it becomes ACTIVE; TO_DEL over TO_ADD means
 * it never reached hw, so the node is dropped; ACTIVE promotes TO_ADD.
 */
8386 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8387 enum HCLGE_MAC_NODE_STATE state)
8390 /* from set_rx_mode or tmp_add_list */
8391 case HCLGE_MAC_TO_ADD:
8392 if (mac_node->state == HCLGE_MAC_TO_DEL)
8393 mac_node->state = HCLGE_MAC_ACTIVE;
8395 /* only from set_rx_mode */
8396 case HCLGE_MAC_TO_DEL:
8397 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8398 list_del(&mac_node->node);
8401 mac_node->state = HCLGE_MAC_TO_DEL;
8404 /* only from tmp_add_list, the mac_node->state won't be
8407 case HCLGE_MAC_ACTIVE:
8408 if (mac_node->state == HCLGE_MAC_TO_ADD)
8409 mac_node->state = HCLGE_MAC_ACTIVE;
/* Record a requested add/del of a UC or MC MAC address in the vport's
 * software list (under mac_list_lock). An existing node is transitioned
 * via hclge_update_mac_node(); deleting a never-added address is an
 * error; otherwise a new node is allocated and appended. Sets the
 * MAC_TBL_CHANGE flag so the service task syncs the change to hw.
 */
8415 int hclge_update_mac_list(struct hclge_vport *vport,
8416 enum HCLGE_MAC_NODE_STATE state,
8417 enum HCLGE_MAC_ADDR_TYPE mac_type,
8418 const unsigned char *addr)
8420 struct hclge_dev *hdev = vport->back;
8421 struct hclge_mac_node *mac_node;
8422 struct list_head *list;
8424 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8425 &vport->uc_mac_list : &vport->mc_mac_list;
8427 spin_lock_bh(&vport->mac_list_lock);
8429 /* if the mac addr is already in the mac list, no need to add a new
8430 * one into it, just check the mac addr state, convert it to a new
8431 * state, or just remove it, or do nothing.
8433 mac_node = hclge_find_mac_node(list, addr);
8435 hclge_update_mac_node(mac_node, state);
8436 spin_unlock_bh(&vport->mac_list_lock);
8437 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8441 /* if this address is never added, unnecessary to delete */
8442 if (state == HCLGE_MAC_TO_DEL) {
8443 spin_unlock_bh(&vport->mac_list_lock);
8444 dev_err(&hdev->pdev->dev,
8445 "failed to delete address %pM from mac list\n",
/* GFP_ATOMIC: allocation happens under a BH spinlock */
8450 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8452 spin_unlock_bh(&vport->mac_list_lock);
8456 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8458 mac_node->state = state;
8459 ether_addr_copy(mac_node->mac_addr, addr);
8460 list_add_tail(&mac_node->node, list);
8462 spin_unlock_bh(&vport->mac_list_lock);
/* hnae3 ops hook: queue a unicast MAC address for addition. */
8467 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8468 const unsigned char *addr)
8470 struct hclge_vport *vport = hclge_get_vport(handle);
8472 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
/* Program a unicast MAC address into the hw MAC/VLAN table for @vport.
 * Rejects zero/broadcast/multicast addresses; looks the entry up first
 * (duplicates are not allowed) and only adds it when absent and UMV
 * space remains, updating the UMV accounting under vport_lock.
 */
8476 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8477 const unsigned char *addr)
8479 struct hclge_dev *hdev = vport->back;
8480 struct hclge_mac_vlan_tbl_entry_cmd req;
8481 struct hclge_desc desc;
8482 u16 egress_port = 0;
8485 /* mac addr check */
8486 if (is_zero_ether_addr(addr) ||
8487 is_broadcast_ether_addr(addr) ||
8488 is_multicast_ether_addr(addr)) {
8489 dev_err(&hdev->pdev->dev,
8490 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8491 addr, is_zero_ether_addr(addr),
8492 is_broadcast_ether_addr(addr),
8493 is_multicast_ether_addr(addr));
8497 memset(&req, 0, sizeof(req));
8499 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8500 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8502 req.egress_port = cpu_to_le16(egress_port);
8504 hclge_prepare_mac_addr(&req, addr, false);
8506 /* Lookup the mac address in the mac_vlan table, and add
8507 * it if the entry is inexistent. Repeated unicast entry
8508 * is not allowed in the mac vlan table.
8510 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8511 if (ret == -ENOENT) {
8512 mutex_lock(&hdev->vport_lock);
8513 if (!hclge_is_umv_space_full(vport, false)) {
8514 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8516 hclge_update_umv_space(vport, false);
8517 mutex_unlock(&hdev->vport_lock);
8520 mutex_unlock(&hdev->vport_lock);
/* only report table-full once until overflow-promisc clears */
8522 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8523 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8524 hdev->priv_umv_size);
8529 /* check if we just hit the duplicate */
8531 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8532 vport->vport_id, addr);
8536 dev_err(&hdev->pdev->dev,
8537 "PF failed to add unicast entry(%pM) in the MAC table\n",
/* hnae3 ops hook: queue a unicast MAC address for removal. */
8543 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8544 const unsigned char *addr)
8546 struct hclge_vport *vport = hclge_get_vport(handle);
8548 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
/* Remove a unicast MAC address from the hw table. Rejects invalid
 * addresses; on successful removal the UMV usage is given back under
 * vport_lock, and an -ENOENT miss is tolerated (entry already gone).
 */
8552 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8553 const unsigned char *addr)
8555 struct hclge_dev *hdev = vport->back;
8556 struct hclge_mac_vlan_tbl_entry_cmd req;
8559 /* mac addr check */
8560 if (is_zero_ether_addr(addr) ||
8561 is_broadcast_ether_addr(addr) ||
8562 is_multicast_ether_addr(addr)) {
8563 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8568 memset(&req, 0, sizeof(req));
8569 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8570 hclge_prepare_mac_addr(&req, addr, false);
8571 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8573 mutex_lock(&hdev->vport_lock);
8574 hclge_update_umv_space(vport, true);
8575 mutex_unlock(&hdev->vport_lock);
8576 } else if (ret == -ENOENT) {
/* hnae3 ops hook: queue a multicast MAC address for addition. */
8583 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8584 const unsigned char *addr)
8586 struct hclge_vport *vport = hclge_get_vport(handle);
8588 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
/* Program a multicast MAC address for @vport: look the entry up (the
 * 3-descriptor form carries the per-VF bitmap); if absent, clear the
 * bitmap descriptors for a fresh entry; set this vport's bit and write
 * the entry back. A table-full condition is logged only while the
 * overflow-promisc flag is not already set.
 */
8592 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8593 const unsigned char *addr)
8595 struct hclge_dev *hdev = vport->back;
8596 struct hclge_mac_vlan_tbl_entry_cmd req;
8597 struct hclge_desc desc[3];
8600 /* mac addr check */
8601 if (!is_multicast_ether_addr(addr)) {
8602 dev_err(&hdev->pdev->dev,
8603 "Add mc mac err! invalid mac:%pM.\n",
8607 memset(&req, 0, sizeof(req));
8608 hclge_prepare_mac_addr(&req, addr, true);
8609 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8611 /* This mac addr do not exist, add new entry for it */
8612 memset(desc[0].data, 0, sizeof(desc[0].data));
8613 memset(desc[1].data, 0, sizeof(desc[0].data));
8614 memset(desc[2].data, 0, sizeof(desc[0].data));
8616 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8619 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8620 /* if already overflow, not to print each time */
8621 if (status == -ENOSPC &&
8622 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8623 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
/* Queue removal of a multicast MAC address for this handle's vport
 * (software list only; hardware updated by the sync task).
 */
8628 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8629 const unsigned char *addr)
8631 struct hclge_vport *vport = hclge_get_vport(handle);
8633 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
/* Drop this vport from a multicast MAC entry in the hardware table.
 * Clears the vport's bit in the entry's VF bitmap; if no vport still
 * references the entry, the whole entry is deleted, otherwise the
 * updated bitmap is written back. NOTE(review): the -ENOENT branch
 * body is elided in this extraction.
 */
8637 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8638 const unsigned char *addr)
8640 struct hclge_dev *hdev = vport->back;
8641 struct hclge_mac_vlan_tbl_entry_cmd req;
8642 enum hclge_cmd_status status;
8643 struct hclge_desc desc[3];
8645 /* mac addr check */
8646 if (!is_multicast_ether_addr(addr)) {
8647 dev_dbg(&hdev->pdev->dev,
8648 "Remove mc mac err! invalid mac:%pM.\n",
8653 memset(&req, 0, sizeof(req));
8654 hclge_prepare_mac_addr(&req, addr, true);
8655 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8657 /* This mac addr exist, remove this handle's VFID for it */
8658 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8662 if (hclge_is_all_function_id_zero(desc))
8663 /* All the vfid is zero, so need to delete this entry */
8664 status = hclge_remove_mac_vlan_tbl(vport, &req);
8666 /* Not all the vfid is zero, update the vfid */
8667 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8668 } else if (status == -ENOENT) {
/* Push each pending address in @list to hardware via @sync (an add
 * helper). Entries that succeed become ACTIVE; on failure the
 * MAC_TBL_CHANGE state bit is set so the sync task retries later.
 */
8675 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8676 struct list_head *list,
8677 int (*sync)(struct hclge_vport *,
8678 const unsigned char *))
8680 struct hclge_mac_node *mac_node, *tmp;
8683 list_for_each_entry_safe(mac_node, tmp, list, node) {
8684 ret = sync(vport, mac_node->mac_addr);
8686 mac_node->state = HCLGE_MAC_ACTIVE;
8688 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
/* Remove each address in @list from hardware via @unsync (a remove
 * helper). Success or -ENOENT (already gone) frees the node; other
 * failures set MAC_TBL_CHANGE so the removal is retried later.
 */
8695 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8696 struct list_head *list,
8697 int (*unsync)(struct hclge_vport *,
8698 const unsigned char *))
8700 struct hclge_mac_node *mac_node, *tmp;
8703 list_for_each_entry_safe(mac_node, tmp, list, node) {
8704 ret = unsync(vport, mac_node->mac_addr);
8705 if (!ret || ret == -ENOENT) {
8706 list_del(&mac_node->node);
8709 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
/* Merge the temporary add list back into the main MAC list after a
 * hardware sync pass. Returns true when every entry was added (i.e.
 * nothing is still in TO_ADD state). Entries whose address reappeared
 * in the main list have their state reconciled; entries the user
 * deleted in the meantime are converted to TO_DEL or dropped.
 */
8716 static bool hclge_sync_from_add_list(struct list_head *add_list,
8717 struct list_head *mac_list)
8719 struct hclge_mac_node *mac_node, *tmp, *new_node;
8720 bool all_added = true;
8722 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8723 if (mac_node->state == HCLGE_MAC_TO_ADD)
8726 /* if the mac address from tmp_add_list is not in the
8727 * uc/mc_mac_list, it means have received a TO_DEL request
8728 * during the time window of adding the mac address into mac
8729 * table. if mac_node state is ACTIVE, then change it to TO_DEL,
8730 * then it will be removed at next time. else it must be TO_ADD,
8731 * this address hasn't been added into mac table,
8732 * so just remove the mac node.
8734 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8736 hclge_update_mac_node(new_node, mac_node->state);
8737 list_del(&mac_node->node);
8739 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8740 mac_node->state = HCLGE_MAC_TO_DEL;
8741 list_del(&mac_node->node);
8742 list_add_tail(&mac_node->node, mac_list);
8744 list_del(&mac_node->node);
/* Merge the temporary delete list back into the main MAC list after a
 * hardware sync pass. If an address was re-added (found again in
 * @mac_list) it is marked ACTIVE — it is still programmed in hardware
 * because the delete failed; otherwise the node is moved back to
 * @mac_list so the delete can be retried.
 */
8752 static void hclge_sync_from_del_list(struct list_head *del_list,
8753 struct list_head *mac_list)
8755 struct hclge_mac_node *mac_node, *tmp, *new_node;
8757 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8758 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8760 /* If the mac addr exists in the mac list, it means
8761 * received a new TO_ADD request during the time window
8762 * of configuring the mac address. For the mac node
8763 * state is TO_ADD, and the address is already in the
8764 * in the hardware(due to delete fail), so we just need
8765 * to change the mac node state to ACTIVE.
8767 new_node->state = HCLGE_MAC_ACTIVE;
8768 list_del(&mac_node->node);
8771 list_del(&mac_node->node);
8772 list_add_tail(&mac_node->node, mac_list);
/* Track MAC table overflow per address type: clear or set the
 * unicast (UPE) / multicast (MPE) overflow-promiscuous flag depending
 * on whether every address fitted in the hardware table.
 */
8777 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8778 enum HCLGE_MAC_ADDR_TYPE mac_type,
8781 if (mac_type == HCLGE_MAC_ADDR_UC) {
8783 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8785 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8788 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8790 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
/* Synchronize one vport's software MAC list (UC or MC) with hardware.
 * Under mac_list_lock, pending TO_DEL nodes are moved to a local
 * delete list and TO_ADD nodes are copied (GFP_ATOMIC) to a local add
 * list, so the actual firmware commands run outside the spinlock.
 * Deletes are issued before adds to free table space first; failures
 * are merged back into the main list for retry, and the overflow
 * promiscuous flags are updated from the add result.
 */
8794 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8795 enum HCLGE_MAC_ADDR_TYPE mac_type)
8797 struct hclge_mac_node *mac_node, *tmp, *new_node;
8798 struct list_head tmp_add_list, tmp_del_list;
8799 struct list_head *list;
8802 INIT_LIST_HEAD(&tmp_add_list);
8803 INIT_LIST_HEAD(&tmp_del_list);
8805 /* move the mac addr to the tmp_add_list and tmp_del_list, then
8806 * we can add/delete these mac addr outside the spin lock
8808 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8809 &vport->uc_mac_list : &vport->mc_mac_list;
8811 spin_lock_bh(&vport->mac_list_lock);
8813 list_for_each_entry_safe(mac_node, tmp, list, node) {
8814 switch (mac_node->state) {
8815 case HCLGE_MAC_TO_DEL:
8816 list_del(&mac_node->node);
8817 list_add_tail(&mac_node->node, &tmp_del_list);
8819 case HCLGE_MAC_TO_ADD:
8820 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8823 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8824 new_node->state = mac_node->state;
8825 list_add_tail(&new_node->node, &tmp_add_list);
8833 spin_unlock_bh(&vport->mac_list_lock);
8835 /* delete first, in order to get max mac table space for adding */
8836 if (mac_type == HCLGE_MAC_ADDR_UC) {
8837 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8838 hclge_rm_uc_addr_common);
8839 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8840 hclge_add_uc_addr_common);
8842 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8843 hclge_rm_mc_addr_common);
8844 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8845 hclge_add_mc_addr_common);
8848 /* if some mac addresses were added/deleted fail, move back to the
8849 * mac_list, and retry at next time.
8851 spin_lock_bh(&vport->mac_list_lock);
8853 hclge_sync_from_del_list(&tmp_del_list, list);
8854 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8856 spin_unlock_bh(&vport->mac_list_lock);
8858 hclge_update_overflow_flags(vport, mac_type, all_added);
/* Decide whether the periodic task must sync this vport's MAC table:
 * skipped while the vport is in a config-blocked window (e.g. reset),
 * required when the MAC_TBL_CHANGE bit was set (test-and-clear).
 */
8861 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8863 struct hclge_dev *hdev = vport->back;
8865 if (test_bit(vport->vport_id, hdev->vport_config_block))
8868 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
/* Periodic-task entry: sync the UC and MC MAC tables of every vport
 * that reports pending changes.
 */
8874 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8878 for (i = 0; i < hdev->num_alloc_vport; i++) {
8879 struct hclge_vport *vport = &hdev->vport[i];
8881 if (!hclge_need_sync_mac_table(vport))
8884 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8885 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
/* Collect nodes from a vport MAC list into @tmp_del_list for removal:
 * TO_DEL and ACTIVE entries are moved (they exist or should exist in
 * hardware); TO_ADD entries were never programmed, so they are simply
 * unlinked. NOTE(review): the elided lines after 8902/8904 likely
 * free the TO_ADD node — not visible here.
 */
8889 static void hclge_build_del_list(struct list_head *list,
8891 struct list_head *tmp_del_list)
8893 struct hclge_mac_node *mac_cfg, *tmp;
8895 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8896 switch (mac_cfg->state) {
8897 case HCLGE_MAC_TO_DEL:
8898 case HCLGE_MAC_ACTIVE:
8899 list_del(&mac_cfg->node);
8900 list_add_tail(&mac_cfg->node, tmp_del_list);
8902 case HCLGE_MAC_TO_ADD:
8904 list_del(&mac_cfg->node);
/* Remove every entry in @tmp_del_list from hardware via @unsync.
 * When the caller is not really deleting (VF reset path), ACTIVE
 * entries are kept in software as TO_ADD so they can be restored
 * after the reset; on hard deletes the node is unlinked. A failed
 * delete is left as TO_DEL for retry.
 */
8912 static void hclge_unsync_del_list(struct hclge_vport *vport,
8913 int (*unsync)(struct hclge_vport *vport,
8914 const unsigned char *addr),
8916 struct list_head *tmp_del_list)
8918 struct hclge_mac_node *mac_cfg, *tmp;
8921 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8922 ret = unsync(vport, mac_cfg->mac_addr);
8923 if (!ret || ret == -ENOENT) {
8924 /* clear all mac addr from hardware, but remain these
8925 * mac addr in the mac list, and restore them after
8926 * vf reset finished.
8929 mac_cfg->state == HCLGE_MAC_ACTIVE) {
8930 mac_cfg->state = HCLGE_MAC_TO_ADD;
8932 list_del(&mac_cfg->node);
8935 } else if (is_del_list) {
8936 mac_cfg->state = HCLGE_MAC_TO_DEL;
/* Remove all UC or MC MAC addresses of a vport from hardware.
 * @is_del_list selects permanent deletion versus a temporary clear
 * where software entries are kept for restore (VF reset). Marks the
 * vport config-blocked, builds the delete list under mac_list_lock,
 * unsyncs it outside the lock, then merges failures back for retry.
 */
8941 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8942 enum HCLGE_MAC_ADDR_TYPE mac_type)
8944 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8945 struct hclge_dev *hdev = vport->back;
8946 struct list_head tmp_del_list, *list;
8948 if (mac_type == HCLGE_MAC_ADDR_UC) {
8949 list = &vport->uc_mac_list;
8950 unsync = hclge_rm_uc_addr_common;
8952 list = &vport->mc_mac_list;
8953 unsync = hclge_rm_mc_addr_common;
8956 INIT_LIST_HEAD(&tmp_del_list);
8959 set_bit(vport->vport_id, hdev->vport_config_block);
8961 spin_lock_bh(&vport->mac_list_lock);
8963 hclge_build_del_list(list, is_del_list, &tmp_del_list);
8965 spin_unlock_bh(&vport->mac_list_lock);
8967 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8969 spin_lock_bh(&vport->mac_list_lock);
8971 hclge_sync_from_del_list(&tmp_del_list, list);
8973 spin_unlock_bh(&vport->mac_list_lock);
8976 /* remove all mac address when uninitailize */
/* Tear down one vport's UC or MC MAC list: move hardware-resident
 * entries (TO_DEL/ACTIVE) to a delete list and remove them from
 * hardware, drop never-programmed TO_ADD nodes, warn if any delete
 * failed, then free whatever remains on the delete list.
 */
8977 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8978 enum HCLGE_MAC_ADDR_TYPE mac_type)
8980 struct hclge_mac_node *mac_node, *tmp;
8981 struct hclge_dev *hdev = vport->back;
8982 struct list_head tmp_del_list, *list;
8984 INIT_LIST_HEAD(&tmp_del_list);
8986 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8987 &vport->uc_mac_list : &vport->mc_mac_list;
8989 spin_lock_bh(&vport->mac_list_lock);
8991 list_for_each_entry_safe(mac_node, tmp, list, node) {
8992 switch (mac_node->state) {
8993 case HCLGE_MAC_TO_DEL:
8994 case HCLGE_MAC_ACTIVE:
8995 list_del(&mac_node->node);
8996 list_add_tail(&mac_node->node, &tmp_del_list);
8998 case HCLGE_MAC_TO_ADD:
8999 list_del(&mac_node->node);
9005 spin_unlock_bh(&vport->mac_list_lock);
9007 if (mac_type == HCLGE_MAC_ADDR_UC)
9008 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9009 hclge_rm_uc_addr_common);
9011 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9012 hclge_rm_mc_addr_common);
9014 if (!list_empty(&tmp_del_list))
9015 dev_warn(&hdev->pdev->dev,
9016 "uninit %s mac list for vport %u not completely.\n",
9017 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
/* free any leftovers that hardware removal could not clear */
9020 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9021 list_del(&mac_node->node);
/* Driver teardown: release the UC and MC MAC lists of every vport. */
9026 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9028 struct hclge_vport *vport;
9031 for (i = 0; i < hdev->num_alloc_vport; i++) {
9032 vport = &hdev->vport[i];
9033 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9034 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
/* Translate the firmware response of a MAC-ethertype (manager table)
 * add command into an errno: success/already-added -> 0, table
 * overflow, key conflict or unknown codes -> -EIO (each logged).
 */
9038 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9039 u16 cmdq_resp, u8 resp_code)
9041 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
9042 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
9043 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
9044 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
/* non-zero cmdq_resp means the command itself failed */
9049 dev_err(&hdev->pdev->dev,
9050 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9055 switch (resp_code) {
9056 case HCLGE_ETHERTYPE_SUCCESS_ADD:
9057 case HCLGE_ETHERTYPE_ALREADY_ADD:
9060 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9061 dev_err(&hdev->pdev->dev,
9062 "add mac ethertype failed for manager table overflow.\n");
9063 return_status = -EIO;
9065 case HCLGE_ETHERTYPE_KEY_CONFLICT:
9066 dev_err(&hdev->pdev->dev,
9067 "add mac ethertype failed for key conflict.\n");
9068 return_status = -EIO;
9071 dev_err(&hdev->pdev->dev,
9072 "add mac ethertype failed for undefined, code=%u.\n",
9074 return_status = -EIO;
9077 return return_status;
/* Check whether @mac_addr is already in use: either present in the
 * hardware unicast MAC_VLAN table (lookup != -ENOENT) or configured
 * as another VF's administrative MAC. Zero addresses are never
 * considered duplicates.
 */
9080 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9083 struct hclge_mac_vlan_tbl_entry_cmd req;
9084 struct hclge_dev *hdev = vport->back;
9085 struct hclge_desc desc;
9086 u16 egress_port = 0;
9089 if (is_zero_ether_addr(mac_addr))
9092 memset(&req, 0, sizeof(req));
9093 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9094 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9095 req.egress_port = cpu_to_le16(egress_port);
9096 hclge_prepare_mac_addr(&req, mac_addr, false);
9098 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
/* scan the other VF vports for an identical administrative MAC */
9101 vf_idx += HCLGE_VF_VPORT_START_NUM;
9102 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9104 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
/* ndo_set_vf_mac handler: set VF @vf's administrative MAC address.
 * No-op (with info log) if unchanged, rejects duplicates, then stores
 * the address in vf_info. If the VF is alive it is asked to reset so
 * it picks up the new MAC; otherwise the change applies on next init.
 */
9110 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9113 struct hclge_vport *vport = hclge_get_vport(handle);
9114 struct hclge_dev *hdev = vport->back;
9116 vport = hclge_get_vf_vport(hdev, vf);
9120 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9121 dev_info(&hdev->pdev->dev,
9122 "Specified MAC(=%pM) is same as before, no change committed!\n",
9127 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9128 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9133 ether_addr_copy(vport->vf_info.mac, mac_addr);
9135 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9136 dev_info(&hdev->pdev->dev,
9137 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9139 return hclge_inform_reset_assert_to_vf(vport);
9142 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
/* Write one entry of the MAC manager (ethertype) table to firmware
 * and decode the response via hclge_get_mac_ethertype_cmd_status().
 */
9147 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9148 const struct hclge_mac_mgr_tbl_entry_cmd *req)
9150 struct hclge_desc desc;
9155 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9156 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9158 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9160 dev_err(&hdev->pdev->dev,
9161 "add mac ethertype failed for cmd_send, ret =%d.\n",
/* response code lives in byte 1 of the first data word */
9166 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9167 retval = le16_to_cpu(desc.retval);
9169 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
/* Load every static entry of hclge_mgr_table[] into the manager
 * table; stops and logs on the first failure.
 */
9172 static int init_mgr_tbl(struct hclge_dev *hdev)
9177 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9178 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9180 dev_err(&hdev->pdev->dev,
9181 "add mac ethertype failed, ret =%d.\n",
/* Copy the PF's current MAC address into @p (ETH_ALEN bytes). */
9190 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9192 struct hclge_vport *vport = hclge_get_vport(handle);
9193 struct hclge_dev *hdev = vport->back;
9195 ether_addr_copy(p, hdev->hw.mac.mac_addr);
/* Update the unicast MAC list when the device address changes from
 * @old_addr to @new_addr. The new address node is created (or revived
 * from TO_DEL) and moved to the list head so it wins UMV space after
 * a global/IMP reset clears the hardware table. The old address is
 * dropped outright if never programmed, otherwise marked TO_DEL.
 * Caller must hold mac_list_lock (kzalloc uses GFP_ATOMIC).
 */
9198 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9199 const u8 *old_addr, const u8 *new_addr)
9201 struct list_head *list = &vport->uc_mac_list;
9202 struct hclge_mac_node *old_node, *new_node;
9204 new_node = hclge_find_mac_node(list, new_addr);
9206 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9210 new_node->state = HCLGE_MAC_TO_ADD;
9211 ether_addr_copy(new_node->mac_addr, new_addr);
9212 list_add(&new_node->node, list);
9214 if (new_node->state == HCLGE_MAC_TO_DEL)
9215 new_node->state = HCLGE_MAC_ACTIVE;
9217 /* make sure the new addr is in the list head, avoid dev
9218 * addr may be not re-added into mac table for the umv space
9219 * limitation after global/imp reset which will clear mac
9220 * table by hardware.
9222 list_move(&new_node->node, list);
9225 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9226 old_node = hclge_find_mac_node(list, old_addr);
9228 if (old_node->state == HCLGE_MAC_TO_ADD) {
9229 list_del(&old_node->node);
9232 old_node->state = HCLGE_MAC_TO_DEL;
9237 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
/* ndo_set_mac_address handler for the PF. Validates the new address,
 * reprograms the MAC-pause address first, then updates the software
 * MAC list and hdev->hw.mac.mac_addr under mac_list_lock (so the
 * set_rx_mode path cannot remove the device address concurrently).
 * On list-update failure the pause address is rolled back. Finally
 * the service task is kicked to push the change to hardware.
 */
9242 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9245 const unsigned char *new_addr = (const unsigned char *)p;
9246 struct hclge_vport *vport = hclge_get_vport(handle);
9247 struct hclge_dev *hdev = vport->back;
9248 unsigned char *old_addr = NULL;
9251 /* mac addr check */
9252 if (is_zero_ether_addr(new_addr) ||
9253 is_broadcast_ether_addr(new_addr) ||
9254 is_multicast_ether_addr(new_addr)) {
9255 dev_err(&hdev->pdev->dev,
9256 "change uc mac err! invalid mac: %pM.\n",
9261 ret = hclge_pause_addr_cfg(hdev, new_addr);
9263 dev_err(&hdev->pdev->dev,
9264 "failed to configure mac pause address, ret = %d\n",
9270 old_addr = hdev->hw.mac.mac_addr;
9272 spin_lock_bh(&vport->mac_list_lock);
9273 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9275 dev_err(&hdev->pdev->dev,
9276 "failed to change the mac addr:%pM, ret = %d\n",
9278 spin_unlock_bh(&vport->mac_list_lock);
/* restore the previous pause address on failure */
9281 hclge_pause_addr_cfg(hdev, old_addr);
9285 /* we must update dev addr with spin lock protect, preventing dev addr
9286 * being removed by set_rx_mode path.
9288 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9289 spin_unlock_bh(&vport->mac_list_lock);
9291 hclge_task_schedule(hdev, 0);
/* Handle MII ioctls (SIOCGMIIPHY/REG, SIOCSMIIREG) for devices whose
 * PHY is managed by the IMP firmware; only supported when the device
 * advertises PHY-IMP capability.
 */
9296 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9298 struct mii_ioctl_data *data = if_mii(ifr);
9300 if (!hnae3_dev_phy_imp_supported(hdev))
9305 data->phy_id = hdev->hw.mac.phy_addr;
9306 /* this command reads phy id and register at the same time */
9309 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9313 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
/* ndo_do_ioctl: route to the firmware-PHY path when no phydev is
 * attached, otherwise to the standard phylib handler.
 */
9319 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9322 struct hclge_vport *vport = hclge_get_vport(handle);
9323 struct hclge_dev *hdev = vport->back;
9325 if (!hdev->hw.mac.phydev)
9326 return hclge_mii_ioctl(hdev, ifr, cmd);
9328 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
/* Enable/disable hardware VLAN filtering for one filter type.
 * Read-modify-write: fetch the current filter config, set or clear
 * the @fe_type bits in vlan_fe according to @filter_en, write back.
 */
9331 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9332 u8 fe_type, bool filter_en, u8 vf_id)
9334 struct hclge_vlan_filter_ctrl_cmd *req;
9335 struct hclge_desc desc;
9338 /* read current vlan filter parameter */
9339 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9340 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9341 req->vlan_type = vlan_type;
9344 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9346 dev_err(&hdev->pdev->dev,
9347 "failed to get vlan filter config, ret = %d.\n", ret);
9351 /* modify and write new config parameter */
9352 hclge_cmd_reuse_desc(&desc, false);
9353 req->vlan_fe = filter_en ?
9354 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9356 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9358 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
/* VLAN filter type selectors and filter-enable bit masks; the
 * composite EGRESS/INGRESS masks cover both NIC and RoCE paths.
 */
9364 #define HCLGE_FILTER_TYPE_VF 0
9365 #define HCLGE_FILTER_TYPE_PORT 1
9366 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
9367 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
9368 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
9369 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
9370 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
9371 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
9372 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
9373 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
9374 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
/* Toggle RX VLAN filtering. On device version >= V2, VF egress and
 * port ingress filters are set separately; V1 hardware only has the
 * legacy egress bit. Mirrors the result in handle->netdev_flags.
 */
9376 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9378 struct hclge_vport *vport = hclge_get_vport(handle);
9379 struct hclge_dev *hdev = vport->back;
9381 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9382 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9383 HCLGE_FILTER_FE_EGRESS, enable, 0);
9384 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9385 HCLGE_FILTER_FE_INGRESS, enable, 0);
9387 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9388 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
9392 handle->netdev_flags |= HNAE3_VLAN_FLTR;
9394 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
/* Send the two-descriptor VF VLAN filter command that adds
 * (is_kill == false) or kills one VLAN ID for VF @vfid. The VF is
 * addressed by a one-hot bit in a bitmap split across the two
 * descriptors; the first HCLGE_MAX_VF_BYTES bytes live in desc[0].
 */
9397 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9398 bool is_kill, u16 vlan,
9399 struct hclge_desc *desc)
9401 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9402 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9407 hclge_cmd_setup_basic_desc(&desc[0],
9408 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9409 hclge_cmd_setup_basic_desc(&desc[1],
9410 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
/* chain desc[0] to desc[1] for the multi-BD command */
9412 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9414 vf_byte_off = vfid / 8;
9415 vf_byte_val = 1 << (vfid % 8);
9417 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9418 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9420 req0->vlan_id = cpu_to_le16(vlan);
9421 req0->vlan_cfg = is_kill;
9423 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9424 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9426 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9428 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9430 dev_err(&hdev->pdev->dev,
9431 "Send vf vlan command fail, ret =%d.\n",
/* Decode the firmware response of a VF VLAN filter command.
 * Add path: resp 0/1 is success; HCLGE_VF_VLAN_NO_ENTRY (2) means the
 * VF VLAN table is full — the vfid is flagged in vf_vlan_full and the
 * filter is effectively disabled. Kill path: 0 is success and
 * "not found" is silently ignored (the table was full, so the id was
 * never installed). Anything else is logged as an error.
 */
9439 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9440 bool is_kill, struct hclge_desc *desc)
9442 struct hclge_vlan_filter_vf_cfg_cmd *req;
9444 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9447 #define HCLGE_VF_VLAN_NO_ENTRY 2
9448 if (!req->resp_code || req->resp_code == 1)
9451 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9452 set_bit(vfid, hdev->vf_vlan_full);
9453 dev_warn(&hdev->pdev->dev,
9454 "vf vlan table is full, vf vlan filter is disabled\n");
9458 dev_err(&hdev->pdev->dev,
9459 "Add vf vlan filter fail, ret =%u.\n",
9462 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9463 if (!req->resp_code)
9466 /* vf vlan filter is disabled when vf vlan table is full,
9467 * then new vlan id will not be added into vf vlan table.
9468 * Just return 0 without warning, avoid massive verbose
9469 * print logs when unload.
9471 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9474 dev_err(&hdev->pdev->dev,
9475 "Kill vf vlan filter fail, ret =%u.\n",
/* Add or kill one VLAN ID in a VF's VLAN filter. Skips (or rejects,
 * when spoof-check is on) new additions while the VF VLAN table is
 * flagged full, then issues the filter command and decodes its
 * response.
 */
9482 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9483 bool is_kill, u16 vlan)
9485 struct hclge_vport *vport = &hdev->vport[vfid];
9486 struct hclge_desc desc[2];
9489 /* if vf vlan table is full, firmware will close vf vlan filter, it
9490 * is unable and unnecessary to add new vlan id to vf vlan filter.
9491 * If spoof check is enable, and vf vlan is full, it shouldn't add
9492 * new vlan, because tx packets with these vlan id will be dropped.
9494 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9495 if (vport->vf_info.spoofchk && vlan) {
9496 dev_err(&hdev->pdev->dev,
9497 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9503 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9507 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
/* Add or kill one VLAN ID in the port-level VLAN filter. The VLAN is
 * addressed as a bit inside a 160-id window: vlan_offset selects the
 * window, then a byte offset and a one-hot bit select the id.
 */
9510 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9511 u16 vlan_id, bool is_kill)
9513 struct hclge_vlan_filter_pf_cfg_cmd *req;
9514 struct hclge_desc desc;
9515 u8 vlan_offset_byte_val;
9516 u8 vlan_offset_byte;
9520 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9522 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9523 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9524 HCLGE_VLAN_BYTE_SIZE;
9525 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9527 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9528 req->vlan_offset = vlan_offset_160;
9529 req->vlan_cfg = is_kill;
9530 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9532 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9534 dev_err(&hdev->pdev->dev,
9535 "port vlan command, send fail, ret =%d.\n", ret);
/* Program a VLAN for one vport in hardware: first in the per-VF
 * filter, then — tracked through hdev->vlan_table bitmaps — in the
 * port filter, which is touched only when the first vport joins or
 * the last vport leaves that VLAN. VLAN 0 deletes are skipped, and a
 * duplicate VLAN-0 add (8021q module quirk) is tolerated.
 */
9539 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9540 u16 vport_id, u16 vlan_id,
9543 u16 vport_idx, vport_num = 0;
9546 if (is_kill && !vlan_id)
9549 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9551 dev_err(&hdev->pdev->dev,
9552 "Set %u vport vlan filter config fail, ret =%d.\n",
9557 /* vlan 0 may be added twice when 8021q module is enabled */
9558 if (!is_kill && !vlan_id &&
9559 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9562 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9563 dev_err(&hdev->pdev->dev,
9564 "Add port vlan failed, vport %u is already in vlan %u\n",
9570 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9571 dev_err(&hdev->pdev->dev,
9572 "Delete port vlan failed, vport %u is not in vlan %u\n",
/* count members to decide whether the port filter must change */
9577 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9580 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9581 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
/* Write the vport's TX VLAN tag offload configuration (accept/insert
 * flags, default tags, shift mode) to hardware. The target vport is
 * selected by a one-hot bit in the command's VF bitmap.
 */
9587 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9589 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9590 struct hclge_vport_vtag_tx_cfg_cmd *req;
9591 struct hclge_dev *hdev = vport->back;
9592 struct hclge_desc desc;
9596 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9598 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9599 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9600 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9601 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9602 vcfg->accept_tag1 ? 1 : 0);
9603 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9604 vcfg->accept_untag1 ? 1 : 0);
9605 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9606 vcfg->accept_tag2 ? 1 : 0);
9607 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9608 vcfg->accept_untag2 ? 1 : 0);
9609 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9610 vcfg->insert_tag1_en ? 1 : 0);
9611 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9612 vcfg->insert_tag2_en ? 1 : 0);
9613 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9614 vcfg->tag_shift_mode_en ? 1 : 0);
9615 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9617 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9618 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9619 HCLGE_VF_NUM_PER_BYTE;
9620 req->vf_bitmap[bmap_index] =
9621 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9623 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9625 dev_err(&hdev->pdev->dev,
9626 "Send port txvlan cfg command fail, ret =%d\n",
/* Write the vport's RX VLAN tag offload configuration (strip/show/
 * discard flags for both tags) to hardware, addressing the vport via
 * the same one-hot VF bitmap scheme as the TX variant.
 */
9632 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9634 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9635 struct hclge_vport_vtag_rx_cfg_cmd *req;
9636 struct hclge_dev *hdev = vport->back;
9637 struct hclge_desc desc;
9641 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9643 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9644 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9645 vcfg->strip_tag1_en ? 1 : 0);
9646 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9647 vcfg->strip_tag2_en ? 1 : 0);
9648 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9649 vcfg->vlan1_vlan_prionly ? 1 : 0);
9650 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9651 vcfg->vlan2_vlan_prionly ? 1 : 0);
9652 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9653 vcfg->strip_tag1_discard_en ? 1 : 0);
9654 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9655 vcfg->strip_tag2_discard_en ? 1 : 0);
9657 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9658 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9659 HCLGE_VF_NUM_PER_BYTE;
9660 req->vf_bitmap[bmap_index] =
9661 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9663 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9665 dev_err(&hdev->pdev->dev,
9666 "Send port rxvlan cfg command fail, ret =%d\n",
/* Configure TX and RX VLAN offload for a vport according to its
 * port-based VLAN state. With port-based VLAN disabled, tag1 passes
 * through untouched; with it enabled, tag1 is inserted from @vlan_tag
 * and stripped+discarded on receive (tag1 acceptance additionally
 * requires device version >= V3). Tag2 handling is fixed because
 * revision 0x20 hardware does not expose it to users.
 */
9672 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9673 u16 port_base_vlan_state,
9678 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9679 vport->txvlan_cfg.accept_tag1 = true;
9680 vport->txvlan_cfg.insert_tag1_en = false;
9681 vport->txvlan_cfg.default_tag1 = 0;
9683 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9685 vport->txvlan_cfg.accept_tag1 =
9686 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9687 vport->txvlan_cfg.insert_tag1_en = true;
9688 vport->txvlan_cfg.default_tag1 = vlan_tag;
9691 vport->txvlan_cfg.accept_untag1 = true;
9693 /* accept_tag2 and accept_untag2 are not supported on
9694 * pdev revision(0x20), new revision support them,
9695 * this two fields can not be configured by user.
9697 vport->txvlan_cfg.accept_tag2 = true;
9698 vport->txvlan_cfg.accept_untag2 = true;
9699 vport->txvlan_cfg.insert_tag2_en = false;
9700 vport->txvlan_cfg.default_tag2 = 0;
9701 vport->txvlan_cfg.tag_shift_mode_en = true;
9703 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9704 vport->rxvlan_cfg.strip_tag1_en = false;
9705 vport->rxvlan_cfg.strip_tag2_en =
9706 vport->rxvlan_cfg.rx_vlan_offload_en;
9707 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9709 vport->rxvlan_cfg.strip_tag1_en =
9710 vport->rxvlan_cfg.rx_vlan_offload_en;
9711 vport->rxvlan_cfg.strip_tag2_en = true;
9712 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9715 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9716 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9717 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9719 ret = hclge_set_vlan_tx_offload_cfg(vport);
9723 return hclge_set_vlan_rx_offload_cfg(vport);
/* Program the RX (outer/inner, first/second) and TX (outer/inner)
 * VLAN TPID values from hdev->vlan_type_cfg into hardware via two
 * separate firmware commands.
 */
9726 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9728 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9729 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9730 struct hclge_desc desc;
9733 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9734 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9735 rx_req->ot_fst_vlan_type =
9736 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9737 rx_req->ot_sec_vlan_type =
9738 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9739 rx_req->in_fst_vlan_type =
9740 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9741 rx_req->in_sec_vlan_type =
9742 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9744 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9746 dev_err(&hdev->pdev->dev,
9747 "Send rxvlan protocol type command fail, ret =%d\n",
9752 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9754 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9755 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9756 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9758 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9760 dev_err(&hdev->pdev->dev,
9761 "Send txvlan protocol type command fail, ret =%d\n",
/* Initialize device-wide VLAN configuration at probe/reset time:
 * enable the per-function VF egress filter (>= V2, per vport) plus
 * the port ingress filter, or the legacy V1 egress bit; default all
 * TPIDs to 802.1Q (0x8100); apply each vport's offload config from
 * its port-based VLAN state; finally ensure VLAN 0 is installed.
 */
9767 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9769 #define HCLGE_DEF_VLAN_TYPE 0x8100
9771 struct hnae3_handle *handle = &hdev->vport[0].nic;
9772 struct hclge_vport *vport;
9776 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9777 /* for revision 0x21, vf vlan filter is per function */
9778 for (i = 0; i < hdev->num_alloc_vport; i++) {
9779 vport = &hdev->vport[i];
9780 ret = hclge_set_vlan_filter_ctrl(hdev,
9781 HCLGE_FILTER_TYPE_VF,
9782 HCLGE_FILTER_FE_EGRESS,
9789 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9790 HCLGE_FILTER_FE_INGRESS, true,
9795 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9796 HCLGE_FILTER_FE_EGRESS_V1_B,
9802 handle->netdev_flags |= HNAE3_VLAN_FLTR;
9804 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9805 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9806 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9807 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9808 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9809 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9811 ret = hclge_set_vlan_protocol_type(hdev);
9815 for (i = 0; i < hdev->num_alloc_vport; i++) {
9818 vport = &hdev->vport[i];
9819 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9821 ret = hclge_vlan_offload_cfg(vport,
9822 vport->port_base_vlan_cfg.state,
9828 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
/* Append a VLAN node to the vport's software VLAN list;
 * @writen_to_tbl records whether it is already in the hardware table.
 */
9831 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9834 struct hclge_vport_vlan_cfg *vlan;
9836 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9840 vlan->hd_tbl_status = writen_to_tbl;
9841 vlan->vlan_id = vlan_id;
9843 list_add_tail(&vlan->node, &vport->vlan_list);
/* Write every VLAN in the vport's software list that is not yet in the
 * hardware filter table into hardware (used when restoring after the
 * port-based VLAN state changes). Marks each entry written on success.
 */
9846 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9848 struct hclge_vport_vlan_cfg *vlan, *tmp;
9849 struct hclge_dev *hdev = vport->back;
9852 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9853 if (!vlan->hd_tbl_status) {
9854 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9856 vlan->vlan_id, false);
9858 dev_err(&hdev->pdev->dev,
9859 "restore vport vlan list failed, ret=%d\n",
9864 vlan->hd_tbl_status = true;
/* Remove a single VLAN id from the vport's software list, optionally
 * deleting it from the hardware filter table as well (@is_write_tbl,
 * only when the entry was actually written to hardware).
 */
9870 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9873 struct hclge_vport_vlan_cfg *vlan, *tmp;
9874 struct hclge_dev *hdev = vport->back;
9876 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9877 if (vlan->vlan_id == vlan_id) {
9878 if (is_write_tbl && vlan->hd_tbl_status)
9879 hclge_set_vlan_filter_hw(hdev,
9885 list_del(&vlan->node);
/* Remove all VLANs of a vport from the hardware filter table.
 * @is_del_list: when set, the software list entries are also deleted;
 * otherwise they are kept with hd_tbl_status cleared so they can be
 * re-written later. Also clears the vport's "vf vlan table full" bit.
 */
9892 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9894 struct hclge_vport_vlan_cfg *vlan, *tmp;
9895 struct hclge_dev *hdev = vport->back;
9897 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9898 if (vlan->hd_tbl_status)
9899 hclge_set_vlan_filter_hw(hdev,
9905 vlan->hd_tbl_status = false;
9907 list_del(&vlan->node);
9911 clear_bit(vport->vport_id, hdev->vf_vlan_full);
/* Free the software VLAN lists of every vport on driver teardown.
 * Hardware entries are not touched here.
 */
9914 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9916 struct hclge_vport_vlan_cfg *vlan, *tmp;
9917 struct hclge_vport *vport;
9920 for (i = 0; i < hdev->num_alloc_vport; i++) {
9921 vport = &hdev->vport[i];
9922 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9923 list_del(&vlan->node);
/* Re-program a vport's VLAN filter entries into hardware after a reset:
 * first the port-based VLAN (when enabled), then every id in the
 * software VLAN list, marking entries written on success.
 */
9929 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9931 struct hclge_vport_vlan_cfg *vlan, *tmp;
9932 struct hclge_dev *hdev = vport->back;
9938 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9939 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9940 state = vport->port_base_vlan_cfg.state;
9942 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
/* clear first so the re-add below takes effect cleanly */
9943 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9944 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9945 vport->vport_id, vlan_id,
9950 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9951 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9953 vlan->vlan_id, false);
9956 vlan->hd_tbl_status = true;
9960 /* For global reset and imp reset, hardware will clear the mac table,
9961 * so we change the mac address state from ACTIVE to TO_ADD, then they
9962 can be restored in the service task after reset completes. Furthermore,
9963 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
9964 * be restored after reset, so just remove these mac nodes from mac_list.
/* Walk a MAC-node list and convert states for reset recovery:
 * ACTIVE -> TO_ADD (must be re-programmed after the reset), and
 * TO_DEL nodes are dropped from the list (hardware already lost them).
 */
9966 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9968 struct hclge_mac_node *mac_node, *tmp;
9970 list_for_each_entry_safe(mac_node, tmp, list, node) {
9971 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9972 mac_node->state = HCLGE_MAC_TO_ADD;
9973 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9974 list_del(&mac_node->node);
/* Prepare a vport's unicast and multicast MAC lists for restoration
 * after reset, then flag the table as changed so the service task
 * re-syncs it. Runs under the vport's mac_list_lock.
 */
9980 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9982 spin_lock_bh(&vport->mac_list_lock);
9984 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9985 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9986 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9988 spin_unlock_bh(&vport->mac_list_lock);
/* Restore PF (vport 0) hardware tables after reset: MAC table, VLAN
 * filter table, promiscuous mode and flow-director entries. The state
 * bits make the service task re-apply promisc/user-def settings.
 */
9991 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9993 struct hclge_vport *vport = &hdev->vport[0];
9994 struct hnae3_handle *handle = &vport->nic;
9996 hclge_restore_mac_table_common(vport);
9997 hclge_restore_vport_vlan_table(vport);
9998 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9999 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10000 hclge_restore_fd_entries(handle);
/* Enable/disable hardware RX VLAN tag stripping for a vport.
 * Which tag (1 or 2) is stripped depends on whether a port-based VLAN
 * is active: with port-based VLAN enabled, tag2 is always stripped and
 * discarded, and @enable controls tag1 instead.
 */
10003 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10005 struct hclge_vport *vport = hclge_get_vport(handle);
10007 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10008 vport->rxvlan_cfg.strip_tag1_en = false;
10009 vport->rxvlan_cfg.strip_tag2_en = enable;
10010 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10012 vport->rxvlan_cfg.strip_tag1_en = enable;
10013 vport->rxvlan_cfg.strip_tag2_en = true;
10014 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10017 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10018 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10019 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10020 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10022 return hclge_set_vlan_rx_offload_cfg(vport);
/* Switch the vport's hardware VLAN filter entries when the port-based
 * VLAN state changes: on ENABLE, drop all list-based entries and install
 * only the new port-based VLAN; otherwise remove the old port-based
 * VLAN and re-write the vport's software VLAN list into hardware.
 */
10025 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10026 u16 port_base_vlan_state,
10027 struct hclge_vlan_info *new_info,
10028 struct hclge_vlan_info *old_info)
10030 struct hclge_dev *hdev = vport->back;
10033 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10034 hclge_rm_vport_all_vlan_table(vport, false);
10035 return hclge_set_vlan_filter_hw(hdev,
10036 htons(new_info->vlan_proto),
10038 new_info->vlan_tag,
10042 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10043 vport->vport_id, old_info->vlan_tag,
10048 return hclge_add_vport_all_vlan_table(vport);
/* Apply a new port-based VLAN state/tag to a vport: reconfigure the TX
 * VLAN offload, update the hardware filter entries (add new tag, remove
 * old one on MODIFY), then record the new state and vlan_info.
 * NOTE(review): error-return lines are not visible in this chunk.
 */
10051 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10052 struct hclge_vlan_info *vlan_info)
10054 struct hnae3_handle *nic = &vport->nic;
10055 struct hclge_vlan_info *old_vlan_info;
10056 struct hclge_dev *hdev = vport->back;
10059 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10061 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
10065 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10066 /* add new VLAN tag */
10067 ret = hclge_set_vlan_filter_hw(hdev,
10068 htons(vlan_info->vlan_proto),
10070 vlan_info->vlan_tag,
10075 /* remove old VLAN tag */
10076 ret = hclge_set_vlan_filter_hw(hdev,
10077 htons(old_vlan_info->vlan_proto),
10079 old_vlan_info->vlan_tag,
10087 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10092 /* update state only when disable/enable port based VLAN */
10093 vport->port_base_vlan_cfg.state = state;
10094 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10095 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10097 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10100 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
10101 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
10102 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
/* Compute the port-based VLAN state transition implied by a requested
 * vlan value against the vport's current state: returns NOCHANGE,
 * ENABLE, DISABLE or MODIFY.
 */
10107 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10108 enum hnae3_port_base_vlan_state state,
10111 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10113 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10115 return HNAE3_PORT_BASE_VLAN_ENABLE;
10118 return HNAE3_PORT_BASE_VLAN_DISABLE;
10119 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
/* same tag requested again -> nothing to do */
10120 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10122 return HNAE3_PORT_BASE_VLAN_MODIFY;
/* ndo_set_vf_vlan handler: set/clear the port-based VLAN of a VF.
 * Validates device version (V2+ only), vlan id (< 4095), qos (3 bits,
 * <= 7) and protocol (802.1Q only), then updates the port-based VLAN
 * config and, pre-V3, pushes the new setting to an alive VF via mailbox.
 */
10126 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10127 u16 vlan, u8 qos, __be16 proto)
10129 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10130 struct hclge_vport *vport = hclge_get_vport(handle);
10131 struct hclge_dev *hdev = vport->back;
10132 struct hclge_vlan_info vlan_info;
10136 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10137 return -EOPNOTSUPP;
10139 vport = hclge_get_vf_vport(hdev, vfid);
10143 /* qos is a 3 bits value, so can not be bigger than 7 */
10144 if (vlan > VLAN_N_VID - 1 || qos > 7)
10146 if (proto != htons(ETH_P_8021Q))
10147 return -EPROTONOSUPPORT;
10149 state = hclge_get_port_base_vlan_state(vport,
10150 vport->port_base_vlan_cfg.state,
10152 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10155 vlan_info.vlan_tag = vlan;
10156 vlan_info.qos = qos;
10157 vlan_info.vlan_proto = ntohs(proto);
10159 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10161 dev_err(&hdev->pdev->dev,
10162 "failed to update port base vlan for vf %d, ret = %d\n",
10167 /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10170 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10171 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10172 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10173 vport->vport_id, state,
/* Remove the port-based VLAN hardware filter entry of every VF vport
 * (used on teardown). Failures are logged but not fatal.
 */
10180 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10182 struct hclge_vlan_info *vlan_info;
10183 struct hclge_vport *vport;
10187 /* clear port base vlan for all vf */
10188 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10189 vport = &hdev->vport[vf];
10190 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10192 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10194 vlan_info->vlan_tag, true);
10196 dev_err(&hdev->pdev->dev,
10197 "failed to clear vf vlan for vf%d, ret = %d\n",
10198 vf - HCLGE_VF_VPORT_START_NUM, ret);
/* Add or remove (@is_kill) a VLAN id for the PF's vport. During reset
 * the deletion is deferred via vlan_del_fail_bmap and retried later by
 * hclge_sync_vlan_filter().
 */
10202 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10203 u16 vlan_id, bool is_kill)
10205 struct hclge_vport *vport = hclge_get_vport(handle);
10206 struct hclge_dev *hdev = vport->back;
10207 bool writen_to_tbl = false;
10210 /* When device is resetting or reset failed, firmware is unable to
10211 * handle mailbox. Just record the vlan id, and remove it after
10214 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10215 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10216 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10220 /* when port base vlan enabled, we use port base vlan as the vlan
10221 * filter entry. In this case, we don't update vlan filter table
10222 * when user add new vlan or remove exist vlan, just update the vport
10223 * vlan list. The vlan id in vlan list will be written in vlan filter
10224 * table until port base vlan disabled
10226 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10227 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10229 writen_to_tbl = true;
10234 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10236 hclge_add_vport_vlan_table(vport, vlan_id,
10238 } else if (is_kill) {
10239 /* when remove hw vlan filter failed, record the vlan id,
10240 * and try to remove it from hw later, to be consistent
10243 set_bit(vlan_id, vport->vlan_del_fail_bmap);
/* Service-task helper: retry deferred VLAN deletions recorded in each
 * vport's vlan_del_fail_bmap, bounded by HCLGE_MAX_SYNC_COUNT removals
 * per invocation so the task cannot spin indefinitely.
 */
10248 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10250 #define HCLGE_MAX_SYNC_COUNT 60
10252 int i, ret, sync_cnt = 0;
10255 /* start from vport 1 for PF is always alive */
10256 for (i = 0; i < hdev->num_alloc_vport; i++) {
10257 struct hclge_vport *vport = &hdev->vport[i];
10259 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10261 while (vlan_id != VLAN_N_VID) {
10262 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10263 vport->vport_id, vlan_id,
/* -EINVAL means the entry is already gone; treat as success */
10265 if (ret && ret != -EINVAL)
10268 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10269 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10272 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10275 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
/* Program the MAC's maximum frame size (@new_mps, in bytes) via the
 * CONFIG_MAX_FRM_SIZE firmware command; min frame size stays fixed.
 */
10281 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10283 struct hclge_config_max_frm_size_cmd *req;
10284 struct hclge_desc desc;
10286 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10288 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10289 req->max_frm_size = cpu_to_le16(new_mps);
10290 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10292 return hclge_cmd_send(&hdev->hw, &desc, 1);
/* ndo_change_mtu hook: forward the request to the vport-level helper. */
10295 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10297 struct hclge_vport *vport = hclge_get_vport(handle);
10299 return hclge_set_vport_mtu(vport, new_mtu);
/* Change a vport's MTU. Converts MTU to max frame size (L2 header, FCS
 * and two VLAN tags included), validates it against device limits, and
 * for a VF only records vport->mps (must fit within the PF's mps). For
 * the PF it reprograms the MAC and re-allocates packet buffers with the
 * client brought down around the change. Serialized by vport_lock.
 */
10302 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10304 struct hclge_dev *hdev = vport->back;
10305 int i, max_frm_size, ret;
10307 /* HW supports 2-layer VLAN */
10308 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10309 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10310 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10313 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10314 mutex_lock(&hdev->vport_lock);
10315 /* VF's mps must fit within hdev->mps */
10316 if (vport->vport_id && max_frm_size > hdev->mps) {
10317 mutex_unlock(&hdev->vport_lock);
10319 } else if (vport->vport_id) {
10320 vport->mps = max_frm_size;
10321 mutex_unlock(&hdev->vport_lock);
10325 /* PF's mps must be greater than VF's mps */
10326 for (i = 1; i < hdev->num_alloc_vport; i++)
10327 if (max_frm_size < hdev->vport[i].mps) {
10328 mutex_unlock(&hdev->vport_lock);
/* quiesce the NIC client while the MAC frame size changes */
10332 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10334 ret = hclge_set_mac_mtu(hdev, max_frm_size);
10336 dev_err(&hdev->pdev->dev,
10337 "Change mtu fail, ret =%d\n", ret);
10341 hdev->mps = max_frm_size;
10342 vport->mps = max_frm_size;
/* buffer sizes depend on mps, so re-allocate after the change */
10344 ret = hclge_buffer_alloc(hdev);
10346 dev_err(&hdev->pdev->dev,
10347 "Allocate buffer fail, ret =%d\n", ret);
10350 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10351 mutex_unlock(&hdev->vport_lock);
/* Assert or deassert the soft reset of one TQP (queue pair) via the
 * RESET_TQP_QUEUE firmware command.
 */
10355 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10358 struct hclge_reset_tqp_queue_cmd *req;
10359 struct hclge_desc desc;
10362 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10364 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10365 req->tqp_id = cpu_to_le16(queue_id);
10367 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10369 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10371 dev_err(&hdev->pdev->dev,
10372 "Send tqp reset cmd error, status =%d\n", ret);
/* Query whether the given TQP has completed its hardware reset.
 * Returns the ready_to_reset bit (non-zero when reset has taken effect);
 * NOTE(review): the error-path return value is not visible in this chunk.
 */
10379 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10381 struct hclge_reset_tqp_queue_cmd *req;
10382 struct hclge_desc desc;
10385 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10387 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10388 req->tqp_id = cpu_to_le16(queue_id);
10390 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10392 dev_err(&hdev->pdev->dev,
10393 "Get reset status error, status =%d\n", ret);
10397 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
/* Convert a handle-local queue id to the device-global TQP index.
 * (Function name "covert" is a historical typo for "convert"; it is
 * exported, so it cannot be renamed here.)
 */
10400 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10402 struct hnae3_queue *queue;
10403 struct hclge_tqp *tqp;
10405 queue = handle->kinfo.tqp[queue_id];
10406 tqp = container_of(queue, struct hclge_tqp, q);
/* Reset every TQP of the handle one by one: assert the per-queue soft
 * reset, poll for completion (1ms steps, bounded by
 * HCLGE_TQP_RESET_TRY_TIMES), then deassert the reset.
 */
10411 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10413 struct hclge_vport *vport = hclge_get_vport(handle);
10414 struct hclge_dev *hdev = vport->back;
10415 u16 reset_try_times = 0;
10421 for (i = 0; i < handle->kinfo.num_tqps; i++) {
10422 queue_gid = hclge_covert_handle_qid_global(handle, i);
10423 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10425 dev_err(&hdev->pdev->dev,
10426 "failed to send reset tqp cmd, ret = %d\n",
10431 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10432 reset_status = hclge_get_reset_status(hdev, queue_gid);
10436 /* Wait for tqp hw reset */
10437 usleep_range(1000, 1200);
10440 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10441 dev_err(&hdev->pdev->dev,
10442 "wait for tqp hw reset timeout\n");
10446 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10448 dev_err(&hdev->pdev->dev,
10449 "failed to deassert soft reset, ret = %d\n",
/* reset the retry counter for the next queue */
10453 reset_try_times = 0;
/* Reset the handle's RCB (all its TQPs at once) with a single
 * CFG_RST_TRIGGER command. If firmware reports the command is not
 * supported, fall back to the per-queue hclge_reset_tqp_cmd() path.
 */
10458 static int hclge_reset_rcb(struct hnae3_handle *handle)
10460 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10461 #define HCLGE_RESET_RCB_SUCCESS 1U
10463 struct hclge_vport *vport = hclge_get_vport(handle);
10464 struct hclge_dev *hdev = vport->back;
10465 struct hclge_reset_cmd *req;
10466 struct hclge_desc desc;
10471 queue_gid = hclge_covert_handle_qid_global(handle, 0);
10473 req = (struct hclge_reset_cmd *)desc.data;
10474 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10475 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10476 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10477 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10479 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10481 dev_err(&hdev->pdev->dev,
10482 "failed to send rcb reset cmd, ret = %d\n", ret);
10486 return_status = req->fun_reset_rcb_return_status;
10487 if (return_status == HCLGE_RESET_RCB_SUCCESS)
10490 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10491 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10496 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10497 * again to reset all tqps
10499 return hclge_reset_tqp_cmd(handle);
/* Public entry point for resetting the handle's queues: disable the
 * PF's TQPs first (vport_id == 0), then perform the RCB reset.
 */
10502 int hclge_reset_tqp(struct hnae3_handle *handle)
10504 struct hclge_vport *vport = hclge_get_vport(handle);
10505 struct hclge_dev *hdev = vport->back;
10508 /* only need to disable PF's tqp */
10509 if (!vport->vport_id) {
10510 ret = hclge_tqp_enable(handle, false);
10512 dev_err(&hdev->pdev->dev,
10513 "failed to disable tqp, ret = %d\n", ret);
10518 return hclge_reset_rcb(handle);
/* Return the cached firmware version of the device. */
10521 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10523 struct hclge_vport *vport = hclge_get_vport(handle);
10524 struct hclge_dev *hdev = vport->back;
10526 return hdev->fw_version;
/* Advertise the requested (possibly asymmetric) pause settings on the
 * attached PHY. NOTE(review): the no-PHY early return is not visible
 * in this chunk.
 */
10529 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10531 struct phy_device *phydev = hdev->hw.mac.phydev;
10536 phy_set_asym_pause(phydev, rx_en, tx_en);
/* Program MAC-level pause (link flow control). When PFC is the active
 * flow-control mode, the visible condition adjusts behavior before the
 * MAC pause enable is configured.
 */
10539 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10543 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10546 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10548 dev_err(&hdev->pdev->dev,
10549 "configure pauseparam error, ret = %d.\n", ret);
/* Resolve flow control after PHY autonegotiation completes: combine
 * local advertisement with the link partner's pause ability, then apply
 * the resolved rx/tx pause settings to the MAC.
 */
10554 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10556 struct phy_device *phydev = hdev->hw.mac.phydev;
10557 u16 remote_advertising = 0;
10558 u16 local_advertising;
10559 u32 rx_pause, tx_pause;
10562 if (!phydev->link || !phydev->autoneg)
10565 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10568 remote_advertising = LPA_PAUSE_CAP;
10570 if (phydev->asym_pause)
10571 remote_advertising |= LPA_PAUSE_ASYM;
10573 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10574 remote_advertising);
10575 tx_pause = flowctl & FLOW_CTRL_TX;
10576 rx_pause = flowctl & FLOW_CTRL_RX;
/* pause is meaningless at half duplex */
10578 if (phydev->duplex == HCLGE_MAC_HALF) {
10583 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
/* ethtool get_pauseparam: report autoneg (copper media only) and derive
 * rx/tx pause enables from the current tm_info flow-control mode.
 */
10586 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10587 u32 *rx_en, u32 *tx_en)
10589 struct hclge_vport *vport = hclge_get_vport(handle);
10590 struct hclge_dev *hdev = vport->back;
10591 u8 media_type = hdev->hw.mac.media_type;
10593 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10594 hclge_get_autoneg(handle) : 0;
10596 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10602 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10605 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10608 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
/* Record the user's requested pause settings as the last-known
 * flow-control mode and make it the active tm_info mode.
 */
10617 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10618 u32 rx_en, u32 tx_en)
10620 if (rx_en && tx_en)
10621 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10622 else if (rx_en && !tx_en)
10623 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10624 else if (!rx_en && tx_en)
10625 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10627 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10629 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
/* ethtool set_pauseparam: reject autoneg changes here (must use
 * `ethtool -s`), reject when PFC is active, record the user's choice,
 * and either program the MAC directly or restart PHY autoneg so the new
 * advertisement takes effect.
 */
10632 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10633 u32 rx_en, u32 tx_en)
10635 struct hclge_vport *vport = hclge_get_vport(handle);
10636 struct hclge_dev *hdev = vport->back;
10637 struct phy_device *phydev = hdev->hw.mac.phydev;
10640 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10641 fc_autoneg = hclge_get_autoneg(handle);
10642 if (auto_neg != fc_autoneg) {
10643 dev_info(&hdev->pdev->dev,
10644 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10645 return -EOPNOTSUPP;
10649 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10650 dev_info(&hdev->pdev->dev,
10651 "Priority flow control enabled. Cannot set link flow control.\n");
10652 return -EOPNOTSUPP;
10655 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10657 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10659 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10660 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10663 return phy_start_aneg(phydev);
10665 return -EOPNOTSUPP;
/* Report the cached MAC speed, duplex and autoneg state for ethtool
 * link-settings queries.
 */
10668 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10669 u8 *auto_neg, u32 *speed, u8 *duplex)
10671 struct hclge_vport *vport = hclge_get_vport(handle);
10672 struct hclge_dev *hdev = vport->back;
10675 *speed = hdev->hw.mac.speed;
10677 *duplex = hdev->hw.mac.duplex;
10679 *auto_neg = hdev->hw.mac.autoneg;
/* Report the port's media and module type, refreshing port info first
 * because the periodic service task does not run while the nic is down.
 */
10682 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10685 struct hclge_vport *vport = hclge_get_vport(handle);
10686 struct hclge_dev *hdev = vport->back;
10688 /* When nic is down, the service task is not running, doesn't update
10689 * the port information per second. Query the port information before
10690 * return the media type, ensure getting the correct media information.
10692 hclge_update_port_info(hdev);
10695 *media_type = hdev->hw.mac.media_type;
10698 *module_type = hdev->hw.mac.module_type;
/* Read the PHY's MDI/MDI-X control and status for ethtool: switch to
 * the MDIX register page, read control/status, restore the copper page,
 * then translate hardware values into ETH_TP_MDI* codes.
 */
10701 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10702 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10704 struct hclge_vport *vport = hclge_get_vport(handle);
10705 struct hclge_dev *hdev = vport->back;
10706 struct phy_device *phydev = hdev->hw.mac.phydev;
10707 int mdix_ctrl, mdix, is_resolved;
10708 unsigned int retval;
10711 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10712 *tp_mdix = ETH_TP_MDI_INVALID;
10716 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10718 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10719 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10720 HCLGE_PHY_MDIX_CTRL_S);
10722 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10723 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10724 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
/* restore the default register page before returning */
10726 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10728 switch (mdix_ctrl) {
10730 *tp_mdix_ctrl = ETH_TP_MDI;
10733 *tp_mdix_ctrl = ETH_TP_MDI_X;
10736 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10739 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10744 *tp_mdix = ETH_TP_MDI_INVALID;
10746 *tp_mdix = ETH_TP_MDI_X;
10748 *tp_mdix = ETH_TP_MDI;
/* Dump a human-readable summary of the PF configuration (queue counts,
 * buffer sizes, flags) to the kernel log; used at init when driver
 * messages are enabled.
 */
10751 static void hclge_info_show(struct hclge_dev *hdev)
10753 struct device *dev = &hdev->pdev->dev;
10755 dev_info(dev, "PF info begin:\n");
10757 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10758 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10759 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10760 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10761 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10762 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10763 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10764 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10765 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10766 dev_info(dev, "This is %s PF\n",
10767 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10768 dev_info(dev, "DCB %s\n",
10769 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10770 dev_info(dev, "MQPRIO %s\n",
10771 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10773 dev_info(dev, "PF info end.\n");
/* Initialize the NIC client instance on a vport. If a reset started or
 * completed while init ran (reset_cnt changed), the registration is
 * rolled back: wait for the reset to finish, then uninit the instance.
 */
10776 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10777 struct hclge_vport *vport)
10779 struct hnae3_client *client = vport->nic.client;
10780 struct hclge_dev *hdev = ae_dev->priv;
10781 int rst_cnt = hdev->rst_stats.reset_cnt;
10784 ret = client->ops->init_instance(&vport->nic);
10788 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
/* a reset raced with init - undo the registration */
10789 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10790 rst_cnt != hdev->rst_stats.reset_cnt) {
10795 /* Enable nic hw error interrupts */
10796 ret = hclge_config_nic_hw_error(hdev, true);
10798 dev_err(&ae_dev->pdev->dev,
10799 "fail(%d) to enable hw error interrupts\n", ret);
10803 hnae3_set_client_init_flag(client, ae_dev, 1);
10805 if (netif_msg_drv(&hdev->vport->nic))
10806 hclge_info_show(hdev);
/* error path: wait out any in-flight reset before uninit */
10811 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10812 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10813 msleep(HCLGE_WAIT_RESET_DONE);
10815 client->ops->uninit_instance(&vport->nic, 0);
/* Initialize the RoCE client instance on a vport (no-op unless the
 * device supports RoCE and a roce client is registered). Uses the same
 * reset-race rollback pattern as the NIC client init.
 */
10820 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10821 struct hclge_vport *vport)
10823 struct hclge_dev *hdev = ae_dev->priv;
10824 struct hnae3_client *client;
10828 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10832 client = hdev->roce_client;
10833 ret = hclge_init_roce_base_info(vport);
10837 rst_cnt = hdev->rst_stats.reset_cnt;
10838 ret = client->ops->init_instance(&vport->roce);
10842 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
/* a reset raced with init - undo the registration */
10843 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10844 rst_cnt != hdev->rst_stats.reset_cnt) {
10846 goto init_roce_err;
10849 /* Enable roce ras interrupts */
10850 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10852 dev_err(&ae_dev->pdev->dev,
10853 "fail(%d) to enable roce ras interrupts\n", ret);
10854 goto init_roce_err;
10857 hnae3_set_client_init_flag(client, ae_dev, 1);
/* error path: wait out any in-flight reset before uninit */
10862 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10863 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10864 msleep(HCLGE_WAIT_RESET_DONE);
10866 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
/* Framework hook: register a client (KNIC or ROCE) with this ae_dev on
 * vport 0 and run its instance init. A KNIC registration also attempts
 * RoCE client init in case the roce client registered first. On failure
 * the client pointers are cleared.
 */
10871 static int hclge_init_client_instance(struct hnae3_client *client,
10872 struct hnae3_ae_dev *ae_dev)
10874 struct hclge_dev *hdev = ae_dev->priv;
10875 struct hclge_vport *vport = &hdev->vport[0];
10878 switch (client->type) {
10879 case HNAE3_CLIENT_KNIC:
10880 hdev->nic_client = client;
10881 vport->nic.client = client;
10882 ret = hclge_init_nic_client_instance(ae_dev, vport);
10886 ret = hclge_init_roce_client_instance(ae_dev, vport);
10891 case HNAE3_CLIENT_ROCE:
10892 if (hnae3_dev_roce_supported(hdev)) {
10893 hdev->roce_client = client;
10894 vport->roce.client = client;
10897 ret = hclge_init_roce_client_instance(ae_dev, vport);
/* error paths: detach the client pointers */
10909 hdev->nic_client = NULL;
10910 vport->nic.client = NULL;
10913 hdev->roce_client = NULL;
10914 vport->roce.client = NULL;
/* Framework hook: tear down client instances. The RoCE client is always
 * uninitialized first; the NIC client only when the caller is not the
 * RoCE client itself. Both paths wait for any in-flight reset to finish
 * before calling uninit_instance.
 */
10918 static void hclge_uninit_client_instance(struct hnae3_client *client,
10919 struct hnae3_ae_dev *ae_dev)
10921 struct hclge_dev *hdev = ae_dev->priv;
10922 struct hclge_vport *vport = &hdev->vport[0];
10924 if (hdev->roce_client) {
10925 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10926 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10927 msleep(HCLGE_WAIT_RESET_DONE);
10929 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10930 hdev->roce_client = NULL;
10931 vport->roce.client = NULL;
10933 if (client->type == HNAE3_CLIENT_ROCE)
10935 if (hdev->nic_client && client->ops->uninit_instance) {
10936 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10937 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10938 msleep(HCLGE_WAIT_RESET_DONE);
10940 client->ops->uninit_instance(&vport->nic, 0);
10941 hdev->nic_client = NULL;
10942 vport->nic.client = NULL;
/* Map the optional device-memory BAR (BAR 4) with write-combining.
 * Devices without that BAR return success immediately.
 */
10946 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10948 #define HCLGE_MEM_BAR 4
10950 struct pci_dev *pdev = hdev->pdev;
10951 struct hclge_hw *hw = &hdev->hw;
10953 /* for device does not have device memory, return directly */
10954 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10957 hw->mem_base = devm_ioremap_wc(&pdev->dev,
10958 pci_resource_start(pdev, HCLGE_MEM_BAR),
10959 pci_resource_len(pdev, HCLGE_MEM_BAR));
10960 if (!hw->mem_base) {
10961 dev_err(&pdev->dev, "failed to map device memory\n");
/* Bring up the PCI device: enable it, set the DMA mask (64-bit with a
 * 32-bit fallback), claim regions, map the configuration BAR (BAR 2)
 * and the optional device-memory BAR, and read the total VF count.
 * Error labels unwind each step in reverse order.
 */
10968 static int hclge_pci_init(struct hclge_dev *hdev)
10970 struct pci_dev *pdev = hdev->pdev;
10971 struct hclge_hw *hw;
10974 ret = pci_enable_device(pdev);
10976 dev_err(&pdev->dev, "failed to enable PCI device\n");
10980 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10982 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10984 dev_err(&pdev->dev,
10985 "can't set consistent PCI DMA");
10986 goto err_disable_device;
10988 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10991 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10993 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10994 goto err_disable_device;
10997 pci_set_master(pdev);
10999 hw->io_base = pcim_iomap(pdev, 2, 0);
11000 if (!hw->io_base) {
11001 dev_err(&pdev->dev, "Can't map configuration register space\n");
11003 goto err_clr_master;
11006 ret = hclge_dev_mem_map(hdev);
11008 goto err_unmap_io_base;
11010 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
/* error unwind labels */
11015 pcim_iounmap(pdev, hdev->hw.io_base);
11017 pci_clear_master(pdev);
11018 pci_release_regions(pdev);
11019 err_disable_device:
11020 pci_disable_device(pdev);
/* Reverse of hclge_pci_init(): unmap BARs, free IRQ vectors, drop bus
 * mastering, release regions and disable the PCI device.
 */
11025 static void hclge_pci_uninit(struct hclge_dev *hdev)
11027 struct pci_dev *pdev = hdev->pdev;
11029 if (hdev->hw.mem_base)
11030 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11032 pcim_iounmap(pdev, hdev->hw.io_base);
11033 pci_free_irq_vectors(pdev);
11034 pci_clear_master(pdev);
11035 pci_release_mem_regions(pdev);
11036 pci_disable_device(pdev);
/* Initialize the driver state bits: mark the service initialized and
 * the device down, and clear all reset/mailbox in-progress flags.
 */
11039 static void hclge_state_init(struct hclge_dev *hdev)
11041 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11042 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11043 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11044 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11045 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11046 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11047 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
/* Mark the device as going away and stop deferred work: the reset
 * timer and the delayed service task (only if they were ever set up).
 */
11050 static void hclge_state_uninit(struct hclge_dev *hdev)
11052 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11053 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11055 if (hdev->reset_timer.function)
11056 del_timer_sync(&hdev->reset_timer);
11057 if (hdev->service_task.work.func)
11058 cancel_delayed_work_sync(&hdev->service_task);
/* Prepare the device for a Function Level Reset: take the reset
 * semaphore, run reset preparation (retrying up to HCLGE_FLR_RETRY_CNT
 * times with a 500ms pause when it fails or a reset is pending), then
 * mask the misc vector and disable the command queue for the FLR.
 */
11061 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
11063 #define HCLGE_FLR_RETRY_WAIT_MS 500
11064 #define HCLGE_FLR_RETRY_CNT 5
11066 struct hclge_dev *hdev = ae_dev->priv;
11071 down(&hdev->reset_sem);
11072 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11073 hdev->reset_type = HNAE3_FLR_RESET;
11074 ret = hclge_reset_prepare(hdev);
11075 if (ret || hdev->reset_pending) {
11076 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
11078 if (hdev->reset_pending ||
11079 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
11080 dev_err(&hdev->pdev->dev,
11081 "reset_pending:0x%lx, retry_cnt:%d\n",
11082 hdev->reset_pending, retry_cnt);
/* release the lock before sleeping, then retry */
11083 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11084 up(&hdev->reset_sem);
11085 msleep(HCLGE_FLR_RETRY_WAIT_MS);
11090 /* disable misc vector before FLR done */
11091 hclge_enable_vector(&hdev->misc_vector, false);
11092 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11093 hdev->rst_stats.flr_rst_cnt++;
/* Finish a Function Level Reset: re-enable the misc vector, rebuild
 * the device state, then clear the reset flags and release the reset
 * semaphore taken in hclge_flr_prepare().
 */
11096 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
11098 struct hclge_dev *hdev = ae_dev->priv;
11101 hclge_enable_vector(&hdev->misc_vector, true);
11103 ret = hclge_reset_rebuild(hdev);
11105 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11107 hdev->reset_type = HNAE3_NONE_RESET;
11108 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11109 up(&hdev->reset_sem);
/* Clear the FUNC_RST_ING flag of every vport after reset completes.
 * Failures are only logged - a VF that missed the clear will recover
 * on its own reset path.
 */
11112 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11116 for (i = 0; i < hdev->num_alloc_vport; i++) {
11117 struct hclge_vport *vport = &hdev->vport[i];
11120 /* Send cmd to clear VF's FUNC_RST_ING */
11121 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11123 dev_warn(&hdev->pdev->dev,
11124 "clear vf(%u) rst failed %d!\n",
11125 vport->vport_id, ret);
/* One-time initialization of the PF ae_dev: allocates the hclge_dev,
 * brings up PCI, the firmware command queue, MSI/MSI-X, TQPs/vports,
 * MDIO (for copper ports without PHY-in-IMP), MAC/VLAN/TM/RSS/FD
 * configuration, then arms the service task and misc vector.
 * Error paths unwind in reverse order via the goto labels at the end.
 * Returns 0 on success, negative errno on failure.
 */
11129 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11131 struct pci_dev *pdev = ae_dev->pdev;
11132 struct hclge_dev *hdev;
11135 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11140 hdev->ae_dev = ae_dev;
11141 hdev->reset_type = HNAE3_NONE_RESET;
11142 hdev->reset_level = HNAE3_FUNC_RESET;
11143 ae_dev->priv = hdev;
11145 /* HW supports 2 layers of VLAN tags, so reserve room for both */
11146 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11148 mutex_init(&hdev->vport_lock);
11149 spin_lock_init(&hdev->fd_rule_lock);
11150 sema_init(&hdev->reset_sem, 1);
11152 ret = hclge_pci_init(hdev);
11156 /* Firmware command queue initialize */
11157 ret = hclge_cmd_queue_init(hdev);
11159 goto err_pci_uninit;
11161 /* Firmware command initialize */
11162 ret = hclge_cmd_init(hdev);
11164 goto err_cmd_uninit;
11166 ret = hclge_get_cap(hdev);
11168 goto err_cmd_uninit;
11170 ret = hclge_query_dev_specs(hdev);
11172 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11174 goto err_cmd_uninit;
11177 ret = hclge_configure(hdev);
11179 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11180 goto err_cmd_uninit;
11183 ret = hclge_init_msi(hdev);
11185 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11186 goto err_cmd_uninit;
11189 ret = hclge_misc_irq_init(hdev);
11191 goto err_msi_uninit;
11193 ret = hclge_alloc_tqps(hdev);
11195 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11196 goto err_msi_irq_uninit;
11199 ret = hclge_alloc_vport(hdev);
11201 goto err_msi_irq_uninit;
11203 ret = hclge_map_tqp(hdev);
11205 goto err_msi_irq_uninit;
/* MDIO bus is only needed for copper ports whose PHY is not managed
 * by the IMP firmware.
 */
11207 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11208 !hnae3_dev_phy_imp_supported(hdev)) {
11209 ret = hclge_mac_mdio_config(hdev);
11211 goto err_msi_irq_uninit;
11214 ret = hclge_init_umv_space(hdev);
11216 goto err_mdiobus_unreg;
11218 ret = hclge_mac_init(hdev);
11220 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11221 goto err_mdiobus_unreg;
11224 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11226 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11227 goto err_mdiobus_unreg;
11230 ret = hclge_config_gro(hdev, true);
11232 goto err_mdiobus_unreg;
11234 ret = hclge_init_vlan_config(hdev);
11236 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11237 goto err_mdiobus_unreg;
11240 ret = hclge_tm_schd_init(hdev);
11242 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11243 goto err_mdiobus_unreg;
11246 ret = hclge_rss_init_cfg(hdev);
11248 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11249 goto err_mdiobus_unreg;
11252 ret = hclge_rss_init_hw(hdev);
11254 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11255 goto err_mdiobus_unreg;
11258 ret = init_mgr_tbl(hdev);
11260 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11261 goto err_mdiobus_unreg;
11264 ret = hclge_init_fd_config(hdev);
11266 dev_err(&pdev->dev,
11267 "fd table init fail, ret=%d\n", ret);
11268 goto err_mdiobus_unreg;
11271 INIT_KFIFO(hdev->mac_tnl_log);
11273 hclge_dcb_ops_set(hdev);
11275 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11276 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11278 /* Setup affinity after service timer setup because add_timer_on
11279 * is called in affinity notify.
11281 hclge_misc_affinity_setup(hdev);
11283 hclge_clear_all_event_cause(hdev);
11284 hclge_clear_resetting_state(hdev);
11286 /* Log and clear the hw errors that already occurred */
11287 hclge_handle_all_hns_hw_errors(ae_dev);
11289 /* request delayed reset for the error recovery because an immediate
11290 * global reset on a PF affecting pending initialization of other PFs
11292 if (ae_dev->hw_err_reset_req) {
11293 enum hnae3_reset_type reset_level;
11295 reset_level = hclge_get_reset_level(ae_dev,
11296 &ae_dev->hw_err_reset_req);
11297 hclge_set_def_reset_request(ae_dev, reset_level);
11298 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11301 /* Enable MISC vector(vector0) */
11302 hclge_enable_vector(&hdev->misc_vector, true);
11304 hclge_state_init(hdev);
11305 hdev->last_reset_time = jiffies;
11307 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11308 HCLGE_DRIVER_NAME);
11310 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
/* Error unwind labels: each label releases everything acquired
 * after the matching init step.
 */
11315 if (hdev->hw.mac.phydev)
11316 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11317 err_msi_irq_uninit:
11318 hclge_misc_irq_uninit(hdev);
11320 pci_free_irq_vectors(pdev);
11322 hclge_cmd_uninit(hdev);
11324 pcim_iounmap(pdev, hdev->hw.io_base);
11325 pci_clear_master(pdev);
11326 pci_release_regions(pdev);
11327 pci_disable_device(pdev);
11329 mutex_destroy(&hdev->vport_lock);
11333 static void hclge_stats_clear(struct hclge_dev *hdev)
11335 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11338 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11340 return hclge_config_switch_param(hdev, vf, enable,
11341 HCLGE_SWITCH_ANTI_SPOOF_MASK);
/* Toggle VLAN-based spoof checking for one VF by updating the
 * ingress VLAN filter control for that function.
 * NOTE(review): the tail of the call (remaining arguments) is not
 * visible in this extract.
 */
11344 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11346 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11347 HCLGE_FILTER_FE_NIC_INGRESS_B,
/* Program both halves of VF spoof checking in hardware: the MAC
 * anti-spoof switch parameter and the VLAN ingress filter. Each
 * failure is logged with the VF index and requested state.
 */
11351 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11355 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11357 dev_err(&hdev->pdev->dev,
11358 "Set vf %d mac spoof check %s failed, ret=%d\n",
11359 vf, enable ? "on" : "off", ret);
11363 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11365 dev_err(&hdev->pdev->dev,
11366 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11367 vf, enable ? "on" : "off", ret);
/* ndo_set_vf_spoofchk backend: validate device version (V2+ only),
 * look up the VF's vport, warn when full VLAN/MAC tables could make
 * spoof checking drop the VF's own traffic, then program the hardware
 * and cache the new state in vf_info.
 */
11372 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11375 struct hclge_vport *vport = hclge_get_vport(handle);
11376 struct hclge_dev *hdev = vport->back;
11377 u32 new_spoofchk = enable ? 1 : 0;
11380 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11381 return -EOPNOTSUPP;
11383 vport = hclge_get_vf_vport(hdev, vf);
/* No hardware access needed when the state is unchanged */
11387 if (vport->vf_info.spoofchk == new_spoofchk)
11390 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11391 dev_warn(&hdev->pdev->dev,
11392 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11394 else if (enable && hclge_is_umv_space_full(vport, true))
11395 dev_warn(&hdev->pdev->dev,
11396 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11399 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11403 vport->vf_info.spoofchk = new_spoofchk;
/* After a reset, re-program each vport's cached spoof-check state
 * into hardware (V2+ devices only).
 * NOTE(review): the statement advancing `vport` across the loop is
 * not visible in this extract.
 */
11407 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11409 struct hclge_vport *vport = hdev->vport;
11413 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11416 /* resume the vf spoof check state after reset */
11417 for (i = 0; i < hdev->num_alloc_vport; i++) {
11418 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11419 vport->vf_info.spoofchk);
/* ndo_set_vf_trust backend: update the VF's trusted flag. When trust
 * is revoked from a VF that has promiscuous mode enabled, promiscuous
 * mode is turned off first (broadcast kept on V2+ hardware) and the
 * VF is notified.
 */
11429 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11431 struct hclge_vport *vport = hclge_get_vport(handle);
11432 struct hclge_dev *hdev = vport->back;
11433 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
11434 u32 new_trusted = enable ? 1 : 0;
11438 vport = hclge_get_vf_vport(hdev, vf);
11442 if (vport->vf_info.trusted == new_trusted)
11445 /* Disable promisc mode for VF if it is not trusted any more. */
11446 if (!enable && vport->vf_info.promisc_enable) {
11447 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
11448 ret = hclge_set_vport_promisc_mode(vport, false, false,
11452 vport->vf_info.promisc_enable = 0;
11453 hclge_inform_vf_promisc_info(vport);
11456 vport->vf_info.trusted = new_trusted;
/* Restore every VF's TX rate limit to the default (0 = unlimited) by
 * reprogramming the queue-set shaper; failures are only logged.
 */
11461 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11466 /* reset vf rate to default value */
11467 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11468 struct hclge_vport *vport = &hdev->vport[vf];
11470 vport->vf_info.max_tx_rate = 0;
11471 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11473 dev_err(&hdev->pdev->dev,
11474 "vf%d failed to reset to default, ret=%d\n",
11475 vf - HCLGE_VF_VPORT_START_NUM, ret);
/* Validate ndo_set_vf_rate parameters: min_tx_rate must be 0 (not
 * supported) and max_tx_rate must be within [0, MAC max speed].
 */
11479 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11480 int min_tx_rate, int max_tx_rate)
11482 if (min_tx_rate != 0 ||
11483 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11484 dev_err(&hdev->pdev->dev,
11485 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11486 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
/* ndo_set_vf_rate backend: validate the requested rates, program the
 * VF's queue-set shaper and cache the new max_tx_rate. `force` makes
 * the shaper be rewritten even when the cached value already matches
 * (used when resuming after a reset).
 */
11493 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11494 int min_tx_rate, int max_tx_rate, bool force)
11496 struct hclge_vport *vport = hclge_get_vport(handle);
11497 struct hclge_dev *hdev = vport->back;
11500 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11504 vport = hclge_get_vf_vport(hdev, vf);
11508 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11511 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11515 vport->vf_info.max_tx_rate = max_tx_rate;
/* After a reset, restore each VF's non-default TX rate limit. A cached
 * rate of 0 means unlimited, which firmware already restores, so those
 * VFs are skipped.
 */
11520 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11522 struct hnae3_handle *handle = &hdev->vport->nic;
11523 struct hclge_vport *vport;
11527 /* resume the vf max_tx_rate after reset */
11528 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11529 vport = hclge_get_vf_vport(hdev, vf);
11533 /* zero means max rate, after reset, firmware already set it to
11534 * max rate, so just continue.
11536 if (!vport->vf_info.max_tx_rate)
/* force=true: rewrite the shaper even though the cache matches */
11539 ret = hclge_set_vf_rate(handle, vf, 0,
11540 vport->vf_info.max_tx_rate, true);
11542 dev_err(&hdev->pdev->dev,
11543 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11544 vf, vport->vf_info.max_tx_rate, ret);
/* Stop every vport so its software state matches the hardware state
 * after a reset.
 * NOTE(review): the statement advancing `vport` across the loop is
 * not visible in this extract.
 */
11552 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11554 struct hclge_vport *vport = hdev->vport;
11557 for (i = 0; i < hdev->num_alloc_vport; i++) {
11558 hclge_vport_stop(vport);
/* Re-initialize the PF after a reset: clear stats, wipe VLAN/UMV
 * caches for IMP/global resets (function resets keep table entries),
 * then rerun the command queue, TQP mapping, MAC, TSO/GRO, VLAN, TM,
 * RSS, manager-table and flow-director setup, re-arm hardware error
 * interrupts and restore per-vport state (spoofchk, VF rates).
 * Returns 0 on success, negative errno on failure.
 */
11563 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11565 struct hclge_dev *hdev = ae_dev->priv;
11566 struct pci_dev *pdev = ae_dev->pdev;
11569 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11571 hclge_stats_clear(hdev);
11572 /* NOTE: pf reset needn't clear or restore pf and vf table entry.
11573 * so here should not clean table in memory.
11575 if (hdev->reset_type == HNAE3_IMP_RESET ||
11576 hdev->reset_type == HNAE3_GLOBAL_RESET) {
11577 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11578 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11579 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11580 hclge_reset_umv_space(hdev);
11583 ret = hclge_cmd_init(hdev);
11585 dev_err(&pdev->dev, "Cmd queue init failed\n");
11589 ret = hclge_map_tqp(hdev);
11591 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11595 ret = hclge_mac_init(hdev);
11597 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11601 ret = hclge_tp_port_init(hdev);
11603 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11608 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11610 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11614 ret = hclge_config_gro(hdev, true);
11618 ret = hclge_init_vlan_config(hdev);
11620 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11624 ret = hclge_tm_init_hw(hdev, true);
11626 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11630 ret = hclge_rss_init_hw(hdev);
11632 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11636 ret = init_mgr_tbl(hdev);
11638 dev_err(&pdev->dev,
11639 "failed to reinit manager table, ret = %d\n", ret);
11643 ret = hclge_init_fd_config(hdev);
11645 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11649 /* Log and clear the hw errors that already occurred */
11650 hclge_handle_all_hns_hw_errors(ae_dev);
11652 /* Re-enable the hw error interrupts because
11653 * the interrupts get disabled on global reset.
11655 ret = hclge_config_nic_hw_error(hdev, true);
11657 dev_err(&pdev->dev,
11658 "fail(%d) to re-enable NIC hw error interrupts\n",
/* RoCE RAS interrupts only matter when a RoCE client is attached */
11663 if (hdev->roce_client) {
11664 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11666 dev_err(&pdev->dev,
11667 "fail(%d) to re-enable roce ras interrupts\n",
11673 hclge_reset_vport_state(hdev);
11674 ret = hclge_reset_vport_spoofchk(hdev);
11678 ret = hclge_resume_vf_rate(hdev);
11682 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11683 HCLGE_DRIVER_NAME);
/* Tear down the PF ae_dev: restore VF rates and VLANs, stop the
 * service machinery, unregister MDIO, quiesce the misc vector and all
 * hardware error interrupts, then release command queue, IRQs, PCI
 * resources and remaining software state.
 */
11688 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11690 struct hclge_dev *hdev = ae_dev->priv;
11691 struct hclge_mac *mac = &hdev->hw.mac;
11693 hclge_reset_vf_rate(hdev);
11694 hclge_clear_vf_vlan(hdev);
11695 hclge_misc_affinity_teardown(hdev);
11696 hclge_state_uninit(hdev);
11697 hclge_uninit_mac_table(hdev);
11698 hclge_del_all_fd_entries(hdev);
11701 mdiobus_unregister(mac->mdio_bus);
11703 /* Disable MISC vector(vector0) */
11704 hclge_enable_vector(&hdev->misc_vector, false);
/* Wait for an in-flight misc interrupt handler to finish */
11705 synchronize_irq(hdev->misc_vector.vector_irq);
11707 /* Disable all hw interrupts */
11708 hclge_config_mac_tnl_int(hdev, false);
11709 hclge_config_nic_hw_error(hdev, false);
11710 hclge_config_rocee_ras_interrupt(hdev, false);
11712 hclge_cmd_uninit(hdev);
11713 hclge_misc_irq_uninit(hdev);
11714 hclge_pci_uninit(hdev);
11715 mutex_destroy(&hdev->vport_lock);
11716 hclge_uninit_vport_vlan_table(hdev);
11717 ae_dev->priv = NULL;
11720 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11722 struct hclge_vport *vport = hclge_get_vport(handle);
11723 struct hclge_dev *hdev = vport->back;
11725 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
/* ethtool get_channels: report the channel maximum and the current
 * combined count (the RSS size); one "other" channel is the misc
 * vector.
 */
11728 static void hclge_get_channels(struct hnae3_handle *handle,
11729 struct ethtool_channels *ch)
11731 ch->max_combined = hclge_get_max_channels(handle);
11732 ch->other_count = 1;
11734 ch->combined_count = handle->kinfo.rss_size;
11737 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11738 u16 *alloc_tqps, u16 *max_rss_size)
11740 struct hclge_vport *vport = hclge_get_vport(handle);
11741 struct hclge_dev *hdev = vport->back;
11743 *alloc_tqps = vport->alloc_tqps;
11744 *max_rss_size = hdev->pf_rss_size_max;
/* ethtool set_channels: request a new queue count, remap the vport's
 * TQPs through the TM layer, reprogram the per-TC RSS mode, and —
 * unless the user configured the RSS table explicitly — rebuild the
 * RSS indirection table for the new rss_size.
 */
11747 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11748 bool rxfh_configured)
11750 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11751 struct hclge_vport *vport = hclge_get_vport(handle);
11752 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11753 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11754 struct hclge_dev *hdev = vport->back;
11755 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11756 u16 cur_rss_size = kinfo->rss_size;
11757 u16 cur_tqps = kinfo->num_tqps;
11758 u16 tc_valid[HCLGE_MAX_TC_NUM];
11764 kinfo->req_rss_size = new_tqps_num;
11766 ret = hclge_tm_vport_map_update(hdev);
11768 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
/* RSS TC size must be a power of two; use the rounded-up log2 */
11772 roundup_size = roundup_pow_of_two(kinfo->rss_size);
11773 roundup_size = ilog2(roundup_size);
11774 /* Set the RSS TC mode according to the new RSS size */
11775 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11778 if (!(hdev->hw_tc_map & BIT(i)))
11782 tc_size[i] = roundup_size;
11783 tc_offset[i] = kinfo->rss_size * i;
11785 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11789 /* RSS indirection table has been configured by user */
11790 if (rxfh_configured)
11793 /* Reinitializes the rss indirect table according to the new RSS size */
11794 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11799 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11800 rss_indir[i] = i % kinfo->rss_size;
11802 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11804 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11811 dev_info(&hdev->pdev->dev,
11812 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
11813 cur_rss_size, kinfo->rss_size,
11814 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
/* Ask firmware how many 32-bit and 64-bit registers a register dump
 * will contain; results are written through the out-parameters.
 */
11819 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11820 u32 *regs_num_64_bit)
11822 struct hclge_desc desc;
11826 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11827 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11829 dev_err(&hdev->pdev->dev,
11830 "Query register number cmd failed, ret = %d.\n", ret);
11834 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
11835 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
11837 total_num = *regs_num_32_bit + *regs_num_64_bit;
/* Read regs_num 32-bit registers from firmware into `data`. The reply
 * spans multiple descriptors; the first descriptor loses
 * HCLGE_32_BIT_DESC_NODATA_LEN words to the command header, later
 * ones carry a full payload.
 */
11844 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11847 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11848 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11850 struct hclge_desc *desc;
11851 u32 *reg_val = data;
11861 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11862 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11863 HCLGE_32_BIT_REG_RTN_DATANUM);
11864 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11868 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11869 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11871 dev_err(&hdev->pdev->dev,
11872 "Query 32 bit register cmd failed, ret = %d.\n", ret);
/* Unpack each descriptor, converting from little-endian */
11877 for (i = 0; i < cmd_num; i++) {
11879 desc_data = (__le32 *)(&desc[i].data[0]);
11880 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11882 desc_data = (__le32 *)(&desc[i]);
11883 n = HCLGE_32_BIT_REG_RTN_DATANUM;
11885 for (k = 0; k < n; k++) {
11886 *reg_val++ = le32_to_cpu(*desc_data++);
/* Read regs_num 64-bit registers from firmware into `data`; same
 * multi-descriptor unpacking scheme as hclge_get_32_bit_regs but with
 * 64-bit words and a one-word header in the first descriptor.
 */
11898 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11901 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11902 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11904 struct hclge_desc *desc;
11905 u64 *reg_val = data;
11915 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11916 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11917 HCLGE_64_BIT_REG_RTN_DATANUM);
11918 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11922 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11923 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11925 dev_err(&hdev->pdev->dev,
11926 "Query 64 bit register cmd failed, ret = %d.\n", ret);
/* Unpack each descriptor, converting from little-endian */
11931 for (i = 0; i < cmd_num; i++) {
11933 desc_data = (__le64 *)(&desc[i].data[0]);
11934 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11936 desc_data = (__le64 *)(&desc[i]);
11937 n = HCLGE_64_BIT_REG_RTN_DATANUM;
11939 for (k = 0; k < n; k++) {
11940 *reg_val++ = le64_to_cpu(*desc_data++);
/* Layout constants for the ethtool register dump: registers are
 * grouped in lines of REG_NUM_PER_LINE u32 words, padded with
 * SEPARATOR_VALUE markers between sections.
 */
11952 #define MAX_SEPARATE_NUM 4
11953 #define SEPARATOR_VALUE 0xFDFCFBFA
11954 #define REG_NUM_PER_LINE 4
11955 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
11956 #define REG_SEPARATOR_LINE 1
11957 #define REG_NUM_REMAIN_MASK 3
/* Query the DFX buffer-descriptor counts: build a chain of
 * HCLGE_GET_DFX_REG_TYPE_CNT descriptors (all but the last carry the
 * NEXT flag) and send it to firmware. Caller supplies the desc array.
 */
11959 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11963 /* initialize command BD except the last one */
11964 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11965 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11967 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11970 /* initialize the last command BD */
11971 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11973 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
/* Fill bd_num_list[0..type_num-1] with the BD count of each DFX
 * register type, using the per-type offsets in
 * hclge_dfx_bd_offset_list to index into the query reply.
 */
11976 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11980 u32 entries_per_desc, desc_index, index, offset, i;
11981 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
11984 ret = hclge_query_bd_num_cmd_send(hdev, desc);
11986 dev_err(&hdev->pdev->dev,
11987 "Get dfx bd num fail, status is %d.\n", ret);
11991 entries_per_desc = ARRAY_SIZE(desc[0].data);
11992 for (i = 0; i < type_num; i++) {
11993 offset = hclge_dfx_bd_offset_list[i];
/* Convert the flat offset into (descriptor, data-word) coordinates */
11994 index = offset % entries_per_desc;
11995 desc_index = offset / entries_per_desc;
11996 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
/* Send one DFX register query of `bd_num` chained descriptors with
 * opcode `cmd`; the reply data is left in desc_src for the caller to
 * unpack.
 */
12002 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12003 struct hclge_desc *desc_src, int bd_num,
12004 enum hclge_opcode_type cmd)
12006 struct hclge_desc *desc = desc_src;
12009 hclge_cmd_setup_basic_desc(desc, cmd, true);
12010 for (i = 0; i < bd_num - 1; i++) {
12011 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12013 hclge_cmd_setup_basic_desc(desc, cmd, true);
12017 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12019 dev_err(&hdev->pdev->dev,
12020 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
/* Copy the register words out of a DFX reply descriptor chain into
 * `reg` (little-endian converted), then pad with SEPARATOR_VALUE up
 * to a REG_NUM_PER_LINE boundary. Returns the number of u32 words
 * written (data plus separators).
 */
12026 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12029 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12030 struct hclge_desc *desc = desc_src;
12033 entries_per_desc = ARRAY_SIZE(desc->data);
12034 reg_num = entries_per_desc * bd_num;
12035 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12036 for (i = 0; i < reg_num; i++) {
12037 index = i % entries_per_desc;
12038 desc_index = i / entries_per_desc;
12039 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12041 for (i = 0; i < separator_num; i++)
12042 *reg++ = SEPARATOR_VALUE;
12044 return reg_num + separator_num;
/* Compute the byte length needed for the full DFX register section of
 * a register dump: for each DFX type, its BD payload rounded up to
 * whole REG_LEN_PER_LINE lines (the +1 line holds the separator).
 */
12047 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12049 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12050 int data_len_per_desc, bd_num, i;
12055 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12059 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12061 dev_err(&hdev->pdev->dev,
12062 "Get dfx reg bd num fail, status is %d.\n", ret);
12066 data_len_per_desc = sizeof_field(struct hclge_desc, data);
12068 for (i = 0; i < dfx_reg_type_num; i++) {
12069 bd_num = bd_num_list[i];
12070 data_len = data_len_per_desc * bd_num;
12071 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12075 kfree(bd_num_list);
/* Dump all DFX register types into `data`: query the per-type BD
 * counts, allocate one descriptor buffer sized for the largest type,
 * then query and unpack each type in turn via
 * hclge_dfx_reg_cmd_send()/hclge_dfx_reg_fetch_data().
 */
12079 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12081 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12082 int bd_num, bd_num_max, buf_len, i;
12083 struct hclge_desc *desc_src;
12088 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12092 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12094 dev_err(&hdev->pdev->dev,
12095 "Get dfx reg bd num fail, status is %d.\n", ret);
/* One buffer sized for the largest type is reused for every query */
12099 bd_num_max = bd_num_list[0];
12100 for (i = 1; i < dfx_reg_type_num; i++)
12101 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12103 buf_len = sizeof(*desc_src) * bd_num_max;
12104 desc_src = kzalloc(buf_len, GFP_KERNEL);
12110 for (i = 0; i < dfx_reg_type_num; i++) {
12111 bd_num = bd_num_list[i];
12112 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12113 hclge_dfx_reg_opcode_list[i]);
12115 dev_err(&hdev->pdev->dev,
12116 "Get dfx reg fail, status is %d.\n", ret);
12120 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12125 kfree(bd_num_list);
/* Read the per-PF register sections directly from PCIe register
 * space into `data`: command-queue regs, common regs, per-TQP ring
 * regs and per-vector interrupt regs, each section padded with
 * SEPARATOR_VALUE. Returns the number of u32 words written.
 */
12129 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12130 struct hnae3_knic_private_info *kinfo)
12132 #define HCLGE_RING_REG_OFFSET 0x200
12133 #define HCLGE_RING_INT_REG_OFFSET 0x4
12135 int i, j, reg_num, separator_num;
12139 /* fetching per-PF registers values from PF PCIe register space */
12140 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12141 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12142 for (i = 0; i < reg_num; i++)
12143 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12144 for (i = 0; i < separator_num; i++)
12145 *reg++ = SEPARATOR_VALUE;
12146 data_num_sum = reg_num + separator_num;
12148 reg_num = ARRAY_SIZE(common_reg_addr_list);
12149 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12150 for (i = 0; i < reg_num; i++)
12151 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12152 for (i = 0; i < separator_num; i++)
12153 *reg++ = SEPARATOR_VALUE;
12154 data_num_sum += reg_num + separator_num;
/* Ring registers repeat once per TQP at HCLGE_RING_REG_OFFSET stride */
12156 reg_num = ARRAY_SIZE(ring_reg_addr_list);
12157 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12158 for (j = 0; j < kinfo->num_tqps; j++) {
12159 for (i = 0; i < reg_num; i++)
12160 *reg++ = hclge_read_dev(&hdev->hw,
12161 ring_reg_addr_list[i] +
12162 HCLGE_RING_REG_OFFSET * j);
12163 for (i = 0; i < separator_num; i++)
12164 *reg++ = SEPARATOR_VALUE;
12166 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
/* Interrupt registers repeat per used MSI vector (minus the misc one) */
12168 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12169 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12170 for (j = 0; j < hdev->num_msi_used - 1; j++) {
12171 for (i = 0; i < reg_num; i++)
12172 *reg++ = hclge_read_dev(&hdev->hw,
12173 tqp_intr_reg_addr_list[i] +
12174 HCLGE_RING_INT_REG_OFFSET * j);
12175 for (i = 0; i < separator_num; i++)
12176 *reg++ = SEPARATOR_VALUE;
12178 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12180 return data_num_sum;
/* ethtool get_regs_len: total dump size in bytes — PCIe-space
 * sections (cmdq, common, per-TQP ring, per-vector interrupt), the
 * firmware-reported 32/64-bit register sections, and the DFX section,
 * all accounted in REG_LEN_PER_LINE units plus separator lines.
 */
12183 static int hclge_get_regs_len(struct hnae3_handle *handle)
12185 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12186 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12187 struct hclge_vport *vport = hclge_get_vport(handle);
12188 struct hclge_dev *hdev = vport->back;
12189 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12190 int regs_lines_32_bit, regs_lines_64_bit;
12193 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12195 dev_err(&hdev->pdev->dev,
12196 "Get register number failed, ret = %d.\n", ret);
12200 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12202 dev_err(&hdev->pdev->dev,
12203 "Get dfx reg len failed, ret = %d.\n", ret);
12207 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12208 REG_SEPARATOR_LINE;
12209 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12210 REG_SEPARATOR_LINE;
12211 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12212 REG_SEPARATOR_LINE;
12213 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12214 REG_SEPARATOR_LINE;
12215 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12216 REG_SEPARATOR_LINE;
12217 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12218 REG_SEPARATOR_LINE;
12220 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12221 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12222 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
/* ethtool get_regs: fill `data` with the full register dump in the
 * same order hclge_get_regs_len() sized it — PCIe-space sections,
 * 32-bit regs, 64-bit regs (each padded with separators), then the
 * DFX section. `version` reports the firmware version.
 */
12225 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12228 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12229 struct hclge_vport *vport = hclge_get_vport(handle);
12230 struct hclge_dev *hdev = vport->back;
12231 u32 regs_num_32_bit, regs_num_64_bit;
12232 int i, reg_num, separator_num, ret;
12235 *version = hdev->fw_version;
12237 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12239 dev_err(&hdev->pdev->dev,
12240 "Get register number failed, ret = %d.\n", ret);
12244 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12246 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12248 dev_err(&hdev->pdev->dev,
12249 "Get 32 bit register failed, ret = %d.\n", ret);
12252 reg_num = regs_num_32_bit;
12254 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12255 for (i = 0; i < separator_num; i++)
12256 *reg++ = SEPARATOR_VALUE;
12258 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12260 dev_err(&hdev->pdev->dev,
12261 "Get 64 bit register failed, ret = %d.\n", ret);
/* Each 64-bit register occupies two u32 slots in the dump */
12264 reg_num = regs_num_64_bit * 2;
12266 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12267 for (i = 0; i < separator_num; i++)
12268 *reg++ = SEPARATOR_VALUE;
12270 ret = hclge_get_dfx_reg(hdev, reg);
12272 dev_err(&hdev->pdev->dev,
12273 "Get dfx register failed, ret = %d.\n", ret);
/* Program the locate-LED state (on/off/no-change) via the firmware
 * LED status configuration command.
 */
12276 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12278 struct hclge_set_led_state_cmd *req;
12279 struct hclge_desc desc;
12282 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12284 req = (struct hclge_set_led_state_cmd *)desc.data;
12285 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12286 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12288 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12290 dev_err(&hdev->pdev->dev,
12291 "Send set led state cmd error, ret =%d\n", ret);
/* Locate-LED states passed to hclge_set_led_status().
 * NOTE(review): the on/off enumerators are not visible in this
 * extract; only the no-change sentinel is shown.
 */
12296 enum hclge_led_status {
12299 HCLGE_LED_NO_CHANGE = 0xFF,
/* ethtool set_phys_id backend: turn the locate LED on or off
 * depending on the requested identify state.
 */
12302 static int hclge_set_led_id(struct hnae3_handle *handle,
12303 enum ethtool_phys_id_state status)
12305 struct hclge_vport *vport = hclge_get_vport(handle);
12306 struct hclge_dev *hdev = vport->back;
12309 case ETHTOOL_ID_ACTIVE:
12310 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12311 case ETHTOOL_ID_INACTIVE:
12312 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12318 static void hclge_get_link_mode(struct hnae3_handle *handle,
12319 unsigned long *supported,
12320 unsigned long *advertising)
12322 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12323 struct hclge_vport *vport = hclge_get_vport(handle);
12324 struct hclge_dev *hdev = vport->back;
12325 unsigned int idx = 0;
12327 for (; idx < size; idx++) {
12328 supported[idx] = hdev->hw.mac.supported[idx];
12329 advertising[idx] = hdev->hw.mac.advertising[idx];
12333 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12335 struct hclge_vport *vport = hclge_get_vport(handle);
12336 struct hclge_dev *hdev = vport->back;
12338 return hclge_config_gro(hdev, enable);
/* Periodic sync of the PF's promiscuous mode: if the overflow flags
 * changed (e.g. MAC/VLAN table overflow), mark the promisc state
 * dirty; while dirty, reapply the combined netdev + overflow flags
 * and the matching VLAN filter state.
 */
12341 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12343 struct hclge_vport *vport = &hdev->vport[0];
12344 struct hnae3_handle *handle = &vport->nic;
12348 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12349 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
12350 vport->last_promisc_flags = vport->overflow_promisc_flags;
12353 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
12354 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12355 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12356 tmp_flags & HNAE3_MPE);
/* Only clear the dirty bit once the hardware update succeeded */
12358 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
12359 hclge_enable_vlan_filter(handle,
12360 tmp_flags & HNAE3_VLAN_FLTR);
/* Ask firmware whether an SFP module is currently plugged in; returns
 * false on command failure or when no module is present.
 */
12365 static bool hclge_module_existed(struct hclge_dev *hdev)
12367 struct hclge_desc desc;
12371 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12372 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12374 dev_err(&hdev->pdev->dev,
12375 "failed to get SFP exist state, ret = %d\n", ret);
12379 existed = le32_to_cpu(desc.data[0]);
12381 return existed != 0;
12384 /* need 6 bds(total 140 bytes) in one reading
12385 * return the number of bytes actually read, 0 means read failed.
12387 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12390 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12391 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12397 /* setup all 6 bds to read module eeprom info. */
12398 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12399 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12402 /* bd0~bd4 need next flag */
12403 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12404 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12407 /* setup bd0, this bd contains offset and read length. */
12408 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12409 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
/* Clamp the request to what one descriptor chain can return */
12410 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12411 sfp_info_bd0->read_len = cpu_to_le16(read_len);
12413 ret = hclge_cmd_send(&hdev->hw, desc, i);
12415 dev_err(&hdev->pdev->dev,
12416 "failed to get SFP eeprom info, ret = %d\n", ret);
12420 /* copy sfp info from bd0 to out buffer. */
12421 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12422 memcpy(data, sfp_info_bd0->data, copy_len);
12423 read_len = copy_len;
12425 /* copy sfp info from bd1~bd5 to out buffer if needed. */
12426 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12427 if (read_len >= len)
12430 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12431 memcpy(data + read_len, desc[i].data, copy_len);
12432 read_len += copy_len;
/* ethtool get_module_eeprom backend: fiber ports only; verify a
 * module is present, then read the EEPROM in chunks via
 * hclge_get_sfp_eeprom_info() until `len` bytes are gathered.
 */
12438 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12441 struct hclge_vport *vport = hclge_get_vport(handle);
12442 struct hclge_dev *hdev = vport->back;
12446 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12447 return -EOPNOTSUPP;
12449 if (!hclge_module_existed(hdev))
12452 while (read_len < len) {
12453 data_len = hclge_get_sfp_eeprom_info(hdev,
12460 read_len += data_len;
/* PF-side implementation of the hnae3 AE-device ops vtable; registered with
 * the hnae3 framework via ae_algo below. Entries are grouped roughly by
 * feature area. (Closing brace of the initializer is outside this
 * extraction.)
 */
12466 static const struct hnae3_ae_ops hclge_ops = {
/* Device / client lifecycle */
12467 .init_ae_dev = hclge_init_ae_dev,
12468 .uninit_ae_dev = hclge_uninit_ae_dev,
12469 .flr_prepare = hclge_flr_prepare,
12470 .flr_done = hclge_flr_done,
12471 .init_client_instance = hclge_init_client_instance,
12472 .uninit_client_instance = hclge_uninit_client_instance,
/* Interrupt vector / ring mapping */
12473 .map_ring_to_vector = hclge_map_ring_to_vector,
12474 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12475 .get_vector = hclge_get_vector,
12476 .put_vector = hclge_put_vector,
12477 .set_promisc_mode = hclge_set_promisc_mode,
12478 .request_update_promisc_mode = hclge_request_update_promisc_mode,
12479 .set_loopback = hclge_set_loopback,
12480 .start = hclge_ae_start,
12481 .stop = hclge_ae_stop,
12482 .client_start = hclge_client_start,
12483 .client_stop = hclge_client_stop,
12484 .get_status = hclge_get_status,
/* Link settings, speed/duplex, FEC */
12485 .get_ksettings_an_result = hclge_get_ksettings_an_result,
12486 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12487 .get_media_type = hclge_get_media_type,
12488 .check_port_speed = hclge_check_port_speed,
12489 .get_fec = hclge_get_fec,
12490 .set_fec = hclge_set_fec,
/* RSS configuration */
12491 .get_rss_key_size = hclge_get_rss_key_size,
12492 .get_rss = hclge_get_rss,
12493 .set_rss = hclge_set_rss,
12494 .set_rss_tuple = hclge_set_rss_tuple,
12495 .get_rss_tuple = hclge_get_rss_tuple,
12496 .get_tc_size = hclge_get_tc_size,
/* MAC address management (unicast/multicast) */
12497 .get_mac_addr = hclge_get_mac_addr,
12498 .set_mac_addr = hclge_set_mac_addr,
12499 .do_ioctl = hclge_do_ioctl,
12500 .add_uc_addr = hclge_add_uc_addr,
12501 .rm_uc_addr = hclge_rm_uc_addr,
12502 .add_mc_addr = hclge_add_mc_addr,
12503 .rm_mc_addr = hclge_rm_mc_addr,
/* Autonegotiation and pause (flow control) */
12504 .set_autoneg = hclge_set_autoneg,
12505 .get_autoneg = hclge_get_autoneg,
12506 .restart_autoneg = hclge_restart_autoneg,
12507 .halt_autoneg = hclge_halt_autoneg,
12508 .get_pauseparam = hclge_get_pauseparam,
12509 .set_pauseparam = hclge_set_pauseparam,
12510 .set_mtu = hclge_set_mtu,
12511 .reset_queue = hclge_reset_tqp,
/* Statistics and ethtool strings */
12512 .get_stats = hclge_get_stats,
12513 .get_mac_stats = hclge_get_mac_stat,
12514 .update_stats = hclge_update_stats,
12515 .get_strings = hclge_get_strings,
12516 .get_sset_count = hclge_get_sset_count,
12517 .get_fw_version = hclge_get_fw_version,
12518 .get_mdix_mode = hclge_get_mdix_mode,
/* VLAN filtering / offload */
12519 .enable_vlan_filter = hclge_enable_vlan_filter,
12520 .set_vlan_filter = hclge_set_vlan_filter,
12521 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12522 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
/* Reset handling */
12523 .reset_event = hclge_reset_event,
12524 .get_reset_level = hclge_get_reset_level,
12525 .set_default_reset_request = hclge_set_def_reset_request,
/* Channels / queues, regs, LED, link modes */
12526 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12527 .set_channels = hclge_set_channels,
12528 .get_channels = hclge_get_channels,
12529 .get_regs_len = hclge_get_regs_len,
12530 .get_regs = hclge_get_regs,
12531 .set_led_id = hclge_set_led_id,
12532 .get_link_mode = hclge_get_link_mode,
/* Flow director (ethtool ntuple) and aRFS */
12533 .add_fd_entry = hclge_add_fd_entry,
12534 .del_fd_entry = hclge_del_fd_entry,
12535 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12536 .get_fd_rule_info = hclge_get_fd_rule_info,
12537 .get_fd_all_rules = hclge_get_all_rules,
12538 .enable_fd = hclge_enable_fd,
12539 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
/* Debugfs and hardware error handling */
12540 .dbg_run_cmd = hclge_dbg_run_cmd,
12541 .dbg_read_cmd = hclge_dbg_read_cmd,
12542 .handle_hw_ras_error = hclge_handle_hw_ras_error,
12543 .get_hw_reset_stat = hclge_get_hw_reset_stat,
12544 .ae_dev_resetting = hclge_ae_dev_resetting,
12545 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12546 .set_gro_en = hclge_gro_en,
12547 .get_global_queue_id = hclge_covert_handle_qid_global,
12548 .set_timer_task = hclge_set_timer_task,
/* PHY attach/detach */
12549 .mac_connect_phy = hclge_mac_connect_phy,
12550 .mac_disconnect_phy = hclge_mac_disconnect_phy,
/* SR-IOV VF management (ndo_set_vf_* backends) */
12551 .get_vf_config = hclge_get_vf_config,
12552 .set_vf_link_state = hclge_set_vf_link_state,
12553 .set_vf_spoofchk = hclge_set_vf_spoofchk,
12554 .set_vf_trust = hclge_set_vf_trust,
12555 .set_vf_rate = hclge_set_vf_rate,
12556 .set_vf_mac = hclge_set_vf_mac,
/* Module EEPROM, cmdq state, tc flower, PHY ksettings */
12557 .get_module_eeprom = hclge_get_module_eeprom,
12558 .get_cmdq_stat = hclge_get_cmdq_stat,
12559 .add_cls_flower = hclge_add_cls_flower,
12560 .del_cls_flower = hclge_del_cls_flower,
12561 .cls_flower_active = hclge_is_cls_flower_active,
12562 .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12563 .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
/* Algorithm descriptor registered with the hnae3 framework: binds the
 * hclge_ops vtable (the .ops assignment line is not visible in this
 * extraction) to the PCI IDs this PF driver claims.
 */
12566 static struct hnae3_ae_algo ae_algo = {
12568 .pdev_id_table = ae_algo_pci_tbl,
/* Module entry point: create the driver workqueue and register the AE
 * algorithm with the hnae3 framework.
 *
 * NOTE(review): partial extraction — the alloc_workqueue() NULL check and
 * the return statements are missing from this view.
 */
12571 static int hclge_init(void)
12573 pr_info("%s is initializing\n", HCLGE_NAME);
/* Unbound default workqueue named "hclge" for the driver's service tasks. */
12575 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12577 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12581 hnae3_register_ae_algo(&ae_algo);
/* Module exit: unregister from the hnae3 framework, then destroy the
 * workqueue (reverse order of hclge_init so no work can be queued after
 * the algorithm is gone).
 */
12586 static void hclge_exit(void)
12588 hnae3_unregister_ae_algo(&ae_algo);
12589 destroy_workqueue(hclge_wq);
/* Standard kernel module registration and metadata. */
12591 module_init(hclge_init);
12592 module_exit(hclge_exit);
12594 MODULE_LICENSE("GPL");
12595 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12596 MODULE_DESCRIPTION("HCLGE Driver");
12597 MODULE_VERSION(HCLGE_MOD_VERSION);