1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
24 #include "hclge_err.h"
27 #define HCLGE_NAME "hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
31 #define HCLGE_BUF_SIZE_UNIT 256U
32 #define HCLGE_BUF_MUL_BY 2
33 #define HCLGE_BUF_DIV_BY 2
34 #define NEED_RESERVE_TC_NUM 2
35 #define BUF_MAX_PERCENT 100
36 #define BUF_RESERVE_PERCENT 90
38 #define HCLGE_RESET_MAX_FAIL_CNT 5
39 #define HCLGE_RESET_SYNC_TIME 100
40 #define HCLGE_PF_RESET_SYNC_TIME 20
41 #define HCLGE_PF_RESET_SYNC_CNT 1500
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET 1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET 2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET 3
47 #define HCLGE_DFX_IGU_BD_OFFSET 4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET 5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET 6
50 #define HCLGE_DFX_NCSI_BD_OFFSET 7
51 #define HCLGE_DFX_RTC_BD_OFFSET 8
52 #define HCLGE_DFX_PPP_BD_OFFSET 9
53 #define HCLGE_DFX_RCB_BD_OFFSET 10
54 #define HCLGE_DFX_TQP_BD_OFFSET 11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET 12
57 #define HCLGE_LINK_STATUS_MS 10
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
/* Algorithm ops object registered with the hnae3 framework for this PF. */
static struct hnae3_ae_algo ae_algo;

/* Workqueue used by the hclge driver for deferred service work. */
static struct workqueue_struct *hclge_wq;
/* PCI IDs of HiSilicon NIC PF functions this driver binds to.
 * NOTE(review): the sentinel entry after the last device appears to be
 * missing in this view of the file — confirm the table is terminated.
 */
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
94 HCLGE_CMDQ_TX_ADDR_H_REG,
95 HCLGE_CMDQ_TX_DEPTH_REG,
96 HCLGE_CMDQ_TX_TAIL_REG,
97 HCLGE_CMDQ_TX_HEAD_REG,
98 HCLGE_CMDQ_RX_ADDR_L_REG,
99 HCLGE_CMDQ_RX_ADDR_H_REG,
100 HCLGE_CMDQ_RX_DEPTH_REG,
101 HCLGE_CMDQ_RX_TAIL_REG,
102 HCLGE_CMDQ_RX_HEAD_REG,
103 HCLGE_VECTOR0_CMDQ_SRC_REG,
104 HCLGE_CMDQ_INTR_STS_REG,
105 HCLGE_CMDQ_INTR_EN_REG,
106 HCLGE_CMDQ_INTR_GEN_REG};
/* Common (misc-vector / reset) register offsets, in dump order.
 * NOTE(review): tail of this initializer is not visible in this view.
 */
static const u32 common_reg_addr_list[] = {
	HCLGE_MISC_VECTOR_REG_BASE,
	HCLGE_VECTOR0_OTER_EN_REG,
	HCLGE_MISC_RESET_STS_REG,
	HCLGE_MISC_VECTOR_INT_STS,
	HCLGE_GLOBAL_RESET_REG,
/* Per-ring register offsets (RX ring first, then TX ring), in dump order.
 * NOTE(review): closing brace of this initializer is not visible here.
 */
static const u32 ring_reg_addr_list[] = {
	HCLGE_RING_RX_ADDR_L_REG,
	HCLGE_RING_RX_ADDR_H_REG,
	HCLGE_RING_RX_BD_NUM_REG,
	HCLGE_RING_RX_BD_LENGTH_REG,
	HCLGE_RING_RX_MERGE_EN_REG,
	HCLGE_RING_RX_TAIL_REG,
	HCLGE_RING_RX_HEAD_REG,
	HCLGE_RING_RX_FBD_NUM_REG,
	HCLGE_RING_RX_OFFSET_REG,
	HCLGE_RING_RX_FBD_OFFSET_REG,
	HCLGE_RING_RX_STASH_REG,
	HCLGE_RING_RX_BD_ERR_REG,
	HCLGE_RING_TX_ADDR_L_REG,
	HCLGE_RING_TX_ADDR_H_REG,
	HCLGE_RING_TX_BD_NUM_REG,
	HCLGE_RING_TX_PRIORITY_REG,
	HCLGE_RING_TX_TC_REG,
	HCLGE_RING_TX_MERGE_EN_REG,
	HCLGE_RING_TX_TAIL_REG,
	HCLGE_RING_TX_HEAD_REG,
	HCLGE_RING_TX_FBD_NUM_REG,
	HCLGE_RING_TX_OFFSET_REG,
	HCLGE_RING_TX_EBD_NUM_REG,
	HCLGE_RING_TX_EBD_OFFSET_REG,
	HCLGE_RING_TX_BD_ERR_REG,
143 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
144 HCLGE_TQP_INTR_GL0_REG,
145 HCLGE_TQP_INTR_GL1_REG,
146 HCLGE_TQP_INTR_GL2_REG,
147 HCLGE_TQP_INTR_RL_REG};
/* Names reported for the ETH_SS_TEST stringset (self-test names).
 * NOTE(review): entries before/after the serdes names (e.g. app/phy
 * loopback) are not visible in this view — confirm against the full file.
 */
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
/* ethtool stat name -> offset into struct hclge_mac_stats.  Read via
 * HCLGE_STATS_READ() in hclge_comm_get_stats().
 * NOTE(review): closing brace of this table is not visible in this view.
 */
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
/* Static MAC manager table entry: traps LLDP frames (dst 01:80:c2:00:00:0e,
 * ethertype ETH_P_LLDP) on port 0 (i_port_bitmap bit 0).
 * NOTE(review): entry braces/terminator are not visible in this view.
 */
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
	.ethter_type = cpu_to_le16(ETH_P_LLDP),
	.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
	.i_port_bitmap = 0x1,
/* Default 40-byte RSS hash key programmed into the hardware. */
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
/* Offsets of the per-module DFX BD-number fields; indexed in lock-step
 * with hclge_dfx_reg_opcode_list below.
 */
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
/* Firmware opcodes used to read each DFX register module; order matches
 * hclge_dfx_bd_offset_list above.
 */
static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
/* { field id, width-in-bits } pairs for the meta-data portion of the key.
 * NOTE(review): only the first entry is visible in this view.
 */
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
/* { field id, width-in-bits } pairs for outer/inner tuple fields used to
 * build the lookup key (48-bit MACs, 16-bit VLAN tags/ethertypes/ports,
 * 8-bit IP protocol, 24-bit tunnel VNI).
 * NOTE(review): several entries (e.g. IP addresses) and the terminator
 * are not visible in this view.
 */
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
/* Legacy MAC stats read: issues the fixed-size HCLGE_OPC_STATS_MAC command
 * (HCLGE_MAC_CMD_NUM descriptors) and accumulates the returned 64-bit
 * counters into hdev->mac_stats, treated as a flat u64 array.
 */
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
			/* later descs carry stats starting at the desc head */
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
/* New-style MAC stats read: issues HCLGE_OPC_STATS_MAC_ALL with a
 * firmware-reported descriptor count (desc_num from
 * hclge_mac_query_reg_num()) and accumulates the counters into
 * hdev->mac_stats, treated as a flat u64 array.
 */
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		desc_data = (__le64 *)(&desc[i].data[0]);
		n = HCLGE_RD_FIRST_STATS_NUM;
		/* later descs carry stats starting at the desc head */
		desc_data = (__le64 *)(&desc[i]);
		n = HCLGE_RD_OTHER_STATS_NUM;

	for (k = 0; k < n; k++) {
		*data += le64_to_cpu(*desc_data);
/* Query the number of MAC statistics registers from firmware and convert
 * it to a descriptor count: the first descriptor holds 3 stats, each
 * following one holds 4, i.e. *desc_num = 1 + ceil((reg_num - 3) / 4).
 */
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)

	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
/* Refresh hdev->mac_stats, preferring the new variable-length query;
 * falls back to the legacy fixed-size command when the firmware reports
 * -EOPNOTSUPP for the register-number query.
 */
static int hclge_mac_update_stats(struct hclge_dev *hdev)

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
/* Query per-queue RX then TX packet counters from firmware for every TQP
 * of this handle and accumulate them into each tqp's tqp_stats.
 */
static int hclge_tqps_update_stats(struct hnae3_handle *handle)

	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;

	/* RX counters first */
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",

		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);

	/* then TX counters */
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",

		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
/* Copy the cached per-queue counters (all TX queues, then all RX queues)
 * into the ethtool stats buffer; returns the advanced buffer pointer.
 */
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)

	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
613 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
615 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
617 /* each tqp has TX & RX two queues */
618 return kinfo->num_tqps * (2);
/* Emit the per-queue stat names ("txq<i>_pktnum_rcd" for every TX queue,
 * then "rxq<i>_pktnum_rcd" for every RX queue), one ETH_GSTRING_LEN slot
 * each; returns the advanced buffer pointer.
 */
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)

	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",

		buff = buff + ETH_GSTRING_LEN;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",

		buff = buff + ETH_GSTRING_LEN;
/* Copy @size counters out of @comm_stats into the output buffer using the
 * per-stat offsets in @strs (HCLGE_STATS_READ does the offset read).
 */
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
/* Emit the @size stat names from @strs into the string buffer, one
 * ETH_GSTRING_LEN slot each; only the ETH_SS_STATS stringset is handled.
 */
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],

	char *buff = (char *)data;

	if (stringset != ETH_SS_STATS)

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
/* Refresh both TQP stats (only when a client is attached to the PF's nic
 * handle) and MAC stats; failures are logged but not propagated.
 */
static void hclge_update_stats_for_all(struct hclge_dev *hdev)

	struct hnae3_handle *handle;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",

	status = hclge_mac_update_stats(hdev);
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
/* hnae3 update_stats hook: refresh MAC and TQP stats, guarded by the
 * HCLGE_STATE_STATISTICS_UPDATING bit so concurrent callers skip the
 * update rather than race.  Errors are logged, not returned.
 */
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* bail out if another updater is already running */
	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))

	status = hclge_mac_update_stats(hdev);
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",

	status = hclge_tqps_update_stats(handle);
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
/* hnae3 get_sset_count hook.  For ETH_SS_TEST it also (re)computes which
 * loopback self-tests are available and records them in handle->flags;
 * for ETH_SS_STATS it returns MAC-stat entries plus per-queue entries.
 */
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
				   HNAE3_SUPPORT_PHY_LOOPBACK |\
				   HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
				   HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
	 * phy: only support when phy device exist on board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;

		/* serdes loopbacks are always supported */
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		    hdev->hw.mac.phydev->drv->set_loopback) {
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;

	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
/* hnae3 get_strings hook: fill the stringset buffer with MAC + per-queue
 * stat names (ETH_SS_STATS) or the enabled loopback self-test names
 * (ETH_SS_TEST), in the same order hclge_get_sset_count() advertised.
 */
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,

	u8 *p = (char *)data;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			p += ETH_GSTRING_LEN;
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			p += ETH_GSTRING_LEN;
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			p += ETH_GSTRING_LEN;
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			p += ETH_GSTRING_LEN;
/* hnae3 get_stats hook: copy cached MAC stats, then per-queue stats, into
 * the ethtool stats buffer (same layout as hclge_get_strings()).
 */
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
/* hnae3 get_mac_stats hook: refresh stats, then report the TX/RX MAC
 * pause frame counters.
 */
static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
/* Parse the firmware function-status response: checks the PF-reset-done
 * bit, records whether this PF is the main PF, and extracts the MAC id
 * (low 4 bits of status->mac_id).
 */
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
#define HCLGE_MAC_ID_MASK 0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
/* Query the PF's function status from firmware, polling up to
 * HCLGE_QUERY_MAX_CNT times (1-2 ms apart) for PF reset completion, then
 * parse the final response.
 */
static int hclge_query_function_status(struct hclge_dev *hdev)
#define HCLGE_QUERY_MAX_CNT 5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"query function status failed %d.\n", ret);

	/* Check pf reset is done */
	usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
/* Query PF resources from firmware and cache them in hdev: TQP count
 * (base + extended), packet/TX/DV buffer sizes (with defaults when the
 * firmware reports zero, rounded up to HCLGE_BUF_SIZE_UNIT), and NIC /
 * RoCE MSI-X vector counts.
 */
static int hclge_query_pf_resource(struct hclge_dev *hdev)

	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",

	if (hnae3_dev_roce_supported(hdev)) {
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
		hdev->num_msi = hdev->num_nic_msi;
/* Translate a firmware speed code into a HCLGE_MAC_SPEED_* value via
 * *speed.  NOTE(review): the switch/case labels are not visible in this
 * view — confirm the code-to-speed mapping against the full file.
 */
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)

	*speed = HCLGE_MAC_SPEED_10M;
	*speed = HCLGE_MAC_SPEED_100M;
	*speed = HCLGE_MAC_SPEED_1G;
	*speed = HCLGE_MAC_SPEED_10G;
	*speed = HCLGE_MAC_SPEED_25G;
	*speed = HCLGE_MAC_SPEED_40G;
	*speed = HCLGE_MAC_SPEED_50G;
	*speed = HCLGE_MAC_SPEED_100G;
	*speed = HCLGE_MAC_SPEED_200G;
/* Check whether @speed is supported by the port: map the speed value to
 * its HCLGE_SUPPORT_*_BIT and test it against the MAC's speed_ability.
 */
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;

	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
	case HCLGE_MAC_SPEED_200G:
		speed_bit = HCLGE_SUPPORT_200G_BIT;

	if (speed_bit & speed_ability)
/* Set the SR (short-reach fiber) ethtool link modes corresponding to the
 * hardware speed-ability bits in mac->supported.
 */
static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
/* Set the LR (long-reach fiber) ethtool link modes corresponding to the
 * hardware speed-ability bits in mac->supported.
 * NOTE(review): the 25G branch sets 25000baseSR (ethtool defines no 25G
 * LR mode) — confirm this is intentional.
 */
static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
/* Set the CR (copper/direct-attach) ethtool link modes corresponding to
 * the hardware speed-ability bits in mac->supported.
 */
static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
/* Set the KR/KX (backplane) ethtool link modes corresponding to the
 * hardware speed-ability bits in mac->supported.
 */
static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
/* Refresh the advertised FEC modes and mac->fec_ability based on the
 * current MAC speed: 10G/40G support BASER, 25G/50G support BASER+RS,
 * 100G/200G support RS only; other speeds get no FEC ability.
 */
static void hclge_convert_setting_fec(struct hclge_mac *mac)

	/* drop stale FEC bits before re-deriving them from the speed */
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
				BIT(HNAE3_FEC_AUTO);
	case HCLGE_MAC_SPEED_100G:
	case HCLGE_MAC_SPEED_200G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		mac->fec_ability = 0;
/* Populate mac->supported for a fiber port: 1G baseX if able, SR/LR/CR
 * modes per speed ability, FEC modes when the device supports FEC, plus
 * the FIBRE / Pause / FEC_NONE base bits.
 */
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,

	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
/* Populate mac->supported for a backplane (KR) port: per-speed KR modes,
 * FEC modes when supported, plus Backplane/Pause/FEC_NONE base bits.
 */
1158 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1161 struct hclge_mac *mac = &hdev->hw.mac;
1163 hclge_convert_setting_kr(mac, speed_ability);
1164 if (hnae3_dev_fec_supported(hdev))
1165 hclge_convert_setting_fec(mac);
1166 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1167 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1168 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
/* Populate the supported link modes for a copper (TP) port: 10M/100M/1G
 * twisted-pair modes per speed_ability, plus Autoneg/TP/Pause/Asym_Pause.
 */
1171 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1174 unsigned long *supported = hdev->hw.mac.supported;
1176 /* default to support all speed for GE port */
1178 speed_ability = HCLGE_SUPPORT_GE;
1180 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1181 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1184 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1185 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1187 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1191 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1192 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1193 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1196 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1197 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1198 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1199 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
/* Dispatch supported-link-mode parsing based on the MAC media type
 * (fiber, copper or backplane). Unknown media types are silently ignored.
 */
1202 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1204 u8 media_type = hdev->hw.mac.media_type;
1206 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1207 hclge_parse_fiber_link_mode(hdev, speed_ability);
1208 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1209 hclge_parse_copper_link_mode(hdev, speed_ability);
1210 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1211 hclge_parse_backplane_link_mode(hdev, speed_ability);
/* Return the highest MAC speed advertised in the speed_ability bitmap,
 * checking from fastest (200G) to slowest (10M); defaults to 1G when no
 * bit is set.
 */
1214 static u32 hclge_get_max_speed(u16 speed_ability)
1216 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1217 return HCLGE_MAC_SPEED_200G;
1219 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1220 return HCLGE_MAC_SPEED_100G;
1222 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1223 return HCLGE_MAC_SPEED_50G;
1225 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1226 return HCLGE_MAC_SPEED_40G;
1228 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1229 return HCLGE_MAC_SPEED_25G;
1231 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1232 return HCLGE_MAC_SPEED_10G;
1234 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1235 return HCLGE_MAC_SPEED_1G;
1237 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1238 return HCLGE_MAC_SPEED_100M;
1240 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1241 return HCLGE_MAC_SPEED_10M;
1243 return HCLGE_MAC_SPEED_1G;
/* Decode the firmware configuration command descriptors into @cfg:
 * vport/TC/queue counts, PHY address, media type, RX buffer length,
 * MAC address, default speed, speed ability (base + extended bits),
 * UMV table space and PF/VF max RSS sizes. Fields are little-endian
 * in the descriptors and converted to CPU order here.
 */
1246 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1248 #define SPEED_ABILITY_EXT_SHIFT 8
1250 struct hclge_cfg_param_cmd *req;
1251 u64 mac_addr_tmp_high;
1252 u16 speed_ability_ext;
1256 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1258 /* get the configuration */
1259 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1262 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1263 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1264 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1265 HCLGE_CFG_TQP_DESC_N_M,
1266 HCLGE_CFG_TQP_DESC_N_S);
1268 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1269 HCLGE_CFG_PHY_ADDR_M,
1270 HCLGE_CFG_PHY_ADDR_S);
1271 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1272 HCLGE_CFG_MEDIA_TP_M,
1273 HCLGE_CFG_MEDIA_TP_S);
1274 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1275 HCLGE_CFG_RX_BUF_LEN_M,
1276 HCLGE_CFG_RX_BUF_LEN_S);
1277 /* get mac_address */
1278 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1279 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1280 HCLGE_CFG_MAC_ADDR_H_M,
1281 HCLGE_CFG_MAC_ADDR_H_S);
1283 /* double-shift (<< 31 << 1) avoids UB of a 32-bit shift on a u64 path */
1283 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1285 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1286 HCLGE_CFG_DEFAULT_SPEED_M,
1287 HCLGE_CFG_DEFAULT_SPEED_S);
1288 cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1289 HCLGE_CFG_RSS_SIZE_M,
1290 HCLGE_CFG_RSS_SIZE_S);
1292 /* MAC address is stored little-endian: byte i = bits [8i, 8i+7] */
1292 for (i = 0; i < ETH_ALEN; i++)
1293 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1295 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1296 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1298 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1299 HCLGE_CFG_SPEED_ABILITY_M,
1300 HCLGE_CFG_SPEED_ABILITY_S);
1301 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1302 HCLGE_CFG_SPEED_ABILITY_EXT_M,
1303 HCLGE_CFG_SPEED_ABILITY_EXT_S);
1304 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1306 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307 HCLGE_CFG_UMV_TBL_SPACE_M,
1308 HCLGE_CFG_UMV_TBL_SPACE_S);
1309 if (!cfg->umv_space)
1310 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1312 cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1313 HCLGE_CFG_PF_RSS_SIZE_M,
1314 HCLGE_CFG_PF_RSS_SIZE_S);
1316 /* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a
1317 * power of 2, instead of reading out directly. This would
1318 * be more flexible for future changes and expansions.
1319 * When VF max rss size field is HCLGE_CFG_RSS_SIZE_S,
1320 * it does not make sense if PF's field is 0. In this case, PF and VF
1321 * has the same max rss size field: HCLGE_CFG_RSS_SIZE_S.
1323 cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1324 1U << cfg->pf_rss_size_max :
1325 cfg->vf_rss_size_max;
1328 /* hclge_get_cfg: query the static parameter from flash
1329 * @hdev: pointer to struct hclge_dev
1330 * @hcfg: the config structure to be filled in
1330 * Return: 0 on success, negative errno on command failure.
1332 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1334 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1335 struct hclge_cfg_param_cmd *req;
1339 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1342 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1343 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1345 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1346 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1347 /* Len should be united by 4 bytes when send to hardware */
1348 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1349 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1350 req->offset = cpu_to_le32(offset);
1353 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1355 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1359 hclge_parse_cfg(hcfg, desc);
/* Fill ae_dev->dev_specs with compile-time defaults; used for devices
 * older than V3 that cannot report their specifications via firmware.
 */
1364 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1366 #define HCLGE_MAX_NON_TSO_BD_NUM 8U
1368 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1370 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1371 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1372 ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1373 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1374 ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1375 ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1376 ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
/* Decode the two QUERY_DEV_SPECS response descriptors into
 * ae_dev->dev_specs, converting multi-byte fields from little-endian.
 */
1379 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1380 struct hclge_desc *desc)
1382 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1383 struct hclge_dev_specs_0_cmd *req0;
1384 struct hclge_dev_specs_1_cmd *req1;
1386 req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1387 req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1389 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1390 ae_dev->dev_specs.rss_ind_tbl_size =
1391 le16_to_cpu(req0->rss_ind_tbl_size);
1392 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1393 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1394 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1395 ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1396 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1397 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
/* Backfill any dev_specs field the firmware reported as zero with the
 * driver default, so later code can rely on non-zero values.
 */
1400 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1402 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1404 if (!dev_specs->max_non_tso_bd_num)
1405 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1406 if (!dev_specs->rss_ind_tbl_size)
1407 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1408 if (!dev_specs->rss_key_size)
1409 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1410 if (!dev_specs->max_tm_rate)
1411 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1412 if (!dev_specs->max_qset_num)
1413 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1414 if (!dev_specs->max_int_gl)
1415 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1416 if (!dev_specs->max_frm_size)
1417 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
/* Query device specifications from firmware (V3+ devices) using a chained
 * multi-descriptor command; older devices fall back to compiled defaults.
 * Returns 0 on success or a negative errno from the command layer.
 */
1420 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1422 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1426 /* set default specifications as devices lower than version V3 do not
1427 * support querying specifications from firmware.
1429 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1430 hclge_set_default_dev_specs(hdev);
1434 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1435 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1437 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1439 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1441 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1445 hclge_parse_dev_specs(hdev, desc);
1446 hclge_check_dev_specs(hdev);
/* Query the PF's function status and then its resource allocation from
 * firmware. Returns 0 on success or a negative errno.
 */
1451 static int hclge_get_cap(struct hclge_dev *hdev)
1455 ret = hclge_query_function_status(hdev);
1457 dev_err(&hdev->pdev->dev,
1458 "query function status error %d.\n", ret);
1462 /* get pf resource */
1463 return hclge_query_pf_resource(hdev);
/* When running in a kdump (crash capture) kernel, shrink resource usage:
 * one queue pair per vport and minimal TX/RX descriptor counts. No-op in
 * a normal kernel.
 */
1466 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1468 #define HCLGE_MIN_TX_DESC 64
1469 #define HCLGE_MIN_RX_DESC 64
1471 if (!is_kdump_kernel())
1474 dev_info(&hdev->pdev->dev,
1475 "Running kdump kernel. Using minimal resources\n");
1477 /* minimal queue pairs equals to the number of vports */
1478 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1479 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1480 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
/* Read the static configuration from firmware and apply it to @hdev:
 * queue/descriptor counts, MAC address, media type, default speed, link
 * modes, TC map, kdump limits and the default IRQ affinity mask.
 * Returns 0 on success or a negative errno.
 */
1483 static int hclge_configure(struct hclge_dev *hdev)
1485 struct hclge_cfg cfg;
1489 ret = hclge_get_cfg(hdev, &cfg);
1493 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1494 hdev->base_tqp_pid = 0;
1495 hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1496 hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1497 hdev->rx_buf_len = cfg.rx_buf_len;
1498 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1499 hdev->hw.mac.media_type = cfg.media_type;
1500 hdev->hw.mac.phy_addr = cfg.phy_addr;
1501 hdev->num_tx_desc = cfg.tqp_desc_num;
1502 hdev->num_rx_desc = cfg.tqp_desc_num;
1503 hdev->tm_info.num_pg = 1;
1504 hdev->tc_max = cfg.tc_num;
1505 hdev->tm_info.hw_pfc_map = 0;
1506 hdev->wanted_umv_size = cfg.umv_space;
1508 if (hnae3_dev_fd_supported(hdev)) {
1510 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1513 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1515 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1516 cfg.default_speed, ret);
1520 hclge_parse_link_mode(hdev, cfg.speed_ability);
1522 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1524 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1525 (hdev->tc_max < 1)) {
1526 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1531 /* Dev does not support DCB */
1532 if (!hnae3_dev_dcb_supported(hdev)) {
1536 hdev->pfc_max = hdev->tc_max;
1539 hdev->tm_info.num_tc = 1;
1541 /* Currently does not support discontiguous TCs */
1542 for (i = 0; i < hdev->tm_info.num_tc; i++)
1543 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1545 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1547 hclge_init_kdump_kernel_config(hdev);
1549 /* Set the init affinity based on pci func number */
1550 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1551 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1552 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1553 &hdev->affinity_mask);
/* Program the hardware's TSO MSS minimum/maximum via a single command
 * descriptor. Returns 0 on success or a negative errno.
 */
1558 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1561 struct hclge_cfg_tso_status_cmd *req;
1562 struct hclge_desc desc;
1564 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1566 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1567 req->tso_mss_min = cpu_to_le16(tso_mss_min);
1568 req->tso_mss_max = cpu_to_le16(tso_mss_max);
1570 return hclge_cmd_send(&hdev->hw, &desc, 1);
/* Enable or disable hardware GRO. Skipped (without error) when the
 * device does not support GRO. Returns 0 or a negative errno.
 */
1573 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1575 struct hclge_cfg_gro_status_cmd *req;
1576 struct hclge_desc desc;
1579 if (!hnae3_dev_gro_supported(hdev))
1582 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1583 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1585 req->gro_en = en ? 1 : 0;
1587 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1589 dev_err(&hdev->pdev->dev,
1590 "GRO hardware config cmd failed, ret = %d\n", ret);
/* Allocate the per-device TQP (task queue pair) array with devm and
 * initialize each entry's queue metadata and MMIO base. Queues at index
 * >= HCLGE_TQP_MAX_SIZE_DEV_V2 live in an extended register region.
 */
1595 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1597 struct hclge_tqp *tqp;
1600 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1601 sizeof(struct hclge_tqp), GFP_KERNEL);
1607 for (i = 0; i < hdev->num_tqps; i++) {
1608 tqp->dev = &hdev->pdev->dev;
1611 tqp->q.ae_algo = &ae_algo;
1612 tqp->q.buf_size = hdev->rx_buf_len;
1613 tqp->q.tx_desc_num = hdev->num_tx_desc;
1614 tqp->q.rx_desc_num = hdev->num_rx_desc;
1616 /* need an extended offset to configure queues >=
1617 * HCLGE_TQP_MAX_SIZE_DEV_V2
1619 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1620 tqp->q.io_base = hdev->hw.io_base +
1621 HCLGE_TQP_REG_OFFSET +
1622 i * HCLGE_TQP_REG_SIZE;
1624 tqp->q.io_base = hdev->hw.io_base +
1625 HCLGE_TQP_REG_OFFSET +
1626 HCLGE_TQP_EXT_REG_OFFSET +
1627 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
/* Bind physical queue @tqp_pid to function @func_id as virtual queue
 * @tqp_vid via the SET_TQP_MAP command; @is_pf selects the PF map type
 * bit. Returns 0 or a negative errno.
 */
1636 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1637 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1639 struct hclge_tqp_map_cmd *req;
1640 struct hclge_desc desc;
1643 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1645 req = (struct hclge_tqp_map_cmd *)desc.data;
1646 req->tqp_id = cpu_to_le16(tqp_pid);
1647 req->tqp_vf = func_id;
1648 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1650 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1651 req->tqp_vid = cpu_to_le16(tqp_vid);
1653 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1655 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
/* Claim up to @num_tqps unallocated queues from the device pool for
 * @vport, then derive the vport's rss_size: capped by the PF max, by
 * queues-per-TC, and by available NIC MSI vectors per TC.
 */
1660 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1662 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1663 struct hclge_dev *hdev = vport->back;
1666 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1667 alloced < num_tqps; i++) {
1668 if (!hdev->htqp[i].alloced) {
1669 hdev->htqp[i].q.handle = &vport->nic;
1670 hdev->htqp[i].q.tqp_index = alloced;
1671 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1672 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1673 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1674 hdev->htqp[i].alloced = true;
1678 vport->alloc_tqps = alloced;
1679 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1680 vport->alloc_tqps / hdev->tm_info.num_tc);
1682 /* ensure one to one mapping between irq and queue at default */
1683 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1684 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
/* Initialize the vport's KNIC private info (descriptor counts, RX buffer
 * length), allocate its queue-pointer array with devm, and assign TQPs.
 * Returns 0 or a negative errno.
 */
1689 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1690 u16 num_tx_desc, u16 num_rx_desc)
1693 struct hnae3_handle *nic = &vport->nic;
1694 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1695 struct hclge_dev *hdev = vport->back;
1698 kinfo->num_tx_desc = num_tx_desc;
1699 kinfo->num_rx_desc = num_rx_desc;
1701 kinfo->rx_buf_len = hdev->rx_buf_len;
1703 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1704 sizeof(struct hnae3_queue *), GFP_KERNEL);
1708 ret = hclge_assign_tqp(vport, num_tqps);
1710 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
/* Map every queue already assigned to @vport to its function in hardware;
 * vport 0 is the PF itself. Returns 0 or the first mapping error.
 */
1715 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1716 struct hclge_vport *vport)
1718 struct hnae3_handle *nic = &vport->nic;
1719 struct hnae3_knic_private_info *kinfo;
1722 kinfo = &nic->kinfo;
1723 for (i = 0; i < vport->alloc_tqps; i++) {
1724 struct hclge_tqp *q =
1725 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1729 is_pf = !(vport->vport_id);
1730 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
/* Map the queues of every vport (PF + VMDq vports + requested VFs) to
 * their functions in hardware.
 */
1739 static int hclge_map_tqp(struct hclge_dev *hdev)
1741 struct hclge_vport *vport = hdev->vport;
1744 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1745 for (i = 0; i < num_vport; i++) {
1748 ret = hclge_map_tqp_to_vport(hdev, vport);
/* Wire up the vport's nic handle (pdev, ae_algo, NUMA mask) and perform
 * KNIC setup with the device's descriptor counts. Returns 0 or errno.
 */
1758 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1760 struct hnae3_handle *nic = &vport->nic;
1761 struct hclge_dev *hdev = vport->back;
1764 nic->pdev = hdev->pdev;
1765 nic->ae_algo = &ae_algo;
1766 nic->numa_node_mask = hdev->numa_node_mask;
1768 ret = hclge_knic_setup(vport, num_tqps,
1769 hdev->num_tx_desc, hdev->num_rx_desc);
1771 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
/* Allocate and initialize all vports (PF main vport + VMDq + VFs).
 * TQPs are split evenly per vport, with the remainder going to the main
 * (PF) vport; fails when fewer TQPs exist than vports.
 */
1776 static int hclge_alloc_vport(struct hclge_dev *hdev)
1778 struct pci_dev *pdev = hdev->pdev;
1779 struct hclge_vport *vport;
1785 /* We need to alloc a vport for main NIC of PF */
1786 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1788 if (hdev->num_tqps < num_vport) {
1789 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1790 hdev->num_tqps, num_vport);
1794 /* Alloc the same number of TQPs for every vport */
1795 tqp_per_vport = hdev->num_tqps / num_vport;
1796 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1798 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1803 hdev->vport = vport;
1804 hdev->num_alloc_vport = num_vport;
1806 if (IS_ENABLED(CONFIG_PCI_IOV))
1807 hdev->num_alloc_vfs = hdev->num_req_vfs;
1809 for (i = 0; i < num_vport; i++) {
1811 vport->vport_id = i;
1812 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1813 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1814 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1815 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1816 INIT_LIST_HEAD(&vport->vlan_list);
1817 INIT_LIST_HEAD(&vport->uc_mac_list);
1818 INIT_LIST_HEAD(&vport->mc_mac_list);
1819 spin_lock_init(&vport->mac_list_lock);
1822 ret = hclge_vport_setup(vport, tqp_main_vport);
1824 ret = hclge_vport_setup(vport, tqp_per_vport);
1827 "vport setup failed for vport %d, %d\n",
/* Send the per-TC TX buffer sizes to hardware. Sizes are expressed in
 * 128-byte units with the update-enable bit set in each field.
 */
1838 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1839 struct hclge_pkt_buf_alloc *buf_alloc)
1841 /* TX buffer size is unit by 128 byte */
1842 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1843 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1844 struct hclge_tx_buff_alloc_cmd *req;
1845 struct hclge_desc desc;
1849 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1851 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1852 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1853 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1855 req->tx_pkt_buff[i] =
1856 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1857 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1860 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1862 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
/* Thin wrapper around hclge_cmd_alloc_tx_buff() that logs on failure. */
1868 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1869 struct hclge_pkt_buf_alloc *buf_alloc)
1871 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1874 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
/* Count the TCs enabled in the hardware TC bitmap. */
1879 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1884 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1885 if (hdev->hw_tc_map & BIT(i))
1890 /* Get the number of pfc enabled TCs, which have private buffer */
1891 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1892 struct hclge_pkt_buf_alloc *buf_alloc)
1894 struct hclge_priv_buf *priv;
1898 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1899 priv = &buf_alloc->priv_buf[i];
1900 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1908 /* Get the number of pfc disabled TCs, which have private buffer */
1909 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1910 struct hclge_pkt_buf_alloc *buf_alloc)
1912 struct hclge_priv_buf *priv;
1916 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1917 priv = &buf_alloc->priv_buf[i];
1918 if (hdev->hw_tc_map & BIT(i) &&
1919 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
/* Sum the RX private buffer sizes currently assigned across all TCs. */
1927 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1929 struct hclge_priv_buf *priv;
1933 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1934 priv = &buf_alloc->priv_buf[i];
1936 rx_priv += priv->buf_size;
/* Sum the TX buffer sizes assigned across all TCs. */
1941 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1943 u32 i, total_tx_size = 0;
1945 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1946 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1948 return total_tx_size;
/* Check whether @rx_all bytes can cover the private buffers plus a
 * minimum shared buffer; when it can, compute the shared buffer size
 * and its high/low watermarks and per-TC thresholds (DCB and non-DCB
 * devices use different formulas). Returns true when the RX buffer
 * budget fits, false otherwise.
 */
1951 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1952 struct hclge_pkt_buf_alloc *buf_alloc,
1955 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1956 u32 tc_num = hclge_get_tc_num(hdev);
1957 u32 shared_buf, aligned_mps;
1961 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1963 if (hnae3_dev_dcb_supported(hdev))
1964 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1967 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1968 + hdev->dv_buf_size;
1970 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1971 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1972 HCLGE_BUF_SIZE_UNIT);
1974 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1975 if (rx_all < rx_priv + shared_std)
1978 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1979 buf_alloc->s_buf.buf_size = shared_buf;
1980 if (hnae3_dev_dcb_supported(hdev)) {
1981 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1982 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1983 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1984 HCLGE_BUF_SIZE_UNIT);
1986 buf_alloc->s_buf.self.high = aligned_mps +
1987 HCLGE_NON_DCB_ADDITIONAL_BUF;
1988 buf_alloc->s_buf.self.low = aligned_mps;
1991 if (hnae3_dev_dcb_supported(hdev)) {
1992 hi_thrd = shared_buf - hdev->dv_buf_size;
1994 if (tc_num <= NEED_RESERVE_TC_NUM)
1995 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1999 hi_thrd = hi_thrd / tc_num;
2001 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2002 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2003 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2005 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2006 lo_thrd = aligned_mps;
2009 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2010 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2011 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
/* Reserve a fixed-size TX buffer for each enabled TC out of the total
 * packet buffer; disabled TCs get zero. Fails when the budget runs out.
 */
2017 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2018 struct hclge_pkt_buf_alloc *buf_alloc)
2022 total_size = hdev->pkt_buf_size;
2024 /* alloc tx buffer for all enabled tc */
2025 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2026 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2028 if (hdev->hw_tc_map & BIT(i)) {
2029 if (total_size < hdev->tx_buf_size)
2032 priv->tx_buf_size = hdev->tx_buf_size;
2034 priv->tx_buf_size = 0;
2037 total_size -= priv->tx_buf_size;
/* Compute per-TC RX private buffer sizes and watermarks for every
 * enabled TC; @max selects the generous vs. reduced watermark scheme.
 * PFC-enabled TCs get MPS-based watermarks. Returns true if the result
 * fits the remaining RX budget (checked via hclge_is_rx_buf_ok()).
 */
2043 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2044 struct hclge_pkt_buf_alloc *buf_alloc)
2046 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2047 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2050 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2051 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2058 if (!(hdev->hw_tc_map & BIT(i)))
2063 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2064 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2065 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2066 HCLGE_BUF_SIZE_UNIT);
2069 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2073 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2076 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
/* Clear private buffers of non-PFC TCs, highest TC first, until the RX
 * budget fits or no non-PFC private buffers remain. Returns the final
 * fit verdict.
 */
2079 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2080 struct hclge_pkt_buf_alloc *buf_alloc)
2082 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2083 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2086 /* let the last to be cleared first */
2087 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2088 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2089 unsigned int mask = BIT((unsigned int)i);
2091 if (hdev->hw_tc_map & mask &&
2092 !(hdev->tm_info.hw_pfc_map & mask)) {
2093 /* Clear the no pfc TC private buffer */
2101 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2102 no_pfc_priv_num == 0)
2106 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
/* Clear private buffers of PFC-enabled TCs, highest TC first, until the
 * RX budget fits. Mirrors hclge_drop_nopfc_buf_till_fit() for the PFC
 * set. Returns the final fit verdict.
 */
2109 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2110 struct hclge_pkt_buf_alloc *buf_alloc)
2112 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2113 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2116 /* let the last to be cleared first */
2117 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2118 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2119 unsigned int mask = BIT((unsigned int)i);
2121 if (hdev->hw_tc_map & mask &&
2122 hdev->tm_info.hw_pfc_map & mask) {
2123 /* Reduce the number of pfc TC with private buffer */
2131 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2136 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
/* Alternative RX layout: divide the whole RX budget evenly into per-TC
 * private buffers (no shared buffer), reserving headroom when few TCs
 * are enabled. Succeeds only if each TC's share meets the minimum
 * (dv_buf + compensation); on success the shared buffer size is zero.
 */
2139 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2140 struct hclge_pkt_buf_alloc *buf_alloc)
2142 #define COMPENSATE_BUFFER 0x3C00
2143 #define COMPENSATE_HALF_MPS_NUM 5
2144 #define PRIV_WL_GAP 0x1800
2146 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2147 u32 tc_num = hclge_get_tc_num(hdev);
2148 u32 half_mps = hdev->mps >> 1;
2153 rx_priv = rx_priv / tc_num;
2155 if (tc_num <= NEED_RESERVE_TC_NUM)
2156 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2158 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2159 COMPENSATE_HALF_MPS_NUM * half_mps;
2160 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2161 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2163 if (rx_priv < min_rx_priv)
2166 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2167 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2174 if (!(hdev->hw_tc_map & BIT(i)))
2178 priv->buf_size = rx_priv;
2179 priv->wl.high = rx_priv - hdev->dv_buf_size;
2180 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2183 buf_alloc->s_buf.buf_size = 0;
2188 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2189 * @hdev: pointer to struct hclge_dev
2190 * @buf_alloc: pointer to buffer calculation data
2191 * @return: 0: calculate successful, negative: fail
2191 * Strategies are tried in order: private-only layout, generous
2191 * watermarks, reduced watermarks, then dropping non-PFC and finally
2191 * PFC private buffers until the budget fits.
2193 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2194 struct hclge_pkt_buf_alloc *buf_alloc)
2196 /* When DCB is not supported, rx private buffer is not allocated. */
2197 if (!hnae3_dev_dcb_supported(hdev)) {
2198 u32 rx_all = hdev->pkt_buf_size;
2200 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2201 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2207 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2210 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2213 /* try to decrease the buffer size */
2214 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2217 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2220 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
/* Program the per-TC RX private buffer sizes and the shared buffer size
 * into hardware (sizes in HCLGE_BUF_UNIT_S units, enable bit set).
 */
2226 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2227 struct hclge_pkt_buf_alloc *buf_alloc)
2229 struct hclge_rx_priv_buff_cmd *req;
2230 struct hclge_desc desc;
2234 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2235 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2237 /* Alloc private buffer TCs */
2238 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2239 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2242 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2244 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2248 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2249 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2251 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2253 dev_err(&hdev->pdev->dev,
2254 "rx private buffer alloc cmd failed %d\n", ret);
/* Program the per-TC RX private buffer high/low waterlines into hardware
 * using two chained descriptors (HCLGE_TC_NUM_ONE_DESC TCs per
 * descriptor); values in HCLGE_BUF_UNIT_S units with the enable bit set.
 */
2259 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2260 struct hclge_pkt_buf_alloc *buf_alloc)
2262 struct hclge_rx_priv_wl_buf *req;
2263 struct hclge_priv_buf *priv;
2264 struct hclge_desc desc[2];
2268 for (i = 0; i < 2; i++) {
2269 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2271 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2273 /* The first descriptor set the NEXT bit to 1 */
2275 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2277 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2279 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2280 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2282 priv = &buf_alloc->priv_buf[idx];
2283 req->tc_wl[j].high =
2284 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2285 req->tc_wl[j].high |=
2286 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2288 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2289 req->tc_wl[j].low |=
2290 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2294 /* Send 2 descriptor at one time */
2295 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2297 dev_err(&hdev->pdev->dev,
2298 "rx private waterline config cmd failed %d\n",
/* Program the per-TC shared-buffer thresholds (high/low) into hardware
 * with two chained descriptors, mirroring hclge_rx_priv_wl_config().
 */
2303 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2304 struct hclge_pkt_buf_alloc *buf_alloc)
2306 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2307 struct hclge_rx_com_thrd *req;
2308 struct hclge_desc desc[2];
2309 struct hclge_tc_thrd *tc;
2313 for (i = 0; i < 2; i++) {
2314 hclge_cmd_setup_basic_desc(&desc[i],
2315 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2316 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2318 /* The first descriptor set the NEXT bit to 1 */
2320 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2322 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2324 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2325 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2327 req->com_thrd[j].high =
2328 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2329 req->com_thrd[j].high |=
2330 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2331 req->com_thrd[j].low =
2332 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2333 req->com_thrd[j].low |=
2334 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2338 /* Send 2 descriptors at one time */
2339 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2341 dev_err(&hdev->pdev->dev,
2342 "common threshold config cmd failed %d\n", ret);
/* Program the shared-buffer overall high/low waterlines into hardware
 * via a single descriptor.
 */
2346 static int hclge_common_wl_config(struct hclge_dev *hdev,
2347 struct hclge_pkt_buf_alloc *buf_alloc)
2349 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2350 struct hclge_rx_com_wl *req;
2351 struct hclge_desc desc;
2354 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2356 req = (struct hclge_rx_com_wl *)desc.data;
2357 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2358 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2360 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2361 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2363 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2365 dev_err(&hdev->pdev->dev,
2366 "common waterline config cmd failed %d\n", ret);
/* Full packet-buffer provisioning sequence: calculate and program TX
 * buffers, calculate and program RX private buffers, and (on DCB-capable
 * devices) the private waterlines and shared thresholds, then the shared
 * waterline. Uses a heap-allocated scratch struct. Returns 0 or errno.
 */
2371 int hclge_buffer_alloc(struct hclge_dev *hdev)
2373 struct hclge_pkt_buf_alloc *pkt_buf;
2376 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2380 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2382 dev_err(&hdev->pdev->dev,
2383 "could not calc tx buffer size for all TCs %d\n", ret);
2387 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2389 dev_err(&hdev->pdev->dev,
2390 "could not alloc tx buffers %d\n", ret);
2394 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2396 dev_err(&hdev->pdev->dev,
2397 "could not calc rx priv buffer size for all TCs %d\n",
2402 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2404 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2409 if (hnae3_dev_dcb_supported(hdev)) {
2410 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2412 dev_err(&hdev->pdev->dev,
2413 "could not configure rx private waterline %d\n",
2418 ret = hclge_common_thrd_config(hdev, pkt_buf);
2420 dev_err(&hdev->pdev->dev,
2421 "could not configure common threshold %d\n",
2427 ret = hclge_common_wl_config(hdev, pkt_buf);
2429 dev_err(&hdev->pdev->dev,
2430 "could not configure common waterline %d\n", ret);
/* Populate the vport's RoCE handle from the PF state: vector count/base,
 * netdev, I/O and memory BAR bases, and identity fields mirrored from the
 * NIC handle.  Fails (error return elided here) when the total MSI budget
 * cannot cover both NIC and RoCE vectors.
 */
2437 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2439 struct hnae3_handle *roce = &vport->roce;
2440 struct hnae3_handle *nic = &vport->nic;
2441 struct hclge_dev *hdev = vport->back;
2443 roce->rinfo.num_vectors = vport->back->num_roce_msi;
/* Not enough vectors to serve NIC + RoCE simultaneously. */
2445 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2448 roce->rinfo.base_vector = hdev->roce_base_vector;
2450 roce->rinfo.netdev = nic->kinfo.netdev;
2451 roce->rinfo.roce_io_base = hdev->hw.io_base;
2452 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2454 roce->pdev = nic->pdev;
2455 roce->ae_algo = nic->ae_algo;
2456 roce->numa_node_mask = nic->numa_node_mask;
/* Allocate MSI/MSI-X interrupt vectors for the PF and set up per-vector
 * bookkeeping.  Accepts fewer vectors than requested (with a warning) as
 * long as at least HNAE3_MIN_VECTOR_NUM are granted.  vector_status[]
 * tracks which vport owns each vector (HCLGE_INVALID_VPORT == free) and
 * vector_irq[] caches the Linux IRQ numbers.  On allocation failure of
 * either array the IRQ vectors are released again.
 */
2463 static int hclge_init_msi(struct hclge_dev *hdev)
2465 struct pci_dev *pdev = hdev->pdev;
2467 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2469 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2472 "failed(%d) to allocate MSI/MSI-X vectors\n",
2476 if (vectors < hdev->num_msi)
2477 dev_warn(&hdev->pdev->dev,
2478 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2479 hdev->num_msi, vectors);
2481 hdev->num_msi = vectors;
2482 hdev->num_msi_left = vectors;
2484 hdev->base_msi_vector = pdev->irq;
2485 hdev->roce_base_vector = hdev->base_msi_vector +
2488 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2489 sizeof(u16), GFP_KERNEL);
2490 if (!hdev->vector_status) {
2491 pci_free_irq_vectors(pdev);
/* Mark every vector unowned until a vport claims it. */
2495 for (i = 0; i < hdev->num_msi; i++)
2496 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2498 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2499 sizeof(int), GFP_KERNEL);
2500 if (!hdev->vector_irq) {
2501 pci_free_irq_vectors(pdev);
/* Sanitize a requested duplex for the given speed: only 10M and 100M may
 * run half duplex; every other speed is forced to full duplex.  Returns
 * the (possibly corrected) duplex value (return line elided here).
 */
2508 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2510 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2511 duplex = HCLGE_MAC_FULL;
/* Program MAC speed and duplex into hardware via the CONFIG_SPEED_DUP
 * command.  The switch translates driver speed values into the firmware's
 * speed encoding (the numeric codes 0-8 are firmware-defined, not Mbps).
 * Unknown speeds are rejected with an error (return lines elided here).
 */
2516 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2519 struct hclge_config_mac_speed_dup_cmd *req;
2520 struct hclge_desc desc;
2523 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2525 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2528 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2531 case HCLGE_MAC_SPEED_10M:
2532 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2533 HCLGE_CFG_SPEED_S, 6);
2535 case HCLGE_MAC_SPEED_100M:
2536 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2537 HCLGE_CFG_SPEED_S, 7);
2539 case HCLGE_MAC_SPEED_1G:
2540 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2541 HCLGE_CFG_SPEED_S, 0);
2543 case HCLGE_MAC_SPEED_10G:
2544 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2545 HCLGE_CFG_SPEED_S, 1);
2547 case HCLGE_MAC_SPEED_25G:
2548 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2549 HCLGE_CFG_SPEED_S, 2);
2551 case HCLGE_MAC_SPEED_40G:
2552 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2553 HCLGE_CFG_SPEED_S, 3);
2555 case HCLGE_MAC_SPEED_50G:
2556 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2557 HCLGE_CFG_SPEED_S, 4);
2559 case HCLGE_MAC_SPEED_100G:
2560 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2561 HCLGE_CFG_SPEED_S, 5);
2563 case HCLGE_MAC_SPEED_200G:
2564 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2565 HCLGE_CFG_SPEED_S, 8);
2568 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2572 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2575 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2577 dev_err(&hdev->pdev->dev,
2578 "mac speed/duplex config cmd failed %d.\n", ret);
/* Set MAC speed/duplex, caching the result in hdev->hw.mac on success.
 * The duplex is first sanitized against the speed; when autoneg is not
 * supported and the requested values already match the cached state the
 * hardware write is skipped (early-return line elided here).
 */
2585 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2587 struct hclge_mac *mac = &hdev->hw.mac;
2590 duplex = hclge_check_speed_dup(duplex, speed);
2591 if (!mac->support_autoneg && mac->speed == speed &&
2592 mac->duplex == duplex)
2595 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2599 hdev->hw.mac.speed = speed;
2600 hdev->hw.mac.duplex = duplex;
/* hnae3 ops wrapper: resolve the PF device from the handle and forward
 * to hclge_cfg_mac_speed_dup().
 */
2605 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2608 struct hclge_vport *vport = hclge_get_vport(handle);
2609 struct hclge_dev *hdev = vport->back;
2611 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
/* Enable/disable MAC autonegotiation via the CONFIG_AN_MODE command.
 * NOTE(review): the declaration of 'flag' and the conditional on 'enable'
 * are elided in this excerpt; presumably the AN_EN bit is only set when
 * enable is true - confirm against the full source.
 */
2614 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2616 struct hclge_config_auto_neg_cmd *req;
2617 struct hclge_desc desc;
2621 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2623 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2625 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2626 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2628 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2630 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
/* hnae3 ops: set autoneg on/off.  Rejects the request with an error when
 * the current port does not support autonegotiation (error return line
 * elided here).
 */
2636 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2638 struct hclge_vport *vport = hclge_get_vport(handle);
2639 struct hclge_dev *hdev = vport->back;
2641 if (!hdev->hw.mac.support_autoneg) {
2643 dev_err(&hdev->pdev->dev,
2644 "autoneg is not supported by current port\n");
2651 return hclge_set_autoneg_en(hdev, enable);
/* hnae3 ops: report autoneg state - from the attached PHY when one exists
 * (the 'if (phydev)' guard is elided in this excerpt), otherwise from the
 * cached MAC state.
 */
2654 static int hclge_get_autoneg(struct hnae3_handle *handle)
2656 struct hclge_vport *vport = hclge_get_vport(handle);
2657 struct hclge_dev *hdev = vport->back;
2658 struct phy_device *phydev = hdev->hw.mac.phydev;
2661 return phydev->autoneg;
2663 return hdev->hw.mac.autoneg;
/* hnae3 ops: restart autonegotiation by bouncing the NIC client - notify
 * DOWN, then UP (error check between the two calls is elided here).
 */
2666 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2668 struct hclge_vport *vport = hclge_get_vport(handle);
2669 struct hclge_dev *hdev = vport->back;
2672 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2674 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2677 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
/* hnae3 ops: temporarily halt (halt=true) or resume (halt=false) autoneg
 * in hardware, but only when the MAC both supports and has autoneg on.
 */
2680 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2682 struct hclge_vport *vport = hclge_get_vport(handle);
2683 struct hclge_dev *hdev = vport->back;
2685 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2686 return hclge_set_autoneg_en(hdev, !halt);
/* Program the FEC mode into hardware via CONFIG_FEC_MODE.  fec_mode is a
 * bitmask of HNAE3_FEC_* bits: AUTO sets the auto-enable bit; RS and
 * BASER select the corresponding mode in the mode field.
 */
2691 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2693 struct hclge_config_fec_cmd *req;
2694 struct hclge_desc desc;
2697 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2699 req = (struct hclge_config_fec_cmd *)desc.data;
2700 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2701 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2702 if (fec_mode & BIT(HNAE3_FEC_RS))
2703 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2704 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2705 if (fec_mode & BIT(HNAE3_FEC_BASER))
2706 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2707 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2709 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2711 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
/* hnae3 ops: set FEC mode.  A non-zero request must be a subset of the
 * MAC's advertised fec_ability; on success the user's choice is recorded
 * with the USER_DEF marker so later reconfiguration can replay it.
 */
2716 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2718 struct hclge_vport *vport = hclge_get_vport(handle);
2719 struct hclge_dev *hdev = vport->back;
2720 struct hclge_mac *mac = &hdev->hw.mac;
2723 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2724 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2728 ret = hclge_set_fec_hw(hdev, fec_mode);
2732 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
/* hnae3 ops: report FEC capability and current mode through the optional
 * out-parameters (the NULL guards for each pointer are elided here).
 */
2736 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2739 struct hclge_vport *vport = hclge_get_vport(handle);
2740 struct hclge_dev *hdev = vport->back;
2741 struct hclge_mac *mac = &hdev->hw.mac;
2744 *fec_ability = mac->fec_ability;
2746 *fec_mode = mac->fec_mode;
/* One-shot MAC bring-up: program speed/duplex (duplex forced to FULL
 * initially), autoneg and user-selected FEC where applicable, MTU,
 * default loopback state, and finally the packet-buffer allocation.
 * Error-propagation lines between steps are elided in this excerpt.
 */
2749 static int hclge_mac_init(struct hclge_dev *hdev)
2751 struct hclge_mac *mac = &hdev->hw.mac;
2754 hdev->support_sfp_query = true;
2755 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2756 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2757 hdev->hw.mac.duplex);
2761 if (hdev->hw.mac.support_autoneg) {
2762 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
/* Replay FEC only when the user explicitly configured it earlier. */
2769 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2770 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2775 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2777 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2781 ret = hclge_set_default_loopback(hdev);
2785 ret = hclge_buffer_alloc(hdev);
2787 dev_err(&hdev->pdev->dev,
2788 "allocate buffer fail, ret=%d\n", ret);
/* Queue the service task for mailbox handling on the preferred CPU.  The
 * test_and_set on MBX_SERVICE_SCHED makes scheduling idempotent, and
 * nothing is queued once the device is being removed.
 */
2793 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2795 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2796 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2797 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2798 hclge_wq, &hdev->service_task, 0);
/* Queue the service task for reset handling; same idempotent scheduling
 * pattern as hclge_mbx_task_schedule(), keyed on RST_SERVICE_SCHED.
 */
2801 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2803 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2804 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2805 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2806 hclge_wq, &hdev->service_task, 0);
/* (Re)arm the periodic service task after delay_time, unless the device
 * is being removed or is in the reset-failed state.  The delay argument
 * to mod_delayed_work_on() is elided in this excerpt.
 */
2809 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2811 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2812 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2813 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2814 hclge_wq, &hdev->service_task,
/* Query the MAC link state from firmware and translate the raw status
 * bit into HCLGE_LINK_STATUS_UP/DOWN via *link_status.
 */
2818 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2820 struct hclge_link_status_cmd *req;
2821 struct hclge_desc desc;
2824 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2825 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2827 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2832 req = (struct hclge_link_status_cmd *)desc.data;
2833 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2834 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
/* Combined PHY+MAC link query.  Defaults to DOWN and short-circuits when
 * the interface is administratively down or the attached PHY is not in
 * RUNNING state with link up; otherwise asks the MAC.
 */
2839 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2841 struct phy_device *phydev = hdev->hw.mac.phydev;
2843 *link_status = HCLGE_LINK_STATUS_DOWN;
2845 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2848 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2851 return hclge_get_mac_link_status(hdev, link_status);
/* Poll link state and, on change, fan the notification out to the NIC
 * client (and RoCE client when registered) of every vport, reconfigure
 * the MAC TNL interrupt, and cache the new state.  The LINK_UPDATING bit
 * serializes concurrent updaters.
 */
2854 static void hclge_update_link_status(struct hclge_dev *hdev)
2856 struct hnae3_client *rclient = hdev->roce_client;
2857 struct hnae3_client *client = hdev->nic_client;
2858 struct hnae3_handle *rhandle;
2859 struct hnae3_handle *handle;
2867 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2870 ret = hclge_get_mac_phy_link(hdev, &state);
2872 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2876 if (state != hdev->hw.mac.link) {
2877 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2878 handle = &hdev->vport[i].nic;
2879 client->ops->link_status_change(handle, state);
2880 hclge_config_mac_tnl_int(hdev, state);
2881 rhandle = &hdev->vport[i].roce;
2882 if (rclient && rclient->ops->link_status_change)
2883 rclient->ops->link_status_change(rhandle,
2886 hdev->hw.mac.link = state;
2889 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
/* Refresh derived port capabilities after a speed/media change: FEC
 * ability, module type (firmware cannot identify backplane, so infer KR;
 * copper implies TP), and the Autoneg link mode / advertising masks.
 */
2892 static void hclge_update_port_capability(struct hclge_mac *mac)
2894 /* update fec ability by speed */
2895 hclge_convert_setting_fec(mac);
2897 /* firmware can not identify back plane type, the media type
2898 * read from configuration can help deal it
2900 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2901 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2902 mac->module_type = HNAE3_MODULE_TYPE_KR;
2903 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2904 mac->module_type = HNAE3_MODULE_TYPE_TP;
2906 if (mac->support_autoneg) {
2907 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2908 linkmode_copy(mac->advertising, mac->supported);
2910 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2912 linkmode_zero(mac->advertising);
/* Legacy SFP query (pre-V2 firmware): read only the module speed.
 * -EOPNOTSUPP is surfaced to the caller so it can stop querying; other
 * errors are logged (return lines elided in this excerpt).
 */
2916 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2918 struct hclge_sfp_info_cmd *resp;
2919 struct hclge_desc desc;
2922 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2923 resp = (struct hclge_sfp_info_cmd *)desc.data;
2924 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2925 if (ret == -EOPNOTSUPP) {
2926 dev_warn(&hdev->pdev->dev,
2927 "IMP do not support get SFP speed %d\n", ret);
2930 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2934 *speed = le32_to_cpu(resp->speed);
/* Full SFP/port query (V2+ firmware): request the active speed and, when
 * the firmware reports capability fields (speed_ability != 0), also the
 * module type, autoneg state/ability and active FEC.  A zero speed from
 * firmware is ignored rather than written into mac->speed.
 */
2939 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2941 struct hclge_sfp_info_cmd *resp;
2942 struct hclge_desc desc;
2945 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2946 resp = (struct hclge_sfp_info_cmd *)desc.data;
2948 resp->query_type = QUERY_ACTIVE_SPEED;
2950 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2951 if (ret == -EOPNOTSUPP) {
2952 dev_warn(&hdev->pdev->dev,
2953 "IMP does not support get SFP info %d\n", ret);
2956 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2960 /* In some case, mac speed get from IMP may be 0, it shouldn't be
2961 * set to mac->speed.
2963 if (!le32_to_cpu(resp->speed))
2966 mac->speed = le32_to_cpu(resp->speed);
2967 /* if resp->speed_ability is 0, it means it's an old version
2968 * firmware, do not update these params
2970 if (resp->speed_ability) {
2971 mac->module_type = le32_to_cpu(resp->module_type);
2972 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2973 mac->autoneg = resp->autoneg;
2974 mac->support_autoneg = resp->autoneg_ability;
2975 mac->speed_type = QUERY_ACTIVE_SPEED;
2976 if (!resp->active_fec)
2979 mac->fec_mode = BIT(resp->active_fec);
2981 mac->speed_type = QUERY_SFP_SPEED;
/* Periodic port refresh for non-copper ports.  V2+ firmware: full SFP
 * info query and, for active-speed results, capability update; older
 * firmware: speed-only query with forced full duplex.  A -EOPNOTSUPP
 * permanently disables further SFP queries via support_sfp_query.
 */
2987 static int hclge_update_port_info(struct hclge_dev *hdev)
2989 struct hclge_mac *mac = &hdev->hw.mac;
2990 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2993 /* get the port info from SFP cmd if not copper port */
2994 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2997 /* if IMP does not support get SFP/qSFP info, return directly */
2998 if (!hdev->support_sfp_query)
3001 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3002 ret = hclge_get_sfp_info(hdev, mac);
3004 ret = hclge_get_sfp_speed(hdev, &speed);
3006 if (ret == -EOPNOTSUPP) {
3007 hdev->support_sfp_query = false;
3013 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3014 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3015 hclge_update_port_capability(mac);
3018 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3021 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3022 return 0; /* do nothing if no SFP */
3024 /* must config full duplex for SFP */
3025 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
/* hnae3 ops: refresh and return the cached MAC link state. */
3029 static int hclge_get_status(struct hnae3_handle *handle)
3031 struct hclge_vport *vport = hclge_get_vport(handle);
3032 struct hclge_dev *hdev = vport->back;
3034 hclge_update_link_status(hdev);
3036 return hdev->hw.mac.link;
/* Map a VF index (0-based, as seen by userspace) to its vport.  Rejects
 * the lookup when SR-IOV is disabled or the index is out of range (NULL
 * return lines elided here); vport[0] is the PF, so VFs start at offset
 * HCLGE_VF_VPORT_START_NUM.
 */
3039 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3041 if (!pci_num_vf(hdev->pdev)) {
3042 dev_err(&hdev->pdev->dev,
3043 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3047 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3048 dev_err(&hdev->pdev->dev,
3049 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3050 vf, pci_num_vf(hdev->pdev));
3054 /* VF start from 1 in vport */
3055 vf += HCLGE_VF_VPORT_START_NUM;
3056 return &hdev->vport[vf];
/* hnae3 ops (ndo_get_vf_config path): fill ifla_vf_info from the VF's
 * vport state - link state, spoof check, trust, rate limits, port-based
 * VLAN (tag/proto/qos) and MAC address.
 */
3059 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3060 struct ifla_vf_info *ivf)
3062 struct hclge_vport *vport = hclge_get_vport(handle);
3063 struct hclge_dev *hdev = vport->back;
3065 vport = hclge_get_vf_vport(hdev, vf);
3070 ivf->linkstate = vport->vf_info.link_state;
3071 ivf->spoofchk = vport->vf_info.spoofchk;
3072 ivf->trusted = vport->vf_info.trusted;
/* min rate is not supported; always report 0. */
3073 ivf->min_tx_rate = 0;
3074 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3075 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3076 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3077 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3078 ether_addr_copy(ivf->mac, vport->vf_info.mac);
/* hnae3 ops: record the administrative link state for a VF in its vport
 * (validation / NULL check of the vport lookup is elided here).
 */
3083 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3086 struct hclge_vport *vport = hclge_get_vport(handle);
3087 struct hclge_dev *hdev = vport->back;
3089 vport = hclge_get_vf_vport(hdev, vf);
3093 vport->vf_info.link_state = link_state;
/* Decode the vector0 interrupt cause, in priority order: IMP reset,
 * global reset, MSI-X hardware error, mailbox (CMDQ RX), then "other".
 * *clearval receives the register value the caller must write back to
 * acknowledge the event.  Reset events also disable the command queue
 * (CMD_DISABLE) and bump the reset statistics.
 */
3098 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3100 u32 cmdq_src_reg, msix_src_reg;
3102 /* fetch the events from their corresponding regs */
3103 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3104 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3106 /* Assumption: If by any chance reset and mailbox events are reported
3107 * together then we will only process reset event in this go and will
3108 * defer the processing of the mailbox events. Since, we would have not
3109 * cleared RX CMDQ event this time we would receive again another
3110 * interrupt from H/W just for the mailbox.
3112 * check for vector0 reset event sources
3114 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3115 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3116 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3117 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3118 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3119 hdev->rst_stats.imp_rst_cnt++;
3120 return HCLGE_VECTOR0_EVENT_RST;
3123 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3124 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3125 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3126 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3127 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3128 hdev->rst_stats.global_rst_cnt++;
3129 return HCLGE_VECTOR0_EVENT_RST;
3132 /* check for vector0 msix event source */
3133 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3134 *clearval = msix_src_reg;
3135 return HCLGE_VECTOR0_EVENT_ERR;
3138 /* check for vector0 mailbox(=CMDQ RX) event source */
3139 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3140 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3141 *clearval = cmdq_src_reg;
3142 return HCLGE_VECTOR0_EVENT_MBX;
3145 /* print other vector0 event source */
3146 dev_info(&hdev->pdev->dev,
3147 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3148 cmdq_src_reg, msix_src_reg);
3149 *clearval = msix_src_reg;
3151 return HCLGE_VECTOR0_EVENT_OTHER;
/* Acknowledge a vector0 event by writing regclr to the register that
 * latched it: reset events clear via MISC_RESET_STS, mailbox events via
 * the CMDQ source register.  Other event types need no clearing here.
 */
3154 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3157 switch (event_type) {
3158 case HCLGE_VECTOR0_EVENT_RST:
3159 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3161 case HCLGE_VECTOR0_EVENT_MBX:
3162 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
/* Clear every latched vector0 event: all three reset-interrupt bits at
 * once, plus the mailbox cause (written as 0).
 */
3169 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3171 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3172 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3173 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3174 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3175 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
/* Mask (0) or unmask (1) the misc vector by writing its control address. */
3178 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3180 writel(enable ? 1 : 0, vector->addr);
/* Vector0 (misc) interrupt handler.  Masks the vector, decodes the cause,
 * dispatches: hardware error -> request an UNKNOWN reset (resolved later
 * by the reset task), reset -> schedule reset task, mailbox -> schedule
 * mailbox task.  The cause is then acknowledged, and the vector is only
 * re-enabled here for non-reset events (reset paths re-enable it after
 * the cause is safely cleared).  IRQ_HANDLED return is elided here.
 */
3183 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3185 struct hclge_dev *hdev = data;
3189 hclge_enable_vector(&hdev->misc_vector, false);
3190 event_cause = hclge_check_event_cause(hdev, &clearval);
3192 /* vector 0 interrupt is shared with reset and mailbox source events.*/
3193 switch (event_cause) {
3194 case HCLGE_VECTOR0_EVENT_ERR:
3195 /* we do not know what type of reset is required now. This could
3196 * only be decided after we fetch the type of errors which
3197 * caused this event. Therefore, we will do below for now:
3198 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3199 * have defered type of reset to be used.
3200 * 2. Schedule the reset serivce task.
3201 * 3. When service task receives  HNAE3_UNKNOWN_RESET type it
3202 * will fetch the correct type of reset.  This would be done
3203 * by first decoding the types of errors.
3205 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3207 case HCLGE_VECTOR0_EVENT_RST:
3208 hclge_reset_task_schedule(hdev);
3210 case HCLGE_VECTOR0_EVENT_MBX:
3211 /* If we are here then,
3212 * 1. Either we are not handling any mbx task and we are not
3215 * 2. We could be handling a mbx task but nothing more is
3217 * In both cases, we should schedule mbx task as there are more
3218 * mbx messages reported by this interrupt.
3220 hclge_mbx_task_schedule(hdev);
3223 dev_warn(&hdev->pdev->dev,
3224 "received unknown or unhandled event of vector0\n");
3228 hclge_clear_event_cause(hdev, event_cause, clearval);
3230 /* Enable interrupt if it is not cause by reset. And when
3231 * clearval equal to 0, it means interrupt status may be
3232 * cleared by hardware before driver reads status register.
3233 * For this case, vector0 interrupt also should be enabled.
3236 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3237 hclge_enable_vector(&hdev->misc_vector, true);
/* Return an MSI vector to the free pool: warn on double-free, mark it
 * unowned and adjust the used/left counters.
 */
3243 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3245 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3246 dev_warn(&hdev->pdev->dev,
3247 "vector(vector_id %d) has been freed.\n", vector_id);
3251 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3252 hdev->num_msi_left += 1;
3253 hdev->num_msi_used -= 1;
/* Reserve vector 0 as the misc (reset/mailbox/error) vector: record its
 * IRQ number and control-register address, mark it owned by the PF
 * (status 0) and account for it in the vector counters.
 */
3256 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3258 struct hclge_misc_vector *vector = &hdev->misc_vector;
3260 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3262 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3263 hdev->vector_status[0] = 0;
3265 hdev->num_msi_left -= 1;
3266 hdev->num_msi_used += 1;
/* IRQ affinity-change callback: mirror the new CPU mask into
 * hdev->affinity_mask so service-task scheduling follows the IRQ.
 */
3269 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3270 const cpumask_t *mask)
3272 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3275 cpumask_copy(&hdev->affinity_mask, mask);
/* kref release callback required by the irq affinity-notifier API;
 * presumably intentionally empty since the notifier is embedded in
 * hclge_dev (body elided in this excerpt - confirm against full source).
 */
3278 static void hclge_irq_affinity_release(struct kref *ref)
/* Pin the misc IRQ to the preferred CPU mask and register the affinity
 * notifier so hdev->affinity_mask tracks later user changes.
 */
3282 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3284 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3285 &hdev->affinity_mask);
3287 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3288 hdev->affinity_notify.release = hclge_irq_affinity_release;
3289 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3290 &hdev->affinity_notify);
/* Undo hclge_misc_affinity_setup(): drop the notifier and the hint. */
3293 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3295 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3296 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
/* Claim vector 0 and request its IRQ with a "<driver>-misc-<pci-name>"
 * label.  On request_irq() failure the vector is released again before
 * the error is reported (return lines elided here).
 */
3299 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3303 hclge_get_misc_vector(hdev);
3305 /* this would be explicitly freed in the end */
3306 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3307 HCLGE_NAME, pci_name(hdev->pdev));
3308 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3309 0, hdev->misc_vector.name, hdev);
3311 hclge_free_vector(hdev, 0);
3312 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3313 hdev->misc_vector.vector_irq);
/* Undo hclge_misc_irq_init(): free the IRQ, then release vector 0. */
3319 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3321 free_irq(hdev->misc_vector.vector_irq, hdev);
3322 hclge_free_vector(hdev, 0);
/* Broadcast a reset-notify event to the NIC client's handle of every
 * vport.  No-ops when no NIC client is registered or it has no
 * reset_notify op; stops and logs on the first per-vport failure
 * (error return elided here).
 */
3325 int hclge_notify_client(struct hclge_dev *hdev,
3326 enum hnae3_reset_notify_type type)
3328 struct hnae3_client *client = hdev->nic_client;
3331 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3334 if (!client->ops->reset_notify)
3337 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3338 struct hnae3_handle *handle = &hdev->vport[i].nic;
3341 ret = client->ops->reset_notify(handle, type);
3343 dev_err(&hdev->pdev->dev,
3344 "notify nic client failed %d(%d)\n", type, ret);
/* RoCE counterpart of hclge_notify_client(): broadcast a reset-notify
 * event to the RoCE client's handle of every vport, guarded by the
 * ROCE_REGISTERED state bit.
 */
3352 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3353 enum hnae3_reset_notify_type type)
3355 struct hnae3_client *client = hdev->roce_client;
3359 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3362 if (!client->ops->reset_notify)
3365 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3366 struct hnae3_handle *handle = &hdev->vport[i].roce;
3368 ret = client->ops->reset_notify(handle, type);
3370 dev_err(&hdev->pdev->dev,
3371 "notify roce client failed %d(%d)",
/* Poll the hardware reset-status register until the in-progress bit for
 * the current reset type clears, sleeping HCLGE_RESET_WATI_MS between
 * reads for up to HCLGE_RESET_WAIT_CNT iterations.  Unsupported reset
 * types and timeouts are reported as errors (return lines elided here).
 */
3380 static int hclge_reset_wait(struct hclge_dev *hdev)
3382 #define HCLGE_RESET_WATI_MS 100
3383 #define HCLGE_RESET_WAIT_CNT 350
3385 u32 val, reg, reg_bit;
3388 switch (hdev->reset_type) {
3389 case HNAE3_IMP_RESET:
3390 reg = HCLGE_GLOBAL_RESET_REG;
3391 reg_bit = HCLGE_IMP_RESET_BIT;
3393 case HNAE3_GLOBAL_RESET:
3394 reg = HCLGE_GLOBAL_RESET_REG;
3395 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3397 case HNAE3_FUNC_RESET:
3398 reg = HCLGE_FUN_RST_ING;
3399 reg_bit = HCLGE_FUN_RST_ING_B;
3402 dev_err(&hdev->pdev->dev,
3403 "Wait for unsupported reset type: %d\n",
3408 val = hclge_read_dev(&hdev->hw, reg);
3409 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3410 msleep(HCLGE_RESET_WATI_MS);
3411 val = hclge_read_dev(&hdev->hw, reg);
3415 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3416 dev_warn(&hdev->pdev->dev,
3417 "Wait for reset timeout: %d\n", hdev->reset_type);
/* Tell firmware to set or clear a VF's FUNC_RST_ING status (the line
 * assigning the reset flag into the request is elided in this excerpt).
 */
3424 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3426 struct hclge_vf_rst_cmd *req;
3427 struct hclge_desc desc;
3429 req = (struct hclge_vf_rst_cmd *)desc.data;
3430 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3431 req->dest_vfid = func_id;
3436 return hclge_cmd_send(&hdev->hw, &desc, 1);
/* Set/clear FUNC_RST_ING on every VF vport (VFs follow the VMDq vports
 * in hdev->vport[]).  When asserting reset, also inform each alive VF
 * via mailbox; that notification is best-effort since the VF driver may
 * not be loaded.
 */
3439 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3443 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3444 struct hclge_vport *vport = &hdev->vport[i];
3447 /* Send cmd to set/clear VF's FUNC_RST_ING */
3448 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3450 dev_err(&hdev->pdev->dev,
3451 "set vf(%u) rst failed %d!\n",
3452 vport->vport_id, ret);
3456 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3459 /* Inform VF to process the reset.
3460 * hclge_inform_reset_assert_to_vf may fail if VF
3461 * driver is not loaded.
3463 ret = hclge_inform_reset_assert_to_vf(vport);
3465 dev_warn(&hdev->pdev->dev,
3466 "inform reset to vf(%u) failed %d!\n",
3467 vport->vport_id, ret);
/* Service-task body for mailbox processing: consume the SCHED flag, skip
 * if the command queue is disabled, and use MBX_HANDLING as a re-entry
 * guard around hclge_mbx_handler().
 */
3473 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3475 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3476 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3477 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3480 hclge_mbx_handler(hdev);
3482 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
/* Before a PF/FLR reset, wait until all VFs report ready (stopped I/O).
 * Polls QUERY_VF_RST_RDY up to HCLGE_PF_RESET_SYNC_CNT times, running
 * the mailbox task each iteration so VFs can be brought down via mbx.
 * Old firmware returns -EOPNOTSUPP; in that case just sleep a fixed
 * HCLGE_RESET_SYNC_TIME and proceed.  Timing out only warns.
 */
3485 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3487 struct hclge_pf_rst_sync_cmd *req;
3488 struct hclge_desc desc;
3492 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3493 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3496 /* vf need to down netdev by mbx during PF or FLR reset */
3497 hclge_mailbox_service_task(hdev);
3499 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3500 /* for compatible with old firmware, wait
3501 * 100 ms for VF to stop IO
3503 if (ret == -EOPNOTSUPP) {
3504 msleep(HCLGE_RESET_SYNC_TIME);
3507 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3510 } else if (req->all_vf_ready) {
3513 msleep(HCLGE_PF_RESET_SYNC_TIME);
3514 hclge_cmd_reuse_desc(&desc, true);
3515 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3517 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
/* Forward a hardware-error event to the NIC client's process_hw_error
 * callback for every vport; silently skips when no client is registered
 * or it lacks the callback.
 */
3520 void hclge_report_hw_error(struct hclge_dev *hdev,
3521 enum hnae3_hw_error_type type)
3523 struct hnae3_client *client = hdev->nic_client;
3526 if (!client || !client->ops->process_hw_error ||
3527 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3530 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3531 client->ops->process_hw_error(&hdev->vport[i].nic, type);
/* Check the PF "other interrupt" register for IMP RD-poison and CMDQ ECC
 * error bits; report each to the NIC client and write the bit back
 * cleared to acknowledge it.
 */
3534 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3538 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3539 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3540 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3541 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3542 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3545 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3546 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3547 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3548 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
/* Ask firmware to trigger a function-level reset for the given function
 * id (0 = the PF itself) via CFG_RST_TRIGGER.
 */
3552 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3554 struct hclge_desc desc;
3555 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3558 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3559 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3560 req->fun_reset_vfid = func_id;
3562 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3564 dev_err(&hdev->pdev->dev,
3565 "send function reset cmd fail, status =%d\n", ret);
/* Actually trigger the pending reset.  If hardware still reports a reset
 * in progress, just log the relevant registers and bail.  Global reset is
 * triggered by setting the bit in the global reset register; a PF (FUNC)
 * reset is deferred to the reset task by marking it pending and
 * re-scheduling.  Unknown types only warn.
 */
3570 static void hclge_do_reset(struct hclge_dev *hdev)
3572 struct hnae3_handle *handle = &hdev->vport[0].nic;
3573 struct pci_dev *pdev = hdev->pdev;
3576 if (hclge_get_hw_reset_stat(handle)) {
3577 dev_info(&pdev->dev, "hardware reset not finish\n");
3578 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3579 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3580 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3584 switch (hdev->reset_type) {
3585 case HNAE3_GLOBAL_RESET:
3586 dev_info(&pdev->dev, "global reset requested\n");
3587 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3588 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3589 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3591 case HNAE3_FUNC_RESET:
3592 dev_info(&pdev->dev, "PF reset requested\n");
3593 /* schedule again to check later */
3594 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3595 hclge_reset_task_schedule(hdev);
3598 dev_warn(&pdev->dev,
3599 "unsupported reset type: %d\n", hdev->reset_type);
/* Pick the highest-priority reset level from the pending-request bitmap
 * (IMP > GLOBAL > FUNC > FLR), clearing the chosen level and every level
 * it supersedes.  An UNKNOWN request is first resolved into concrete
 * levels by decoding the MSI-X error sources; that path also re-enables
 * the misc vector, whose deferred error clearing is explained inline.
 * Returns NONE when a reset of an equal/higher level is already running.
 */
3604 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3605 unsigned long *addr)
3607 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3608 struct hclge_dev *hdev = ae_dev->priv;
3610 /* first, resolve any unknown reset type to the known type(s) */
3611 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3612 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3613 HCLGE_MISC_VECTOR_INT_STS);
3614 /* we will intentionally ignore any errors from this function
3615 *  as we will end up in *some* reset request in any case
3617 if (hclge_handle_hw_msix_error(hdev, addr))
3618 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3621 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3622 /* We defered the clearing of the error event which caused
3623 * interrupt since it was not posssible to do that in
3624 * interrupt context (and this is the reason we introduced
3625 * new UNKNOWN reset type). Now, the errors have been
3626 * handled and cleared in hardware we can safely enable
3627 * interrupts. This is an exception to the norm.
3629 hclge_enable_vector(&hdev->misc_vector, true);
3632 /* return the highest priority reset level amongst all */
3633 if (test_bit(HNAE3_IMP_RESET, addr)) {
3634 rst_level = HNAE3_IMP_RESET;
3635 clear_bit(HNAE3_IMP_RESET, addr);
3636 clear_bit(HNAE3_GLOBAL_RESET, addr);
3637 clear_bit(HNAE3_FUNC_RESET, addr);
3638 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3639 rst_level = HNAE3_GLOBAL_RESET;
3640 clear_bit(HNAE3_GLOBAL_RESET, addr);
3641 clear_bit(HNAE3_FUNC_RESET, addr);
3642 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3643 rst_level = HNAE3_FUNC_RESET;
3644 clear_bit(HNAE3_FUNC_RESET, addr);
3645 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3646 rst_level = HNAE3_FLR_RESET;
3647 clear_bit(HNAE3_FLR_RESET, addr);
3650 if (hdev->reset_type != HNAE3_NONE_RESET &&
3651 rst_level < hdev->reset_type)
3652 return HNAE3_NONE_RESET;
/* After a reset completes, acknowledge its interrupt cause.  Only IMP and
 * global resets have a cause bit to clear; on pre-V2 hardware the source
 * can only be cleared once the reset has finished, hence the explicit
 * write here.  Finally re-enable the misc vector.
 */
3657 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3661 switch (hdev->reset_type) {
3662 case HNAE3_IMP_RESET:
3663 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3665 case HNAE3_GLOBAL_RESET:
3666 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3675 /* For revision 0x20, the reset interrupt source
3676 * can only be cleared after hardware reset done
3678 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3679 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3682 hclge_enable_vector(&hdev->misc_vector, true);
3685 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3689 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3691 reg_val |= HCLGE_NIC_SW_RST_RDY;
3693 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3695 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3698 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3702 ret = hclge_set_all_vf_rst(hdev, true);
3706 hclge_func_reset_sync_vf(hdev);
3711 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3716 switch (hdev->reset_type) {
3717 case HNAE3_FUNC_RESET:
3718 ret = hclge_func_reset_notify_vf(hdev);
3722 ret = hclge_func_reset_cmd(hdev, 0);
3724 dev_err(&hdev->pdev->dev,
3725 "asserting function reset fail %d!\n", ret);
3729 /* After performaning pf reset, it is not necessary to do the
3730 * mailbox handling or send any command to firmware, because
3731 * any mailbox handling or command to firmware is only valid
3732 * after hclge_cmd_init is called.
3734 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3735 hdev->rst_stats.pf_rst_cnt++;
3737 case HNAE3_FLR_RESET:
3738 ret = hclge_func_reset_notify_vf(hdev);
3742 case HNAE3_IMP_RESET:
3743 hclge_handle_imp_error(hdev);
3744 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3745 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3746 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3752 /* inform hardware that preparatory work is done */
3753 msleep(HCLGE_RESET_SYNC_TIME);
3754 hclge_reset_handshake(hdev, true);
3755 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3760 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3762 #define MAX_RESET_FAIL_CNT 5
3764 if (hdev->reset_pending) {
3765 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3766 hdev->reset_pending);
3768 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3769 HCLGE_RESET_INT_M) {
3770 dev_info(&hdev->pdev->dev,
3771 "reset failed because new reset interrupt\n");
3772 hclge_clear_reset_cause(hdev);
3774 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3775 hdev->rst_stats.reset_fail_cnt++;
3776 set_bit(hdev->reset_type, &hdev->reset_pending);
3777 dev_info(&hdev->pdev->dev,
3778 "re-schedule reset task(%u)\n",
3779 hdev->rst_stats.reset_fail_cnt);
3783 hclge_clear_reset_cause(hdev);
3785 /* recover the handshake status when reset fail */
3786 hclge_reset_handshake(hdev, true);
3788 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3790 hclge_dbg_dump_rst_info(hdev);
3792 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3797 static int hclge_set_rst_done(struct hclge_dev *hdev)
3799 struct hclge_pf_rst_done_cmd *req;
3800 struct hclge_desc desc;
3803 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3804 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3805 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3807 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3808 /* To be compatible with the old firmware, which does not support
3809 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3812 if (ret == -EOPNOTSUPP) {
3813 dev_warn(&hdev->pdev->dev,
3814 "current firmware does not support command(0x%x)!\n",
3815 HCLGE_OPC_PF_RST_DONE);
3818 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3825 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3829 switch (hdev->reset_type) {
3830 case HNAE3_FUNC_RESET:
3831 case HNAE3_FLR_RESET:
3832 ret = hclge_set_all_vf_rst(hdev, false);
3834 case HNAE3_GLOBAL_RESET:
3835 case HNAE3_IMP_RESET:
3836 ret = hclge_set_rst_done(hdev);
3842 /* clear up the handshake status after re-initialize done */
3843 hclge_reset_handshake(hdev, false);
3848 static int hclge_reset_stack(struct hclge_dev *hdev)
3852 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3856 ret = hclge_reset_ae_dev(hdev->ae_dev);
3860 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3863 static int hclge_reset_prepare(struct hclge_dev *hdev)
3867 hdev->rst_stats.reset_cnt++;
3868 /* perform reset of the stack & ae device for a client */
3869 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3874 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3879 return hclge_reset_prepare_wait(hdev);
3882 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3884 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3885 enum hnae3_reset_type reset_level;
3888 hdev->rst_stats.hw_reset_done_cnt++;
3890 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3895 ret = hclge_reset_stack(hdev);
3900 hclge_clear_reset_cause(hdev);
3902 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3903 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3907 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3910 ret = hclge_reset_prepare_up(hdev);
3915 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3920 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3924 hdev->last_reset_time = jiffies;
3925 hdev->rst_stats.reset_fail_cnt = 0;
3926 hdev->rst_stats.reset_done_cnt++;
3927 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3929 /* if default_reset_request has a higher level reset request,
3930 * it should be handled as soon as possible. since some errors
3931 * need this kind of reset to fix.
3933 reset_level = hclge_get_reset_level(ae_dev,
3934 &hdev->default_reset_request);
3935 if (reset_level != HNAE3_NONE_RESET)
3936 set_bit(reset_level, &hdev->reset_request);
3941 static void hclge_reset(struct hclge_dev *hdev)
3943 if (hclge_reset_prepare(hdev))
3946 if (hclge_reset_wait(hdev))
3949 if (hclge_reset_rebuild(hdev))
3955 if (hclge_reset_err_handle(hdev))
3956 hclge_reset_task_schedule(hdev);
3959 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3961 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3962 struct hclge_dev *hdev = ae_dev->priv;
3964 /* We might end up getting called broadly because of 2 below cases:
3965 * 1. Recoverable error was conveyed through APEI and only way to bring
3966 * normalcy is to reset.
3967 * 2. A new reset request from the stack due to timeout
3969 * For the first case,error event might not have ae handle available.
3970 * check if this is a new reset request and we are not here just because
3971 * last reset attempt did not succeed and watchdog hit us again. We will
3972 * know this if last reset request did not occur very recently (watchdog
3973 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
3974 * In case of new request we reset the "reset level" to PF reset.
3975 * And if it is a repeat reset request of the most recent one then we
3976 * want to make sure we throttle the reset request. Therefore, we will
3977 * not allow it again before 3*HZ times.
3980 handle = &hdev->vport[0].nic;
3982 if (time_before(jiffies, (hdev->last_reset_time +
3983 HCLGE_RESET_INTERVAL))) {
3984 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3986 } else if (hdev->default_reset_request) {
3988 hclge_get_reset_level(ae_dev,
3989 &hdev->default_reset_request);
3990 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3991 hdev->reset_level = HNAE3_FUNC_RESET;
3994 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3997 /* request reset & schedule reset task */
3998 set_bit(hdev->reset_level, &hdev->reset_request);
3999 hclge_reset_task_schedule(hdev);
4001 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4002 hdev->reset_level++;
4005 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4006 enum hnae3_reset_type rst_type)
4008 struct hclge_dev *hdev = ae_dev->priv;
4010 set_bit(rst_type, &hdev->default_reset_request);
4013 static void hclge_reset_timer(struct timer_list *t)
4015 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4017 /* if default_reset_request has no value, it means that this reset
4018 * request has already be handled, so just return here
4020 if (!hdev->default_reset_request)
4023 dev_info(&hdev->pdev->dev,
4024 "triggering reset in reset timer\n");
4025 hclge_reset_event(hdev->pdev, NULL);
4028 static void hclge_reset_subtask(struct hclge_dev *hdev)
4030 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4032 /* check if there is any ongoing reset in the hardware. This status can
4033 * be checked from reset_pending. If there is then, we need to wait for
4034 * hardware to complete reset.
4035 * a. If we are able to figure out in reasonable time that hardware
4036 * has fully resetted then, we can proceed with driver, client
4038 * b. else, we can come back later to check this status so re-sched
4041 hdev->last_reset_time = jiffies;
4042 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4043 if (hdev->reset_type != HNAE3_NONE_RESET)
4046 /* check if we got any *new* reset requests to be honored */
4047 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4048 if (hdev->reset_type != HNAE3_NONE_RESET)
4049 hclge_do_reset(hdev);
4051 hdev->reset_type = HNAE3_NONE_RESET;
4054 static void hclge_reset_service_task(struct hclge_dev *hdev)
4056 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4059 down(&hdev->reset_sem);
4060 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4062 hclge_reset_subtask(hdev);
4064 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4065 up(&hdev->reset_sem);
4068 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4072 /* start from vport 1 for PF is always alive */
4073 for (i = 1; i < hdev->num_alloc_vport; i++) {
4074 struct hclge_vport *vport = &hdev->vport[i];
4076 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4077 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4079 /* If vf is not alive, set to default value */
4080 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4081 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4085 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4087 unsigned long delta = round_jiffies_relative(HZ);
4089 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4092 /* Always handle the link updating to make sure link state is
4093 * updated when it is triggered by mbx.
4095 hclge_update_link_status(hdev);
4096 hclge_sync_mac_table(hdev);
4097 hclge_sync_promisc_mode(hdev);
4099 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4100 delta = jiffies - hdev->last_serv_processed;
4102 if (delta < round_jiffies_relative(HZ)) {
4103 delta = round_jiffies_relative(HZ) - delta;
4108 hdev->serv_processed_cnt++;
4109 hclge_update_vport_alive(hdev);
4111 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4112 hdev->last_serv_processed = jiffies;
4116 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4117 hclge_update_stats_for_all(hdev);
4119 hclge_update_port_info(hdev);
4120 hclge_sync_vlan_filter(hdev);
4122 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4123 hclge_rfs_filter_expire(hdev);
4125 hdev->last_serv_processed = jiffies;
4128 hclge_task_schedule(hdev, delta);
4131 static void hclge_service_task(struct work_struct *work)
4133 struct hclge_dev *hdev =
4134 container_of(work, struct hclge_dev, service_task.work);
4136 hclge_reset_service_task(hdev);
4137 hclge_mailbox_service_task(hdev);
4138 hclge_periodic_service_task(hdev);
4140 /* Handle reset and mbx again in case periodical task delays the
4141 * handling by calling hclge_task_schedule() in
4142 * hclge_periodic_service_task().
4144 hclge_reset_service_task(hdev);
4145 hclge_mailbox_service_task(hdev);
4148 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4150 /* VF handle has no client */
4151 if (!handle->client)
4152 return container_of(handle, struct hclge_vport, nic);
4153 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4154 return container_of(handle, struct hclge_vport, roce);
4156 return container_of(handle, struct hclge_vport, nic);
4159 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4160 struct hnae3_vector_info *vector_info)
4162 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4164 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4166 /* need an extend offset to config vector >= 64 */
4167 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4168 vector_info->io_addr = hdev->hw.io_base +
4169 HCLGE_VECTOR_REG_BASE +
4170 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4172 vector_info->io_addr = hdev->hw.io_base +
4173 HCLGE_VECTOR_EXT_REG_BASE +
4174 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4175 HCLGE_VECTOR_REG_OFFSET_H +
4176 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4177 HCLGE_VECTOR_REG_OFFSET;
4179 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4180 hdev->vector_irq[idx] = vector_info->vector;
4183 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4184 struct hnae3_vector_info *vector_info)
4186 struct hclge_vport *vport = hclge_get_vport(handle);
4187 struct hnae3_vector_info *vector = vector_info;
4188 struct hclge_dev *hdev = vport->back;
4193 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4194 vector_num = min(hdev->num_msi_left, vector_num);
4196 for (j = 0; j < vector_num; j++) {
4197 while (++i < hdev->num_nic_msi) {
4198 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4199 hclge_get_vector_info(hdev, i, vector);
4207 hdev->num_msi_left -= alloc;
4208 hdev->num_msi_used += alloc;
4213 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4217 for (i = 0; i < hdev->num_msi; i++)
4218 if (vector == hdev->vector_irq[i])
4224 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4226 struct hclge_vport *vport = hclge_get_vport(handle);
4227 struct hclge_dev *hdev = vport->back;
4230 vector_id = hclge_get_vector_index(hdev, vector);
4231 if (vector_id < 0) {
4232 dev_err(&hdev->pdev->dev,
4233 "Get vector index fail. vector = %d\n", vector);
4237 hclge_free_vector(hdev, vector_id);
4242 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4244 return HCLGE_RSS_KEY_SIZE;
4247 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4248 const u8 hfunc, const u8 *key)
4250 struct hclge_rss_config_cmd *req;
4251 unsigned int key_offset = 0;
4252 struct hclge_desc desc;
4257 key_counts = HCLGE_RSS_KEY_SIZE;
4258 req = (struct hclge_rss_config_cmd *)desc.data;
4260 while (key_counts) {
4261 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4264 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4265 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4267 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4268 memcpy(req->hash_key,
4269 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4271 key_counts -= key_size;
4273 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4275 dev_err(&hdev->pdev->dev,
4276 "Configure RSS config fail, status = %d\n",
4284 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4286 struct hclge_rss_indirection_table_cmd *req;
4287 struct hclge_desc desc;
4288 int rss_cfg_tbl_num;
4296 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4297 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4298 HCLGE_RSS_CFG_TBL_SIZE;
4300 for (i = 0; i < rss_cfg_tbl_num; i++) {
4301 hclge_cmd_setup_basic_desc
4302 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4304 req->start_table_index =
4305 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4306 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4307 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4308 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4309 req->rss_qid_l[j] = qid & 0xff;
4311 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4312 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4313 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4314 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4316 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4318 dev_err(&hdev->pdev->dev,
4319 "Configure rss indir table fail,status = %d\n",
4327 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4328 u16 *tc_size, u16 *tc_offset)
4330 struct hclge_rss_tc_mode_cmd *req;
4331 struct hclge_desc desc;
4335 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4336 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4338 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4341 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4342 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4343 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4344 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4345 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4346 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4347 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4349 req->rss_tc_mode[i] = cpu_to_le16(mode);
4352 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4354 dev_err(&hdev->pdev->dev,
4355 "Configure rss tc mode fail, status = %d\n", ret);
4360 static void hclge_get_rss_type(struct hclge_vport *vport)
4362 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4363 vport->rss_tuple_sets.ipv4_udp_en ||
4364 vport->rss_tuple_sets.ipv4_sctp_en ||
4365 vport->rss_tuple_sets.ipv6_tcp_en ||
4366 vport->rss_tuple_sets.ipv6_udp_en ||
4367 vport->rss_tuple_sets.ipv6_sctp_en)
4368 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4369 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4370 vport->rss_tuple_sets.ipv6_fragment_en)
4371 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4373 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4376 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4378 struct hclge_rss_input_tuple_cmd *req;
4379 struct hclge_desc desc;
4382 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4384 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4386 /* Get the tuple cfg from pf */
4387 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4388 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4389 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4390 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4391 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4392 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4393 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4394 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4395 hclge_get_rss_type(&hdev->vport[0]);
4396 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4398 dev_err(&hdev->pdev->dev,
4399 "Configure rss input fail, status = %d\n", ret);
4403 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4406 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4407 struct hclge_vport *vport = hclge_get_vport(handle);
4410 /* Get hash algorithm */
4412 switch (vport->rss_algo) {
4413 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4414 *hfunc = ETH_RSS_HASH_TOP;
4416 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4417 *hfunc = ETH_RSS_HASH_XOR;
4420 *hfunc = ETH_RSS_HASH_UNKNOWN;
4425 /* Get the RSS Key required by the user */
4427 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4429 /* Get indirect table */
4431 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4432 indir[i] = vport->rss_indirection_tbl[i];
4437 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4438 const u8 *key, const u8 hfunc)
4440 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4441 struct hclge_vport *vport = hclge_get_vport(handle);
4442 struct hclge_dev *hdev = vport->back;
4446 /* Set the RSS Hash Key if specififed by the user */
4449 case ETH_RSS_HASH_TOP:
4450 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4452 case ETH_RSS_HASH_XOR:
4453 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4455 case ETH_RSS_HASH_NO_CHANGE:
4456 hash_algo = vport->rss_algo;
4462 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4466 /* Update the shadow RSS key with user specified qids */
4467 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4468 vport->rss_algo = hash_algo;
4471 /* Update the shadow RSS table with user specified qids */
4472 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4473 vport->rss_indirection_tbl[i] = indir[i];
4475 /* Update the hardware */
4476 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4479 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4481 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4483 if (nfc->data & RXH_L4_B_2_3)
4484 hash_sets |= HCLGE_D_PORT_BIT;
4486 hash_sets &= ~HCLGE_D_PORT_BIT;
4488 if (nfc->data & RXH_IP_SRC)
4489 hash_sets |= HCLGE_S_IP_BIT;
4491 hash_sets &= ~HCLGE_S_IP_BIT;
4493 if (nfc->data & RXH_IP_DST)
4494 hash_sets |= HCLGE_D_IP_BIT;
4496 hash_sets &= ~HCLGE_D_IP_BIT;
4498 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4499 hash_sets |= HCLGE_V_TAG_BIT;
4504 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4505 struct ethtool_rxnfc *nfc,
4506 struct hclge_rss_input_tuple_cmd *req)
4508 struct hclge_dev *hdev = vport->back;
4511 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4512 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4513 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4514 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4515 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4516 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4517 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4518 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4520 tuple_sets = hclge_get_rss_hash_bits(nfc);
4521 switch (nfc->flow_type) {
4523 req->ipv4_tcp_en = tuple_sets;
4526 req->ipv6_tcp_en = tuple_sets;
4529 req->ipv4_udp_en = tuple_sets;
4532 req->ipv6_udp_en = tuple_sets;
4535 req->ipv4_sctp_en = tuple_sets;
4538 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4539 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4542 req->ipv6_sctp_en = tuple_sets;
4545 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4548 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4557 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4558 struct ethtool_rxnfc *nfc)
4560 struct hclge_vport *vport = hclge_get_vport(handle);
4561 struct hclge_dev *hdev = vport->back;
4562 struct hclge_rss_input_tuple_cmd *req;
4563 struct hclge_desc desc;
4566 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4567 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4570 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4571 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4573 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4575 dev_err(&hdev->pdev->dev,
4576 "failed to init rss tuple cmd, ret = %d\n", ret);
4580 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4582 dev_err(&hdev->pdev->dev,
4583 "Set rss tuple fail, status = %d\n", ret);
4587 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4588 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4589 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4590 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4591 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4592 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4593 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4594 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4595 hclge_get_rss_type(vport);
4599 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4602 switch (flow_type) {
4604 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4607 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4610 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4613 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4616 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4619 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4623 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4632 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4636 if (tuple_sets & HCLGE_D_PORT_BIT)
4637 tuple_data |= RXH_L4_B_2_3;
4638 if (tuple_sets & HCLGE_S_PORT_BIT)
4639 tuple_data |= RXH_L4_B_0_1;
4640 if (tuple_sets & HCLGE_D_IP_BIT)
4641 tuple_data |= RXH_IP_DST;
4642 if (tuple_sets & HCLGE_S_IP_BIT)
4643 tuple_data |= RXH_IP_SRC;
4648 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4649 struct ethtool_rxnfc *nfc)
4651 struct hclge_vport *vport = hclge_get_vport(handle);
4657 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4658 if (ret || !tuple_sets)
4661 nfc->data = hclge_convert_rss_tuple(tuple_sets);
4666 static int hclge_get_tc_size(struct hnae3_handle *handle)
4668 struct hclge_vport *vport = hclge_get_vport(handle);
4669 struct hclge_dev *hdev = vport->back;
4671 return hdev->pf_rss_size_max;
4674 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4676 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4677 struct hclge_vport *vport = hdev->vport;
4678 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4679 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4680 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4681 struct hnae3_tc_info *tc_info;
4686 tc_info = &vport->nic.kinfo.tc_info;
4687 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4688 rss_size = tc_info->tqp_count[i];
4691 if (!(hdev->hw_tc_map & BIT(i)))
4694 /* tc_size set to hardware is the log2 of roundup power of two
4695 * of rss_size, the acutal queue size is limited by indirection
4698 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4700 dev_err(&hdev->pdev->dev,
4701 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4706 roundup_size = roundup_pow_of_two(rss_size);
4707 roundup_size = ilog2(roundup_size);
4710 tc_size[i] = roundup_size;
4711 tc_offset[i] = tc_info->tqp_offset[i];
4714 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4717 int hclge_rss_init_hw(struct hclge_dev *hdev)
4719 struct hclge_vport *vport = hdev->vport;
4720 u16 *rss_indir = vport[0].rss_indirection_tbl;
4721 u8 *key = vport[0].rss_hash_key;
4722 u8 hfunc = vport[0].rss_algo;
4725 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4729 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4733 ret = hclge_set_rss_input_tuple(hdev);
4737 return hclge_init_rss_tc_mode(hdev);
4740 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4742 struct hclge_vport *vport = hdev->vport;
4745 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4746 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4747 vport[j].rss_indirection_tbl[i] =
4748 i % vport[j].alloc_rss_size;
4752 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4754 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4755 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4756 struct hclge_vport *vport = hdev->vport;
4758 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4759 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4761 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4764 vport[i].rss_tuple_sets.ipv4_tcp_en =
4765 HCLGE_RSS_INPUT_TUPLE_OTHER;
4766 vport[i].rss_tuple_sets.ipv4_udp_en =
4767 HCLGE_RSS_INPUT_TUPLE_OTHER;
4768 vport[i].rss_tuple_sets.ipv4_sctp_en =
4769 HCLGE_RSS_INPUT_TUPLE_SCTP;
4770 vport[i].rss_tuple_sets.ipv4_fragment_en =
4771 HCLGE_RSS_INPUT_TUPLE_OTHER;
4772 vport[i].rss_tuple_sets.ipv6_tcp_en =
4773 HCLGE_RSS_INPUT_TUPLE_OTHER;
4774 vport[i].rss_tuple_sets.ipv6_udp_en =
4775 HCLGE_RSS_INPUT_TUPLE_OTHER;
4776 vport[i].rss_tuple_sets.ipv6_sctp_en =
4777 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4778 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4779 HCLGE_RSS_INPUT_TUPLE_SCTP;
4780 vport[i].rss_tuple_sets.ipv6_fragment_en =
4781 HCLGE_RSS_INPUT_TUPLE_OTHER;
4783 vport[i].rss_algo = rss_algo;
4785 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4786 sizeof(*rss_ind_tbl), GFP_KERNEL);
4790 vport[i].rss_indirection_tbl = rss_ind_tbl;
4791 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4792 HCLGE_RSS_KEY_SIZE);
4795 hclge_rss_indir_init_cfg(hdev);
4800 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4801 int vector_id, bool en,
4802 struct hnae3_ring_chain_node *ring_chain)
4804 struct hclge_dev *hdev = vport->back;
4805 struct hnae3_ring_chain_node *node;
4806 struct hclge_desc desc;
4807 struct hclge_ctrl_vector_chain_cmd *req =
4808 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4809 enum hclge_cmd_status status;
4810 enum hclge_opcode_type op;
4811 u16 tqp_type_and_id;
4814 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4815 hclge_cmd_setup_basic_desc(&desc, op, false);
4816 req->int_vector_id_l = hnae3_get_field(vector_id,
4817 HCLGE_VECTOR_ID_L_M,
4818 HCLGE_VECTOR_ID_L_S);
4819 req->int_vector_id_h = hnae3_get_field(vector_id,
4820 HCLGE_VECTOR_ID_H_M,
4821 HCLGE_VECTOR_ID_H_S);
4824 for (node = ring_chain; node; node = node->next) {
4825 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4826 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4828 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4829 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4830 HCLGE_TQP_ID_S, node->tqp_index);
4831 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4833 hnae3_get_field(node->int_gl_idx,
4834 HNAE3_RING_GL_IDX_M,
4835 HNAE3_RING_GL_IDX_S));
4836 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4837 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4838 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4839 req->vfid = vport->vport_id;
4841 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4843 dev_err(&hdev->pdev->dev,
4844 "Map TQP fail, status is %d.\n",
4850 hclge_cmd_setup_basic_desc(&desc,
4853 req->int_vector_id_l =
4854 hnae3_get_field(vector_id,
4855 HCLGE_VECTOR_ID_L_M,
4856 HCLGE_VECTOR_ID_L_S);
4857 req->int_vector_id_h =
4858 hnae3_get_field(vector_id,
4859 HCLGE_VECTOR_ID_H_M,
4860 HCLGE_VECTOR_ID_H_S);
4865 req->int_cause_num = i;
4866 req->vfid = vport->vport_id;
4867 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4869 dev_err(&hdev->pdev->dev,
4870 "Map TQP fail, status is %d.\n", status);
4878 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4879 struct hnae3_ring_chain_node *ring_chain)
4881 struct hclge_vport *vport = hclge_get_vport(handle);
4882 struct hclge_dev *hdev = vport->back;
4885 vector_id = hclge_get_vector_index(hdev, vector);
4886 if (vector_id < 0) {
4887 dev_err(&hdev->pdev->dev,
4888 "failed to get vector index. vector=%d\n", vector);
4892 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4895 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4896 struct hnae3_ring_chain_node *ring_chain)
4898 struct hclge_vport *vport = hclge_get_vport(handle);
4899 struct hclge_dev *hdev = vport->back;
4902 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4905 vector_id = hclge_get_vector_index(hdev, vector);
4906 if (vector_id < 0) {
4907 dev_err(&handle->pdev->dev,
4908 "Get vector index fail. ret =%d\n", vector_id);
4912 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4914 dev_err(&handle->pdev->dev,
4915 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4921 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
4922 bool en_uc, bool en_mc, bool en_bc)
4924 struct hclge_vport *vport = &hdev->vport[vf_id];
4925 struct hnae3_handle *handle = &vport->nic;
4926 struct hclge_promisc_cfg_cmd *req;
4927 struct hclge_desc desc;
4928 bool uc_tx_en = en_uc;
4932 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4934 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4937 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
4940 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
4941 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
4942 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
4943 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
4944 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
4945 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
4946 req->extend_promisc = promisc_cfg;
4948 /* to be compatible with DEVICE_VERSION_V1/2 */
4950 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
4951 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
4952 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
4953 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
4954 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
4955 req->promisc = promisc_cfg;
4957 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4959 dev_err(&hdev->pdev->dev,
4960 "failed to set vport %u promisc mode, ret = %d.\n",
4966 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4967 bool en_mc_pmc, bool en_bc_pmc)
4969 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
4970 en_uc_pmc, en_mc_pmc, en_bc_pmc);
4973 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4976 struct hclge_vport *vport = hclge_get_vport(handle);
4977 struct hclge_dev *hdev = vport->back;
4978 bool en_bc_pmc = true;
4980 /* For device whose version below V2, if broadcast promisc enabled,
4981 * vlan filter is always bypassed. So broadcast promisc should be
4982 * disabled until user enable promisc mode
4984 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4985 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4987 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4991 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4993 struct hclge_vport *vport = hclge_get_vport(handle);
4994 struct hclge_dev *hdev = vport->back;
4996 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4999 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5001 struct hclge_get_fd_mode_cmd *req;
5002 struct hclge_desc desc;
5005 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5007 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5009 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5011 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5015 *fd_mode = req->mode;
5020 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5021 u32 *stage1_entry_num,
5022 u32 *stage2_entry_num,
5023 u16 *stage1_counter_num,
5024 u16 *stage2_counter_num)
5026 struct hclge_get_fd_allocation_cmd *req;
5027 struct hclge_desc desc;
5030 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5032 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5034 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5036 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5041 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5042 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5043 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5044 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5049 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5050 enum HCLGE_FD_STAGE stage_num)
5052 struct hclge_set_fd_key_config_cmd *req;
5053 struct hclge_fd_key_cfg *stage;
5054 struct hclge_desc desc;
5057 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5059 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5060 stage = &hdev->fd_cfg.key_cfg[stage_num];
5061 req->stage = stage_num;
5062 req->key_select = stage->key_sel;
5063 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5064 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5065 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5066 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5067 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5068 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5070 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5072 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5077 static int hclge_init_fd_config(struct hclge_dev *hdev)
5079 #define LOW_2_WORDS 0x03
5080 struct hclge_fd_key_cfg *key_cfg;
5083 if (!hnae3_dev_fd_supported(hdev))
5086 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5090 switch (hdev->fd_cfg.fd_mode) {
5091 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5092 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5094 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5095 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5098 dev_err(&hdev->pdev->dev,
5099 "Unsupported flow director mode %u\n",
5100 hdev->fd_cfg.fd_mode);
5104 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5105 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5106 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5107 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5108 key_cfg->outer_sipv6_word_en = 0;
5109 key_cfg->outer_dipv6_word_en = 0;
5111 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5112 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5113 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5114 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5116 /* If use max 400bit key, we can support tuples for ether type */
5117 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5118 key_cfg->tuple_active |=
5119 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5121 /* roce_type is used to filter roce frames
5122 * dst_vport is used to specify the rule
5124 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5126 ret = hclge_get_fd_allocation(hdev,
5127 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5128 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5129 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5130 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5134 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5137 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5138 int loc, u8 *key, bool is_add)
5140 struct hclge_fd_tcam_config_1_cmd *req1;
5141 struct hclge_fd_tcam_config_2_cmd *req2;
5142 struct hclge_fd_tcam_config_3_cmd *req3;
5143 struct hclge_desc desc[3];
5146 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5147 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5148 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5149 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5150 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5152 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5153 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5154 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5156 req1->stage = stage;
5157 req1->xy_sel = sel_x ? 1 : 0;
5158 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5159 req1->index = cpu_to_le32(loc);
5160 req1->entry_vld = sel_x ? is_add : 0;
5163 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5164 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5165 sizeof(req2->tcam_data));
5166 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5167 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5170 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5172 dev_err(&hdev->pdev->dev,
5173 "config tcam key fail, ret=%d\n",
5179 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5180 struct hclge_fd_ad_data *action)
5182 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5183 struct hclge_fd_ad_config_cmd *req;
5184 struct hclge_desc desc;
5188 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5190 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5191 req->index = cpu_to_le32(loc);
5194 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5195 action->write_rule_id_to_bd);
5196 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5198 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5199 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5200 action->override_tc);
5201 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5202 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5205 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5206 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5207 action->forward_to_direct_queue);
5208 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5210 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5211 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5212 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5213 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5214 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5215 action->counter_id);
5217 req->ad_data = cpu_to_le64(ad_data);
5218 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5220 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5225 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5226 struct hclge_fd_rule *rule)
5228 u16 tmp_x_s, tmp_y_s;
5229 u32 tmp_x_l, tmp_y_l;
5232 if (rule->unused_tuple & tuple_bit)
5235 switch (tuple_bit) {
5236 case BIT(INNER_DST_MAC):
5237 for (i = 0; i < ETH_ALEN; i++) {
5238 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5239 rule->tuples_mask.dst_mac[i]);
5240 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5241 rule->tuples_mask.dst_mac[i]);
5245 case BIT(INNER_SRC_MAC):
5246 for (i = 0; i < ETH_ALEN; i++) {
5247 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5248 rule->tuples_mask.src_mac[i]);
5249 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5250 rule->tuples_mask.src_mac[i]);
5254 case BIT(INNER_VLAN_TAG_FST):
5255 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5256 rule->tuples_mask.vlan_tag1);
5257 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5258 rule->tuples_mask.vlan_tag1);
5259 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5260 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5263 case BIT(INNER_ETH_TYPE):
5264 calc_x(tmp_x_s, rule->tuples.ether_proto,
5265 rule->tuples_mask.ether_proto);
5266 calc_y(tmp_y_s, rule->tuples.ether_proto,
5267 rule->tuples_mask.ether_proto);
5268 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5269 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5272 case BIT(INNER_IP_TOS):
5273 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5274 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5277 case BIT(INNER_IP_PROTO):
5278 calc_x(*key_x, rule->tuples.ip_proto,
5279 rule->tuples_mask.ip_proto);
5280 calc_y(*key_y, rule->tuples.ip_proto,
5281 rule->tuples_mask.ip_proto);
5284 case BIT(INNER_SRC_IP):
5285 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5286 rule->tuples_mask.src_ip[IPV4_INDEX]);
5287 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5288 rule->tuples_mask.src_ip[IPV4_INDEX]);
5289 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5290 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5293 case BIT(INNER_DST_IP):
5294 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5295 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5296 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5297 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5298 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5299 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5302 case BIT(INNER_SRC_PORT):
5303 calc_x(tmp_x_s, rule->tuples.src_port,
5304 rule->tuples_mask.src_port);
5305 calc_y(tmp_y_s, rule->tuples.src_port,
5306 rule->tuples_mask.src_port);
5307 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5308 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5311 case BIT(INNER_DST_PORT):
5312 calc_x(tmp_x_s, rule->tuples.dst_port,
5313 rule->tuples_mask.dst_port);
5314 calc_y(tmp_y_s, rule->tuples.dst_port,
5315 rule->tuples_mask.dst_port);
5316 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5317 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5325 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5326 u8 vf_id, u8 network_port_id)
5328 u32 port_number = 0;
5330 if (port_type == HOST_PORT) {
5331 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5333 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5335 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5337 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5338 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5339 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5345 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5346 __le32 *key_x, __le32 *key_y,
5347 struct hclge_fd_rule *rule)
5349 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5350 u8 cur_pos = 0, tuple_size, shift_bits;
5353 for (i = 0; i < MAX_META_DATA; i++) {
5354 tuple_size = meta_data_key_info[i].key_length;
5355 tuple_bit = key_cfg->meta_data_active & BIT(i);
5357 switch (tuple_bit) {
5358 case BIT(ROCE_TYPE):
5359 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5360 cur_pos += tuple_size;
5362 case BIT(DST_VPORT):
5363 port_number = hclge_get_port_number(HOST_PORT, 0,
5365 hnae3_set_field(meta_data,
5366 GENMASK(cur_pos + tuple_size, cur_pos),
5367 cur_pos, port_number);
5368 cur_pos += tuple_size;
5375 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5376 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5377 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5379 *key_x = cpu_to_le32(tmp_x << shift_bits);
5380 *key_y = cpu_to_le32(tmp_y << shift_bits);
5383 /* A complete key is combined with meta data key and tuple key.
5384 * Meta data key is stored at the MSB region, and tuple key is stored at
5385 * the LSB region, unused bits will be filled 0.
5387 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5388 struct hclge_fd_rule *rule)
5390 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5391 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5392 u8 *cur_key_x, *cur_key_y;
5393 u8 meta_data_region;
5398 memset(key_x, 0, sizeof(key_x));
5399 memset(key_y, 0, sizeof(key_y));
5403 for (i = 0 ; i < MAX_TUPLE; i++) {
5407 tuple_size = tuple_key_info[i].key_length / 8;
5408 check_tuple = key_cfg->tuple_active & BIT(i);
5410 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5413 cur_key_x += tuple_size;
5414 cur_key_y += tuple_size;
5418 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5419 MAX_META_DATA_LENGTH / 8;
5421 hclge_fd_convert_meta_data(key_cfg,
5422 (__le32 *)(key_x + meta_data_region),
5423 (__le32 *)(key_y + meta_data_region),
5426 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5429 dev_err(&hdev->pdev->dev,
5430 "fd key_y config fail, loc=%u, ret=%d\n",
5431 rule->queue_id, ret);
5435 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5438 dev_err(&hdev->pdev->dev,
5439 "fd key_x config fail, loc=%u, ret=%d\n",
5440 rule->queue_id, ret);
5444 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5445 struct hclge_fd_rule *rule)
5447 struct hclge_vport *vport = hdev->vport;
5448 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5449 struct hclge_fd_ad_data ad_data;
5451 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5452 ad_data.ad_id = rule->location;
5454 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5455 ad_data.drop_packet = true;
5456 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5457 ad_data.override_tc = true;
5459 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5461 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5463 ad_data.forward_to_direct_queue = true;
5464 ad_data.queue_id = rule->queue_id;
5467 ad_data.use_counter = false;
5468 ad_data.counter_id = 0;
5470 ad_data.use_next_stage = false;
5471 ad_data.next_input_key = 0;
5473 ad_data.write_rule_id_to_bd = true;
5474 ad_data.rule_id = rule->location;
5476 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5479 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5482 if (!spec || !unused_tuple)
5485 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5488 *unused_tuple |= BIT(INNER_SRC_IP);
5491 *unused_tuple |= BIT(INNER_DST_IP);
5494 *unused_tuple |= BIT(INNER_SRC_PORT);
5497 *unused_tuple |= BIT(INNER_DST_PORT);
5500 *unused_tuple |= BIT(INNER_IP_TOS);
5505 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5508 if (!spec || !unused_tuple)
5511 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5512 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5515 *unused_tuple |= BIT(INNER_SRC_IP);
5518 *unused_tuple |= BIT(INNER_DST_IP);
5521 *unused_tuple |= BIT(INNER_IP_TOS);
5524 *unused_tuple |= BIT(INNER_IP_PROTO);
5526 if (spec->l4_4_bytes)
5529 if (spec->ip_ver != ETH_RX_NFC_IP4)
5535 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5538 if (!spec || !unused_tuple)
5541 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5544 /* check whether src/dst ip address used */
5545 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5546 *unused_tuple |= BIT(INNER_SRC_IP);
5548 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5549 *unused_tuple |= BIT(INNER_DST_IP);
5552 *unused_tuple |= BIT(INNER_SRC_PORT);
5555 *unused_tuple |= BIT(INNER_DST_PORT);
5563 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5566 if (!spec || !unused_tuple)
5569 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5570 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5572 /* check whether src/dst ip address used */
5573 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5574 *unused_tuple |= BIT(INNER_SRC_IP);
5576 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5577 *unused_tuple |= BIT(INNER_DST_IP);
5579 if (!spec->l4_proto)
5580 *unused_tuple |= BIT(INNER_IP_PROTO);
5585 if (spec->l4_4_bytes)
5591 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5593 if (!spec || !unused_tuple)
5596 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5597 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5598 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5600 if (is_zero_ether_addr(spec->h_source))
5601 *unused_tuple |= BIT(INNER_SRC_MAC);
5603 if (is_zero_ether_addr(spec->h_dest))
5604 *unused_tuple |= BIT(INNER_DST_MAC);
5607 *unused_tuple |= BIT(INNER_ETH_TYPE);
5612 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5613 struct ethtool_rx_flow_spec *fs,
5616 if (fs->flow_type & FLOW_EXT) {
5617 if (fs->h_ext.vlan_etype) {
5618 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5622 if (!fs->h_ext.vlan_tci)
5623 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5625 if (fs->m_ext.vlan_tci &&
5626 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5627 dev_err(&hdev->pdev->dev,
5628 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5629 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5633 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5636 if (fs->flow_type & FLOW_MAC_EXT) {
5637 if (hdev->fd_cfg.fd_mode !=
5638 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5639 dev_err(&hdev->pdev->dev,
5640 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5644 if (is_zero_ether_addr(fs->h_ext.h_dest))
5645 *unused_tuple |= BIT(INNER_DST_MAC);
5647 *unused_tuple &= ~BIT(INNER_DST_MAC);
5653 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5654 struct ethtool_rx_flow_spec *fs,
5660 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5661 dev_err(&hdev->pdev->dev,
5662 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5664 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5668 if ((fs->flow_type & FLOW_EXT) &&
5669 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5670 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5674 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5675 switch (flow_type) {
5679 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5683 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5689 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5692 case IPV6_USER_FLOW:
5693 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5697 if (hdev->fd_cfg.fd_mode !=
5698 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5699 dev_err(&hdev->pdev->dev,
5700 "ETHER_FLOW is not supported in current fd mode!\n");
5704 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5708 dev_err(&hdev->pdev->dev,
5709 "unsupported protocol type, protocol type = %#x\n",
5715 dev_err(&hdev->pdev->dev,
5716 "failed to check flow union tuple, ret = %d\n",
5721 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5724 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5726 struct hclge_fd_rule *rule = NULL;
5727 struct hlist_node *node2;
5729 spin_lock_bh(&hdev->fd_rule_lock);
5730 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5731 if (rule->location >= location)
5735 spin_unlock_bh(&hdev->fd_rule_lock);
5737 return rule && rule->location == location;
5740 /* make sure being called after lock up with fd_rule_lock */
5741 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5742 struct hclge_fd_rule *new_rule,
5746 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5747 struct hlist_node *node2;
5749 if (is_add && !new_rule)
5752 hlist_for_each_entry_safe(rule, node2,
5753 &hdev->fd_rule_list, rule_node) {
5754 if (rule->location >= location)
5759 if (rule && rule->location == location) {
5760 hlist_del(&rule->rule_node);
5762 hdev->hclge_fd_rule_num--;
5765 if (!hdev->hclge_fd_rule_num)
5766 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5767 clear_bit(location, hdev->fd_bmap);
5771 } else if (!is_add) {
5772 dev_err(&hdev->pdev->dev,
5773 "delete fail, rule %u is inexistent\n",
5778 INIT_HLIST_NODE(&new_rule->rule_node);
5781 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5783 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5785 set_bit(location, hdev->fd_bmap);
5786 hdev->hclge_fd_rule_num++;
5787 hdev->fd_active_type = new_rule->rule_type;
5792 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5793 struct ethtool_rx_flow_spec *fs,
5794 struct hclge_fd_rule *rule)
5796 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5798 switch (flow_type) {
5802 rule->tuples.src_ip[IPV4_INDEX] =
5803 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5804 rule->tuples_mask.src_ip[IPV4_INDEX] =
5805 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5807 rule->tuples.dst_ip[IPV4_INDEX] =
5808 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5809 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5810 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5812 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5813 rule->tuples_mask.src_port =
5814 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5816 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5817 rule->tuples_mask.dst_port =
5818 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5820 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5821 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5823 rule->tuples.ether_proto = ETH_P_IP;
5824 rule->tuples_mask.ether_proto = 0xFFFF;
5828 rule->tuples.src_ip[IPV4_INDEX] =
5829 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5830 rule->tuples_mask.src_ip[IPV4_INDEX] =
5831 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5833 rule->tuples.dst_ip[IPV4_INDEX] =
5834 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5835 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5836 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5838 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5839 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5841 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5842 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5844 rule->tuples.ether_proto = ETH_P_IP;
5845 rule->tuples_mask.ether_proto = 0xFFFF;
5851 be32_to_cpu_array(rule->tuples.src_ip,
5852 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5853 be32_to_cpu_array(rule->tuples_mask.src_ip,
5854 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5856 be32_to_cpu_array(rule->tuples.dst_ip,
5857 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5858 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5859 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5861 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5862 rule->tuples_mask.src_port =
5863 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5865 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5866 rule->tuples_mask.dst_port =
5867 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5869 rule->tuples.ether_proto = ETH_P_IPV6;
5870 rule->tuples_mask.ether_proto = 0xFFFF;
5873 case IPV6_USER_FLOW:
5874 be32_to_cpu_array(rule->tuples.src_ip,
5875 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5876 be32_to_cpu_array(rule->tuples_mask.src_ip,
5877 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5879 be32_to_cpu_array(rule->tuples.dst_ip,
5880 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5881 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5882 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5884 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5885 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5887 rule->tuples.ether_proto = ETH_P_IPV6;
5888 rule->tuples_mask.ether_proto = 0xFFFF;
5892 ether_addr_copy(rule->tuples.src_mac,
5893 fs->h_u.ether_spec.h_source);
5894 ether_addr_copy(rule->tuples_mask.src_mac,
5895 fs->m_u.ether_spec.h_source);
5897 ether_addr_copy(rule->tuples.dst_mac,
5898 fs->h_u.ether_spec.h_dest);
5899 ether_addr_copy(rule->tuples_mask.dst_mac,
5900 fs->m_u.ether_spec.h_dest);
5902 rule->tuples.ether_proto =
5903 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5904 rule->tuples_mask.ether_proto =
5905 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5912 switch (flow_type) {
5915 rule->tuples.ip_proto = IPPROTO_SCTP;
5916 rule->tuples_mask.ip_proto = 0xFF;
5920 rule->tuples.ip_proto = IPPROTO_TCP;
5921 rule->tuples_mask.ip_proto = 0xFF;
5925 rule->tuples.ip_proto = IPPROTO_UDP;
5926 rule->tuples_mask.ip_proto = 0xFF;
5932 if (fs->flow_type & FLOW_EXT) {
5933 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5934 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5937 if (fs->flow_type & FLOW_MAC_EXT) {
5938 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5939 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5945 /* make sure being called after lock up with fd_rule_lock */
5946 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5947 struct hclge_fd_rule *rule)
5952 dev_err(&hdev->pdev->dev,
5953 "The flow director rule is NULL\n");
5957 /* it will never fail here, so needn't to check return value */
5958 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5960 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5964 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5971 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5975 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
5977 struct hclge_vport *vport = hclge_get_vport(handle);
5978 struct hclge_dev *hdev = vport->back;
5980 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
5983 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5984 struct ethtool_rxnfc *cmd)
5986 struct hclge_vport *vport = hclge_get_vport(handle);
5987 struct hclge_dev *hdev = vport->back;
5988 u16 dst_vport_id = 0, q_index = 0;
5989 struct ethtool_rx_flow_spec *fs;
5990 struct hclge_fd_rule *rule;
5995 if (!hnae3_dev_fd_supported(hdev)) {
5996 dev_err(&hdev->pdev->dev,
5997 "flow table director is not supported\n");
6002 dev_err(&hdev->pdev->dev,
6003 "please enable flow director first\n");
6007 if (hclge_is_cls_flower_active(handle)) {
6008 dev_err(&hdev->pdev->dev,
6009 "please delete all exist cls flower rules first\n");
6013 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6015 ret = hclge_fd_check_spec(hdev, fs, &unused);
6019 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
6020 action = HCLGE_FD_ACTION_DROP_PACKET;
6022 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
6023 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
6026 if (vf > hdev->num_req_vfs) {
6027 dev_err(&hdev->pdev->dev,
6028 "Error: vf id (%u) > max vf num (%u)\n",
6029 vf, hdev->num_req_vfs);
6033 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6034 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
6037 dev_err(&hdev->pdev->dev,
6038 "Error: queue id (%u) > max tqp num (%u)\n",
6043 action = HCLGE_FD_ACTION_SELECT_QUEUE;
6047 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6051 ret = hclge_fd_get_tuple(hdev, fs, rule);
6057 rule->flow_type = fs->flow_type;
6058 rule->location = fs->location;
6059 rule->unused_tuple = unused;
6060 rule->vf_id = dst_vport_id;
6061 rule->queue_id = q_index;
6062 rule->action = action;
6063 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6065 /* to avoid rule conflict, when user configure rule by ethtool,
6066 * we need to clear all arfs rules
6068 spin_lock_bh(&hdev->fd_rule_lock);
6069 hclge_clear_arfs_rules(handle);
6071 ret = hclge_fd_config_rule(hdev, rule);
6073 spin_unlock_bh(&hdev->fd_rule_lock);
6078 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6079 struct ethtool_rxnfc *cmd)
6081 struct hclge_vport *vport = hclge_get_vport(handle);
6082 struct hclge_dev *hdev = vport->back;
6083 struct ethtool_rx_flow_spec *fs;
6086 if (!hnae3_dev_fd_supported(hdev))
6089 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6091 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6094 if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6095 !hclge_fd_rule_exist(hdev, fs->location)) {
6096 dev_err(&hdev->pdev->dev,
6097 "Delete fail, rule %u is inexistent\n", fs->location);
6101 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6106 spin_lock_bh(&hdev->fd_rule_lock);
6107 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6109 spin_unlock_bh(&hdev->fd_rule_lock);
6114 /* make sure being called after lock up with fd_rule_lock */
6115 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6118 struct hclge_vport *vport = hclge_get_vport(handle);
6119 struct hclge_dev *hdev = vport->back;
6120 struct hclge_fd_rule *rule;
6121 struct hlist_node *node;
6124 if (!hnae3_dev_fd_supported(hdev))
6127 for_each_set_bit(location, hdev->fd_bmap,
6128 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6129 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6133 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6135 hlist_del(&rule->rule_node);
6138 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6139 hdev->hclge_fd_rule_num = 0;
6140 bitmap_zero(hdev->fd_bmap,
6141 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6145 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6147 struct hclge_vport *vport = hclge_get_vport(handle);
6148 struct hclge_dev *hdev = vport->back;
6149 struct hclge_fd_rule *rule;
6150 struct hlist_node *node;
6153 /* Return ok here, because reset error handling will check this
6154 * return value. If error is returned here, the reset process will
6157 if (!hnae3_dev_fd_supported(hdev))
6160 /* if fd is disabled, should not restore it when reset */
6164 spin_lock_bh(&hdev->fd_rule_lock);
6165 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6166 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6168 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6171 dev_warn(&hdev->pdev->dev,
6172 "Restore rule %u failed, remove it\n",
6174 clear_bit(rule->location, hdev->fd_bmap);
6175 hlist_del(&rule->rule_node);
6177 hdev->hclge_fd_rule_num--;
6181 if (hdev->hclge_fd_rule_num)
6182 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6184 spin_unlock_bh(&hdev->fd_rule_lock);
6189 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6190 struct ethtool_rxnfc *cmd)
6192 struct hclge_vport *vport = hclge_get_vport(handle);
6193 struct hclge_dev *hdev = vport->back;
6195 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6198 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6199 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6204 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6205 struct ethtool_tcpip4_spec *spec,
6206 struct ethtool_tcpip4_spec *spec_mask)
6208 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6209 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6210 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6212 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6213 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6214 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6216 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6217 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6218 0 : cpu_to_be16(rule->tuples_mask.src_port);
6220 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6221 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6222 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6224 spec->tos = rule->tuples.ip_tos;
6225 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6226 0 : rule->tuples_mask.ip_tos;
6229 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6230 struct ethtool_usrip4_spec *spec,
6231 struct ethtool_usrip4_spec *spec_mask)
6233 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6234 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6235 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6237 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6238 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6239 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6241 spec->tos = rule->tuples.ip_tos;
6242 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6243 0 : rule->tuples_mask.ip_tos;
6245 spec->proto = rule->tuples.ip_proto;
6246 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6247 0 : rule->tuples_mask.ip_proto;
6249 spec->ip_ver = ETH_RX_NFC_IP4;
6252 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6253 struct ethtool_tcpip6_spec *spec,
6254 struct ethtool_tcpip6_spec *spec_mask)
6256 cpu_to_be32_array(spec->ip6src,
6257 rule->tuples.src_ip, IPV6_SIZE);
6258 cpu_to_be32_array(spec->ip6dst,
6259 rule->tuples.dst_ip, IPV6_SIZE);
6260 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6261 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6263 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6266 if (rule->unused_tuple & BIT(INNER_DST_IP))
6267 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6269 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6272 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6273 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6274 0 : cpu_to_be16(rule->tuples_mask.src_port);
6276 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6277 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6278 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6281 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6282 struct ethtool_usrip6_spec *spec,
6283 struct ethtool_usrip6_spec *spec_mask)
6285 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6286 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6287 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6288 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6290 cpu_to_be32_array(spec_mask->ip6src,
6291 rule->tuples_mask.src_ip, IPV6_SIZE);
6293 if (rule->unused_tuple & BIT(INNER_DST_IP))
6294 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6296 cpu_to_be32_array(spec_mask->ip6dst,
6297 rule->tuples_mask.dst_ip, IPV6_SIZE);
6299 spec->l4_proto = rule->tuples.ip_proto;
6300 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6301 0 : rule->tuples_mask.ip_proto;
6304 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6305 struct ethhdr *spec,
6306 struct ethhdr *spec_mask)
6308 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6309 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6311 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6312 eth_zero_addr(spec_mask->h_source);
6314 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6316 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6317 eth_zero_addr(spec_mask->h_dest);
6319 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6321 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6322 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6323 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6326 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6327 struct hclge_fd_rule *rule)
6329 if (fs->flow_type & FLOW_EXT) {
6330 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6331 fs->m_ext.vlan_tci =
6332 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6333 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6336 if (fs->flow_type & FLOW_MAC_EXT) {
6337 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6338 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6339 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6341 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6342 rule->tuples_mask.dst_mac);
/* ethtool ETHTOOL_GRXCLSRULE handler: look up the FD rule at fs->location and
 * translate it back into an ethtool_rx_flow_spec (per-flow-type spec + mask,
 * ext fields, and ring/vf cookie).
 */
6346 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6347 struct ethtool_rxnfc *cmd)
6349 struct hclge_vport *vport = hclge_get_vport(handle);
6350 struct hclge_fd_rule *rule = NULL;
6351 struct hclge_dev *hdev = vport->back;
6352 struct ethtool_rx_flow_spec *fs;
6353 struct hlist_node *node2;
6355 if (!hnae3_dev_fd_supported(hdev))
6358 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6360 spin_lock_bh(&hdev->fd_rule_lock);
/* the list is kept sorted by location; stop at the first candidate */
6362 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6363 if (rule->location >= fs->location)
/* no exact match at the requested location */
6367 if (!rule || fs->location != rule->location) {
6368 spin_unlock_bh(&hdev->fd_rule_lock);
6373 fs->flow_type = rule->flow_type;
6374 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6378 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6379 &fs->m_u.tcp_ip4_spec);
6382 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6383 &fs->m_u.usr_ip4_spec);
6388 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6389 &fs->m_u.tcp_ip6_spec);
6391 case IPV6_USER_FLOW:
6392 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6393 &fs->m_u.usr_ip6_spec);
6395 /* The flow type of fd rule has been checked before adding in to rule
6396 * list. As other flow types have been handled, it must be ETHER_FLOW
6397 * for the default case
6400 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6401 &fs->m_u.ether_spec);
6405 hclge_fd_get_ext_info(fs, rule);
6407 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6408 fs->ring_cookie = RX_CLS_FLOW_DISC;
/* encode queue id plus VF id in the ring cookie */
6412 fs->ring_cookie = rule->queue_id;
6413 vf_id = rule->vf_id;
6414 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6415 fs->ring_cookie |= vf_id;
6418 spin_unlock_bh(&hdev->fd_rule_lock);
/* ethtool ETHTOOL_GRXCLSRLALL handler: copy the locations of all FD rules
 * into rule_locs, bounded by cmd->rule_cnt, and report table capacity.
 */
6423 static int hclge_get_all_rules(struct hnae3_handle *handle,
6424 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6426 struct hclge_vport *vport = hclge_get_vport(handle);
6427 struct hclge_dev *hdev = vport->back;
6428 struct hclge_fd_rule *rule;
6429 struct hlist_node *node2;
6432 if (!hnae3_dev_fd_supported(hdev))
6435 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6437 spin_lock_bh(&hdev->fd_rule_lock);
6438 hlist_for_each_entry_safe(rule, node2,
6439 &hdev->fd_rule_list, rule_node) {
/* caller's buffer exhausted before the list ended */
6440 if (cnt == cmd->rule_cnt) {
6441 spin_unlock_bh(&hdev->fd_rule_lock);
6445 rule_locs[cnt] = rule->location;
6449 spin_unlock_bh(&hdev->fd_rule_lock);
/* report how many entries were actually filled */
6451 cmd->rule_cnt = cnt;
/* Convert dissected flow keys (aRFS path) into the driver's host-endian
 * FD tuple representation. For IPv4 only the last src/dst_ip slot is used.
 */
6456 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6457 struct hclge_fd_rule_tuples *tuples)
/* local shorthands for the IPv6 address words */
6459 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6460 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6462 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6463 tuples->ip_proto = fkeys->basic.ip_proto;
6464 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6466 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6467 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6468 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
/* non-IPv4: copy all IPV6_SIZE 32-bit words */
6472 for (i = 0; i < IPV6_SIZE; i++) {
6473 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6474 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6479 /* traverse all rules, check whether an existed rule has the same tuples */
6480 static struct hclge_fd_rule *
6481 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6482 const struct hclge_fd_rule_tuples *tuples)
6484 struct hclge_fd_rule *rule = NULL;
6485 struct hlist_node *node;
6487 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6488 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
/* Build an aRFS-originated FD rule from extracted flow tuples: mark the
 * tuples aRFS does not match on as unused, pick the ethtool flow type from
 * the L3/L4 protocols, and use a fully-set mask for the rest.
 */
6495 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6496 struct hclge_fd_rule *rule)
6498 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6499 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6500 BIT(INNER_SRC_PORT);
6503 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6504 if (tuples->ether_proto == ETH_P_IP) {
6505 if (tuples->ip_proto == IPPROTO_TCP)
6506 rule->flow_type = TCP_V4_FLOW;
6508 rule->flow_type = UDP_V4_FLOW;
/* NOTE(review): non-IPv4 branch presumably assumes IPv6 here */
6510 if (tuples->ip_proto == IPPROTO_TCP)
6511 rule->flow_type = TCP_V6_FLOW;
6513 rule->flow_type = UDP_V6_FLOW;
6515 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
/* exact match on every tuple that is not flagged unused */
6516 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
/* aRFS entry point (ndo_rx_flow_steer path): steer the given flow to
 * queue_id. Creates a new FD rule, updates an existing one whose queue
 * differs, or does nothing if it already points at queue_id. Returns the
 * rule location (used as the aRFS filter id) or a negative errno.
 */
6519 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6520 u16 flow_id, struct flow_keys *fkeys)
6522 struct hclge_vport *vport = hclge_get_vport(handle);
6523 struct hclge_fd_rule_tuples new_tuples = {};
6524 struct hclge_dev *hdev = vport->back;
6525 struct hclge_fd_rule *rule;
6530 if (!hnae3_dev_fd_supported(hdev))
6533 /* when there is already fd rule existed add by user,
6534 * arfs should not work
6536 spin_lock_bh(&hdev->fd_rule_lock);
6537 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6538 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6539 spin_unlock_bh(&hdev->fd_rule_lock);
6543 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6545 /* check is there flow director filter existed for this flow,
6546 * if not, create a new filter for it;
6547 * if filter exist with different queue id, modify the filter;
6548 * if filter exist with same queue id, do nothing
6550 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
/* no existing rule: allocate a free location from the bitmap */
6552 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6553 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6554 spin_unlock_bh(&hdev->fd_rule_lock);
/* GFP_ATOMIC: called from the rx softirq steering path */
6558 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6560 spin_unlock_bh(&hdev->fd_rule_lock);
6564 set_bit(bit_id, hdev->fd_bmap);
6565 rule->location = bit_id;
6566 rule->arfs.flow_id = flow_id;
6567 rule->queue_id = queue_id;
6568 hclge_fd_build_arfs_rule(&new_tuples, rule);
6569 ret = hclge_fd_config_rule(hdev, rule);
6571 spin_unlock_bh(&hdev->fd_rule_lock);
6576 return rule->location;
6579 spin_unlock_bh(&hdev->fd_rule_lock);
/* existing rule already steering to the requested queue */
6581 if (rule->queue_id == queue_id)
6582 return rule->location;
/* retarget the rule; roll the queue id back on failure */
6584 tmp_queue_id = rule->queue_id;
6585 rule->queue_id = queue_id;
6586 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6588 rule->queue_id = tmp_queue_id;
6592 return rule->location;
/* Periodic aRFS aging: move expired aRFS rules (per rps_may_expire_flow) to
 * a private list under the lock, then remove them from hardware outside it.
 */
6595 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6597 #ifdef CONFIG_RFS_ACCEL
6598 struct hnae3_handle *handle = &hdev->vport[0].nic;
6599 struct hclge_fd_rule *rule;
6600 struct hlist_node *node;
6601 HLIST_HEAD(del_list);
6603 spin_lock_bh(&hdev->fd_rule_lock);
/* only age rules when aRFS owns the FD table */
6604 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6605 spin_unlock_bh(&hdev->fd_rule_lock);
6608 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6609 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6610 rule->arfs.flow_id, rule->location)) {
6611 hlist_del_init(&rule->rule_node);
6612 hlist_add_head(&rule->rule_node, &del_list);
6613 hdev->hclge_fd_rule_num--;
6614 clear_bit(rule->location, hdev->fd_bmap);
6617 spin_unlock_bh(&hdev->fd_rule_lock);
/* hardware teardown done without holding fd_rule_lock */
6619 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6620 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6621 rule->location, NULL, false);
/* Drop all aRFS-installed FD rules. Caller must already hold
 * hdev->fd_rule_lock (see callers in add_cls_flower / ae_stop).
 */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_dev *hdev = hclge_get_vport(handle)->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}
6639 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
6640 struct hclge_fd_rule *rule)
6642 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
6643 struct flow_match_basic match;
6644 u16 ethtype_key, ethtype_mask;
6646 flow_rule_match_basic(flow, &match);
6647 ethtype_key = ntohs(match.key->n_proto);
6648 ethtype_mask = ntohs(match.mask->n_proto);
6650 if (ethtype_key == ETH_P_ALL) {
6654 rule->tuples.ether_proto = ethtype_key;
6655 rule->tuples_mask.ether_proto = ethtype_mask;
6656 rule->tuples.ip_proto = match.key->ip_proto;
6657 rule->tuples_mask.ip_proto = match.mask->ip_proto;
6659 rule->unused_tuple |= BIT(INNER_IP_PROTO);
6660 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
6664 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
6665 struct hclge_fd_rule *rule)
6667 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
6668 struct flow_match_eth_addrs match;
6670 flow_rule_match_eth_addrs(flow, &match);
6671 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
6672 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
6673 ether_addr_copy(rule->tuples.src_mac, match.key->src);
6674 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
6676 rule->unused_tuple |= BIT(INNER_DST_MAC);
6677 rule->unused_tuple |= BIT(INNER_SRC_MAC);
6681 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
6682 struct hclge_fd_rule *rule)
6684 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
6685 struct flow_match_vlan match;
6687 flow_rule_match_vlan(flow, &match);
6688 rule->tuples.vlan_tag1 = match.key->vlan_id |
6689 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
6690 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
6691 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
6693 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6697 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
6698 struct hclge_fd_rule *rule)
6702 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
6703 struct flow_match_control match;
6705 flow_rule_match_control(flow, &match);
6706 addr_type = match.key->addr_type;
6709 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
6710 struct flow_match_ipv4_addrs match;
6712 flow_rule_match_ipv4_addrs(flow, &match);
6713 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
6714 rule->tuples_mask.src_ip[IPV4_INDEX] =
6715 be32_to_cpu(match.mask->src);
6716 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
6717 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6718 be32_to_cpu(match.mask->dst);
6719 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
6720 struct flow_match_ipv6_addrs match;
6722 flow_rule_match_ipv6_addrs(flow, &match);
6723 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
6725 be32_to_cpu_array(rule->tuples_mask.src_ip,
6726 match.mask->src.s6_addr32, IPV6_SIZE);
6727 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
6729 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6730 match.mask->dst.s6_addr32, IPV6_SIZE);
6732 rule->unused_tuple |= BIT(INNER_SRC_IP);
6733 rule->unused_tuple |= BIT(INNER_DST_IP);
6737 static void hclge_get_cls_key_port(const struct flow_rule *flow,
6738 struct hclge_fd_rule *rule)
6740 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
6741 struct flow_match_ports match;
6743 flow_rule_match_ports(flow, &match);
6745 rule->tuples.src_port = be16_to_cpu(match.key->src);
6746 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
6747 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
6748 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
6750 rule->unused_tuple |= BIT(INNER_SRC_PORT);
6751 rule->unused_tuple |= BIT(INNER_DST_PORT);
6755 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
6756 struct flow_cls_offload *cls_flower,
6757 struct hclge_fd_rule *rule)
6759 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
6760 struct flow_dissector *dissector = flow->match.dissector;
6762 if (dissector->used_keys &
6763 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
6764 BIT(FLOW_DISSECTOR_KEY_BASIC) |
6765 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
6766 BIT(FLOW_DISSECTOR_KEY_VLAN) |
6767 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
6768 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
6769 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
6770 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
6771 dissector->used_keys);
6775 hclge_get_cls_key_basic(flow, rule);
6776 hclge_get_cls_key_mac(flow, rule);
6777 hclge_get_cls_key_vlan(flow, rule);
6778 hclge_get_cls_key_ip(flow, rule);
6779 hclge_get_cls_key_port(flow, rule);
6784 static int hclge_check_cls_flower(struct hclge_dev *hdev,
6785 struct flow_cls_offload *cls_flower, int tc)
6787 u32 prio = cls_flower->common.prio;
6789 if (tc < 0 || tc > hdev->tc_max) {
6790 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
6795 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6796 dev_err(&hdev->pdev->dev,
6797 "prio %u should be in range[1, %u]\n",
6798 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6802 if (test_bit(prio - 1, hdev->fd_bmap)) {
6803 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
/* tc-flower add handler: refuse while ethtool rules own the table, validate
 * the request, parse the match into an FD rule at location prio - 1, clear
 * any aRFS rules, and program the rule into hardware.
 */
6809 static int hclge_add_cls_flower(struct hnae3_handle *handle,
6810 struct flow_cls_offload *cls_flower,
6813 struct hclge_vport *vport = hclge_get_vport(handle);
6814 struct hclge_dev *hdev = vport->back;
6815 struct hclge_fd_rule *rule;
/* ethtool (EP) rules and tc-flower rules are mutually exclusive */
6818 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6819 dev_err(&hdev->pdev->dev,
6820 "please remove all exist fd rules via ethtool first\n");
6824 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
6826 dev_err(&hdev->pdev->dev,
6827 "failed to check cls flower params, ret = %d\n", ret);
6831 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6835 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
6839 rule->action = HCLGE_FD_ACTION_SELECT_TC;
6840 rule->cls_flower.tc = tc;
/* tc prio is 1-based; FD location is 0-based */
6841 rule->location = cls_flower->common.prio - 1;
6843 rule->cls_flower.cookie = cls_flower->cookie;
6844 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
6846 spin_lock_bh(&hdev->fd_rule_lock);
6847 hclge_clear_arfs_rules(handle);
6849 ret = hclge_fd_config_rule(hdev, rule);
6851 spin_unlock_bh(&hdev->fd_rule_lock);
6854 dev_err(&hdev->pdev->dev,
6855 "failed to add cls flower rule, ret = %d\n", ret);
6865 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
6866 unsigned long cookie)
6868 struct hclge_fd_rule *rule;
6869 struct hlist_node *node;
6871 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6872 if (rule->cls_flower.cookie == cookie)
/* tc-flower delete handler: locate the rule by cookie, remove its TCAM
 * entry, then drop it from the software rule list — all under fd_rule_lock.
 */
6879 static int hclge_del_cls_flower(struct hnae3_handle *handle,
6880 struct flow_cls_offload *cls_flower)
6882 struct hclge_vport *vport = hclge_get_vport(handle);
6883 struct hclge_dev *hdev = vport->back;
6884 struct hclge_fd_rule *rule;
6887 spin_lock_bh(&hdev->fd_rule_lock);
6889 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
/* no rule for this cookie */
6891 spin_unlock_bh(&hdev->fd_rule_lock);
/* invalidate the hardware TCAM entry first */
6895 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
6898 dev_err(&hdev->pdev->dev,
6899 "failed to delete cls flower rule %u, ret = %d\n",
6900 rule->location, ret);
6901 spin_unlock_bh(&hdev->fd_rule_lock);
/* then remove the entry from the software list/bitmap */
6905 ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
6907 dev_err(&hdev->pdev->dev,
6908 "failed to delete cls flower rule %u in list, ret = %d\n",
6909 rule->location, ret);
6910 spin_unlock_bh(&hdev->fd_rule_lock);
6914 spin_unlock_bh(&hdev->fd_rule_lock);
6919 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6921 struct hclge_vport *vport = hclge_get_vport(handle);
6922 struct hclge_dev *hdev = vport->back;
6924 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6925 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6928 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6930 struct hclge_vport *vport = hclge_get_vport(handle);
6931 struct hclge_dev *hdev = vport->back;
6933 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6936 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6938 struct hclge_vport *vport = hclge_get_vport(handle);
6939 struct hclge_dev *hdev = vport->back;
6941 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6944 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6946 struct hclge_vport *vport = hclge_get_vport(handle);
6947 struct hclge_dev *hdev = vport->back;
6949 return hdev->rst_stats.hw_reset_done_cnt;
6952 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6954 struct hclge_vport *vport = hclge_get_vport(handle);
6955 struct hclge_dev *hdev = vport->back;
6958 hdev->fd_en = enable;
6959 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6962 spin_lock_bh(&hdev->fd_rule_lock);
6963 hclge_del_all_fd_entries(handle, clear);
6964 spin_unlock_bh(&hdev->fd_rule_lock);
6966 hclge_restore_fd_entries(handle);
6970 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6972 struct hclge_desc desc;
6973 struct hclge_config_mac_mode_cmd *req =
6974 (struct hclge_config_mac_mode_cmd *)desc.data;
6978 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6981 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6982 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6983 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6984 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6985 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6986 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6987 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6988 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6989 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6990 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6993 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6995 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6997 dev_err(&hdev->pdev->dev,
6998 "mac enable fail, ret =%d.\n", ret);
/* Read-modify-write the per-function MAC/VLAN switch parameter: only the
 * bits selected by param_mask are replaced with switch_param.
 */
7001 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7002 u8 switch_param, u8 param_mask)
7004 struct hclge_mac_vlan_switch_cmd *req;
7005 struct hclge_desc desc;
7009 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7010 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7012 /* read current config parameter */
7013 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7015 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7016 req->func_id = cpu_to_le32(func_id);
7018 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7020 dev_err(&hdev->pdev->dev,
7021 "read mac vlan switch parameter fail, ret = %d\n", ret);
7025 /* modify and write new config parameter */
7026 hclge_cmd_reuse_desc(&desc, false);
/* keep masked-out bits, overlay the new value */
7027 req->switch_param = (req->switch_param & param_mask) | switch_param;
7028 req->param_mask = param_mask;
7030 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7032 dev_err(&hdev->pdev->dev,
7033 "set mac vlan switch parameter fail, ret = %d\n", ret);
/* Poll the PHY until its link state matches the expected value, retrying up
 * to HCLGE_PHY_LINK_STATUS_NUM times with HCLGE_LINK_STATUS_MS between
 * polls. Best-effort: gives up silently on timeout.
 */
7037 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7040 #define HCLGE_PHY_LINK_STATUS_NUM 200
7042 struct phy_device *phydev = hdev->hw.mac.phydev;
7047 ret = phy_read_status(phydev);
7049 dev_err(&hdev->pdev->dev,
7050 "phy update link status fail, ret = %d\n", ret);
/* reached the expected link state: done */
7054 if (phydev->link == link_ret)
7057 msleep(HCLGE_LINK_STATUS_MS);
7058 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
/* Poll the MAC link status until it matches link_ret, retrying up to
 * HCLGE_MAC_LINK_STATUS_NUM times with HCLGE_LINK_STATUS_MS between polls.
 */
7061 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7063 #define HCLGE_MAC_LINK_STATUS_NUM 100
7070 ret = hclge_get_mac_link_status(hdev, &link_status);
/* reached the expected link state: done */
7073 if (link_status == link_ret)
7076 msleep(HCLGE_LINK_STATUS_MS);
7077 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7081 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7086 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7089 hclge_phy_link_status_wait(hdev, link_ret);
7091 return hclge_mac_link_status_wait(hdev, link_ret);
7094 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7096 struct hclge_config_mac_mode_cmd *req;
7097 struct hclge_desc desc;
7101 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7102 /* 1 Read out the MAC mode config at first */
7103 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7104 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7106 dev_err(&hdev->pdev->dev,
7107 "mac loopback get fail, ret =%d.\n", ret);
7111 /* 2 Then setup the loopback flag */
7112 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7113 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7115 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7117 /* 3 Config mac work mode with loopback flag
7118 * and its original configure parameters
7120 hclge_cmd_reuse_desc(&desc, false);
7121 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7123 dev_err(&hdev->pdev->dev,
7124 "mac loopback set fail, ret =%d.\n", ret);
/* Configure serial or parallel serdes inner loopback through firmware, then
 * poll the command result until firmware reports completion.
 */
7128 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
7129 enum hnae3_loop loop_mode)
7131 #define HCLGE_SERDES_RETRY_MS 10
7132 #define HCLGE_SERDES_RETRY_NUM 100
7134 struct hclge_serdes_lb_cmd *req;
7135 struct hclge_desc desc;
7139 req = (struct hclge_serdes_lb_cmd *)desc.data;
7140 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
7142 switch (loop_mode) {
7143 case HNAE3_LOOP_SERIAL_SERDES:
7144 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7146 case HNAE3_LOOP_PARALLEL_SERDES:
7147 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7150 dev_err(&hdev->pdev->dev,
7151 "unsupported serdes loopback mode %d\n", loop_mode);
/* enable: set both enable and mask bits; disable: mask only */
7156 req->enable = loop_mode_b;
7157 req->mask = loop_mode_b;
7159 req->mask = loop_mode_b;
7162 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7164 dev_err(&hdev->pdev->dev,
7165 "serdes loopback set fail, ret = %d\n", ret);
/* poll firmware for the DONE bit, HCLGE_SERDES_RETRY_MS per try */
7170 msleep(HCLGE_SERDES_RETRY_MS);
7171 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
7173 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7175 dev_err(&hdev->pdev->dev,
7176 "serdes loopback get, ret = %d\n", ret);
7179 } while (++i < HCLGE_SERDES_RETRY_NUM &&
7180 !(req->result & HCLGE_CMD_SERDES_DONE_B));
7182 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
7183 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
7185 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
7186 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
7192 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
7193 enum hnae3_loop loop_mode)
7197 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
7201 hclge_cfg_mac_mode(hdev, en);
7203 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7205 dev_err(&hdev->pdev->dev,
7206 "serdes loopback config mac mode timeout\n");
7211 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7212 struct phy_device *phydev)
7216 if (!phydev->suspended) {
7217 ret = phy_suspend(phydev);
7222 ret = phy_resume(phydev);
7226 return phy_loopback(phydev, true);
7229 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7230 struct phy_device *phydev)
7234 ret = phy_loopback(phydev, false);
7238 return phy_suspend(phydev);
7241 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7243 struct phy_device *phydev = hdev->hw.mac.phydev;
7250 ret = hclge_enable_phy_loopback(hdev, phydev);
7252 ret = hclge_disable_phy_loopback(hdev, phydev);
7254 dev_err(&hdev->pdev->dev,
7255 "set phy loopback fail, ret = %d\n", ret);
7259 hclge_cfg_mac_mode(hdev, en);
7261 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7263 dev_err(&hdev->pdev->dev,
7264 "phy loopback config mac mode timeout\n");
7269 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
7270 int stream_id, bool enable)
7272 struct hclge_desc desc;
7273 struct hclge_cfg_com_tqp_queue_cmd *req =
7274 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7277 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7278 req->tqp_id = cpu_to_le16(tqp_id);
7279 req->stream_id = cpu_to_le16(stream_id);
7281 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7283 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7285 dev_err(&hdev->pdev->dev,
7286 "Tqp enable fail, status =%d.\n", ret);
/* ethtool self-test entry: enable/disable the requested loopback mode (app,
 * serdes, or PHY), adjusting the SSU switch parameter on V2+ hardware so
 * looped frames are not short-circuited, then toggle every TQP.
 */
7290 static int hclge_set_loopback(struct hnae3_handle *handle,
7291 enum hnae3_loop loop_mode, bool en)
7293 struct hclge_vport *vport = hclge_get_vport(handle);
7294 struct hnae3_knic_private_info *kinfo;
7295 struct hclge_dev *hdev = vport->back;
7298 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7299 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7300 * the same, the packets are looped back in the SSU. If SSU loopback
7301 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7303 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7304 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7306 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7307 HCLGE_SWITCH_ALW_LPBK_MASK);
7312 switch (loop_mode) {
7313 case HNAE3_LOOP_APP:
7314 ret = hclge_set_app_loopback(hdev, en);
7316 case HNAE3_LOOP_SERIAL_SERDES:
7317 case HNAE3_LOOP_PARALLEL_SERDES:
7318 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
7320 case HNAE3_LOOP_PHY:
7321 ret = hclge_set_phy_loopback(hdev, en);
7325 dev_err(&hdev->pdev->dev,
7326 "loop_mode %d is not supported\n", loop_mode);
/* apply the same enable state to every queue pair */
7333 kinfo = &vport->nic.kinfo;
7334 for (i = 0; i < kinfo->num_tqps; i++) {
7335 ret = hclge_tqp_enable(hdev, i, 0, en);
7343 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7347 ret = hclge_set_app_loopback(hdev, false);
7351 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7355 return hclge_cfg_serdes_loopback(hdev, false,
7356 HNAE3_LOOP_PARALLEL_SERDES);
7359 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7361 struct hclge_vport *vport = hclge_get_vport(handle);
7362 struct hnae3_knic_private_info *kinfo;
7363 struct hnae3_queue *queue;
7364 struct hclge_tqp *tqp;
7367 kinfo = &vport->nic.kinfo;
7368 for (i = 0; i < kinfo->num_tqps; i++) {
7369 queue = handle->kinfo.tqp[i];
7370 tqp = container_of(queue, struct hclge_tqp, q);
7371 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7375 static void hclge_flush_link_update(struct hclge_dev *hdev)
7377 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
7379 unsigned long last = hdev->serv_processed_cnt;
7382 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7383 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7384 last == hdev->serv_processed_cnt)
7388 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7390 struct hclge_vport *vport = hclge_get_vport(handle);
7391 struct hclge_dev *hdev = vport->back;
7394 hclge_task_schedule(hdev, 0);
7396 /* Set the DOWN flag here to disable link updating */
7397 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7399 /* flush memory to make sure DOWN is seen by service task */
7400 smp_mb__before_atomic();
7401 hclge_flush_link_update(hdev);
7405 static int hclge_ae_start(struct hnae3_handle *handle)
7407 struct hclge_vport *vport = hclge_get_vport(handle);
7408 struct hclge_dev *hdev = vport->back;
7411 hclge_cfg_mac_mode(hdev, true);
7412 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7413 hdev->hw.mac.link = 0;
7415 /* reset tqp stats */
7416 hclge_reset_tqp_stats(handle);
7418 hclge_mac_start_phy(hdev);
/* Bring the device down: mark DOWN, drop aRFS rules, reset all TQPs, mask
 * MAC TNL interrupts, disable the MAC, stop the PHY, and clear stats. While
 * a non-FUNC reset is being handled, only the PHY/link parts are touched.
 */
7423 static void hclge_ae_stop(struct hnae3_handle *handle)
7425 struct hclge_vport *vport = hclge_get_vport(handle);
7426 struct hclge_dev *hdev = vport->back;
7429 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7430 spin_lock_bh(&hdev->fd_rule_lock);
7431 hclge_clear_arfs_rules(handle);
7432 spin_unlock_bh(&hdev->fd_rule_lock);
7434 /* If it is not PF reset, the firmware will disable the MAC,
7435 * so it only need to stop phy here.
7437 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7438 hdev->reset_type != HNAE3_FUNC_RESET) {
7439 hclge_mac_stop_phy(hdev);
7440 hclge_update_link_status(hdev);
7444 for (i = 0; i < handle->kinfo.num_tqps; i++)
7445 hclge_reset_tqp(handle, i);
7447 hclge_config_mac_tnl_int(hdev, false);
/* Mac disable */
7450 hclge_cfg_mac_mode(hdev, false);
7452 hclge_mac_stop_phy(hdev);
7454 /* reset tqp stats */
7455 hclge_reset_tqp_stats(handle);
7456 hclge_update_link_status(hdev);
7459 int hclge_vport_start(struct hclge_vport *vport)
7461 struct hclge_dev *hdev = vport->back;
7463 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7464 vport->last_active_jiffies = jiffies;
7466 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7467 if (vport->vport_id) {
7468 hclge_restore_mac_table_common(vport);
7469 hclge_restore_vport_vlan_table(vport);
7471 hclge_restore_hw_table(hdev);
7475 clear_bit(vport->vport_id, hdev->vport_config_block);
7480 void hclge_vport_stop(struct hclge_vport *vport)
7482 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
/* hnae3 client-start hook: start the underlying vport. */
static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}
/* hnae3 client-stop hook: stop the underlying vport. */
static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}
7499 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7500 u16 cmdq_resp, u8 resp_code,
7501 enum hclge_mac_vlan_tbl_opcode op)
7503 struct hclge_dev *hdev = vport->back;
7506 dev_err(&hdev->pdev->dev,
7507 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7512 if (op == HCLGE_MAC_VLAN_ADD) {
7513 if (!resp_code || resp_code == 1)
7515 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7516 resp_code == HCLGE_ADD_MC_OVERFLOW)
7519 dev_err(&hdev->pdev->dev,
7520 "add mac addr failed for undefined, code=%u.\n",
7523 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7526 } else if (resp_code == 1) {
7527 dev_dbg(&hdev->pdev->dev,
7528 "remove mac addr failed for miss.\n");
7532 dev_err(&hdev->pdev->dev,
7533 "remove mac addr failed for undefined, code=%u.\n",
7536 } else if (op == HCLGE_MAC_VLAN_LKUP) {
7539 } else if (resp_code == 1) {
7540 dev_dbg(&hdev->pdev->dev,
7541 "lookup mac addr failed for miss.\n");
7545 dev_err(&hdev->pdev->dev,
7546 "lookup mac addr failed for undefined, code=%u.\n",
7551 dev_err(&hdev->pdev->dev,
7552 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7557 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7559 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7561 unsigned int word_num;
7562 unsigned int bit_num;
7564 if (vfid > 255 || vfid < 0)
7567 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7568 word_num = vfid / 32;
7569 bit_num = vfid % 32;
7571 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7573 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7575 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7576 bit_num = vfid % 32;
7578 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7580 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7586 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7588 #define HCLGE_DESC_NUMBER 3
7589 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7592 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7593 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7594 if (desc[i].data[j])
7600 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7601 const u8 *addr, bool is_mc)
7603 const unsigned char *mac_addr = addr;
7604 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7605 (mac_addr[0]) | (mac_addr[1] << 8);
7606 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
7608 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7610 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7611 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7614 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7615 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7618 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7619 struct hclge_mac_vlan_tbl_entry_cmd *req)
7621 struct hclge_dev *hdev = vport->back;
7622 struct hclge_desc desc;
7627 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7629 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7631 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7633 dev_err(&hdev->pdev->dev,
7634 "del mac addr failed for cmd_send, ret =%d.\n",
7638 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7639 retval = le16_to_cpu(desc.retval);
7641 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7642 HCLGE_MAC_VLAN_REMOVE);
7645 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7646 struct hclge_mac_vlan_tbl_entry_cmd *req,
7647 struct hclge_desc *desc,
7650 struct hclge_dev *hdev = vport->back;
7655 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7657 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7658 memcpy(desc[0].data,
7660 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7661 hclge_cmd_setup_basic_desc(&desc[1],
7662 HCLGE_OPC_MAC_VLAN_ADD,
7664 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7665 hclge_cmd_setup_basic_desc(&desc[2],
7666 HCLGE_OPC_MAC_VLAN_ADD,
7668 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7670 memcpy(desc[0].data,
7672 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7673 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7676 dev_err(&hdev->pdev->dev,
7677 "lookup mac addr failed for cmd_send, ret =%d.\n",
7681 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7682 retval = le16_to_cpu(desc[0].retval);
7684 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7685 HCLGE_MAC_VLAN_LKUP);
7688 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7689 struct hclge_mac_vlan_tbl_entry_cmd *req,
7690 struct hclge_desc *mc_desc)
7692 struct hclge_dev *hdev = vport->back;
7699 struct hclge_desc desc;
7701 hclge_cmd_setup_basic_desc(&desc,
7702 HCLGE_OPC_MAC_VLAN_ADD,
7704 memcpy(desc.data, req,
7705 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7706 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7707 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7708 retval = le16_to_cpu(desc.retval);
7710 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7712 HCLGE_MAC_VLAN_ADD);
7714 hclge_cmd_reuse_desc(&mc_desc[0], false);
7715 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7716 hclge_cmd_reuse_desc(&mc_desc[1], false);
7717 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7718 hclge_cmd_reuse_desc(&mc_desc[2], false);
7719 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7720 memcpy(mc_desc[0].data, req,
7721 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7722 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7723 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7724 retval = le16_to_cpu(mc_desc[0].retval);
7726 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7728 HCLGE_MAC_VLAN_ADD);
7732 dev_err(&hdev->pdev->dev,
7733 "add mac addr failed for cmd_send, ret =%d.\n",
7741 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7742 u16 *allocated_size)
7744 struct hclge_umv_spc_alc_cmd *req;
7745 struct hclge_desc desc;
7748 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7749 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7751 req->space_size = cpu_to_le32(space_size);
7753 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7755 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7760 *allocated_size = le32_to_cpu(desc.data[1]);
7765 static int hclge_init_umv_space(struct hclge_dev *hdev)
7767 u16 allocated_size = 0;
7770 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7774 if (allocated_size < hdev->wanted_umv_size)
7775 dev_warn(&hdev->pdev->dev,
7776 "failed to alloc umv space, want %u, get %u\n",
7777 hdev->wanted_umv_size, allocated_size);
7779 hdev->max_umv_size = allocated_size;
7780 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7781 hdev->share_umv_size = hdev->priv_umv_size +
7782 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7787 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7789 struct hclge_vport *vport;
7792 for (i = 0; i < hdev->num_alloc_vport; i++) {
7793 vport = &hdev->vport[i];
7794 vport->used_umv_num = 0;
7797 mutex_lock(&hdev->vport_lock);
7798 hdev->share_umv_size = hdev->priv_umv_size +
7799 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7800 mutex_unlock(&hdev->vport_lock);
7803 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7805 struct hclge_dev *hdev = vport->back;
7809 mutex_lock(&hdev->vport_lock);
7811 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7812 hdev->share_umv_size == 0);
7815 mutex_unlock(&hdev->vport_lock);
7820 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7822 struct hclge_dev *hdev = vport->back;
7825 if (vport->used_umv_num > hdev->priv_umv_size)
7826 hdev->share_umv_size++;
7828 if (vport->used_umv_num > 0)
7829 vport->used_umv_num--;
7831 if (vport->used_umv_num >= hdev->priv_umv_size &&
7832 hdev->share_umv_size > 0)
7833 hdev->share_umv_size--;
7834 vport->used_umv_num++;
7838 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7841 struct hclge_mac_node *mac_node, *tmp;
7843 list_for_each_entry_safe(mac_node, tmp, list, node)
7844 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7850 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7851 enum HCLGE_MAC_NODE_STATE state)
7854 /* from set_rx_mode or tmp_add_list */
7855 case HCLGE_MAC_TO_ADD:
7856 if (mac_node->state == HCLGE_MAC_TO_DEL)
7857 mac_node->state = HCLGE_MAC_ACTIVE;
7859 /* only from set_rx_mode */
7860 case HCLGE_MAC_TO_DEL:
7861 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7862 list_del(&mac_node->node);
7865 mac_node->state = HCLGE_MAC_TO_DEL;
7868 /* only from tmp_add_list, the mac_node->state won't be
7871 case HCLGE_MAC_ACTIVE:
7872 if (mac_node->state == HCLGE_MAC_TO_ADD)
7873 mac_node->state = HCLGE_MAC_ACTIVE;
7879 int hclge_update_mac_list(struct hclge_vport *vport,
7880 enum HCLGE_MAC_NODE_STATE state,
7881 enum HCLGE_MAC_ADDR_TYPE mac_type,
7882 const unsigned char *addr)
7884 struct hclge_dev *hdev = vport->back;
7885 struct hclge_mac_node *mac_node;
7886 struct list_head *list;
7888 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7889 &vport->uc_mac_list : &vport->mc_mac_list;
7891 spin_lock_bh(&vport->mac_list_lock);
7893 /* if the mac addr is already in the mac list, no need to add a new
7894 * one into it, just check the mac addr state, convert it to a new
7895 * new state, or just remove it, or do nothing.
7897 mac_node = hclge_find_mac_node(list, addr);
7899 hclge_update_mac_node(mac_node, state);
7900 spin_unlock_bh(&vport->mac_list_lock);
7901 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7905 /* if this address is never added, unnecessary to delete */
7906 if (state == HCLGE_MAC_TO_DEL) {
7907 spin_unlock_bh(&vport->mac_list_lock);
7908 dev_err(&hdev->pdev->dev,
7909 "failed to delete address %pM from mac list\n",
7914 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7916 spin_unlock_bh(&vport->mac_list_lock);
7920 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7922 mac_node->state = state;
7923 ether_addr_copy(mac_node->mac_addr, addr);
7924 list_add_tail(&mac_node->node, list);
7926 spin_unlock_bh(&vport->mac_list_lock);
7931 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7932 const unsigned char *addr)
7934 struct hclge_vport *vport = hclge_get_vport(handle);
7936 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7940 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7941 const unsigned char *addr)
7943 struct hclge_dev *hdev = vport->back;
7944 struct hclge_mac_vlan_tbl_entry_cmd req;
7945 struct hclge_desc desc;
7946 u16 egress_port = 0;
7949 /* mac addr check */
7950 if (is_zero_ether_addr(addr) ||
7951 is_broadcast_ether_addr(addr) ||
7952 is_multicast_ether_addr(addr)) {
7953 dev_err(&hdev->pdev->dev,
7954 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7955 addr, is_zero_ether_addr(addr),
7956 is_broadcast_ether_addr(addr),
7957 is_multicast_ether_addr(addr));
7961 memset(&req, 0, sizeof(req));
7963 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7964 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7966 req.egress_port = cpu_to_le16(egress_port);
7968 hclge_prepare_mac_addr(&req, addr, false);
7970 /* Lookup the mac address in the mac_vlan table, and add
7971 * it if the entry is inexistent. Repeated unicast entry
7972 * is not allowed in the mac vlan table.
7974 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7975 if (ret == -ENOENT) {
7976 mutex_lock(&hdev->vport_lock);
7977 if (!hclge_is_umv_space_full(vport, false)) {
7978 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7980 hclge_update_umv_space(vport, false);
7981 mutex_unlock(&hdev->vport_lock);
7984 mutex_unlock(&hdev->vport_lock);
7986 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7987 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7988 hdev->priv_umv_size);
7993 /* check if we just hit the duplicate */
7995 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7996 vport->vport_id, addr);
8000 dev_err(&hdev->pdev->dev,
8001 "PF failed to add unicast entry(%pM) in the MAC table\n",
8007 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8008 const unsigned char *addr)
8010 struct hclge_vport *vport = hclge_get_vport(handle);
8012 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8016 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8017 const unsigned char *addr)
8019 struct hclge_dev *hdev = vport->back;
8020 struct hclge_mac_vlan_tbl_entry_cmd req;
8023 /* mac addr check */
8024 if (is_zero_ether_addr(addr) ||
8025 is_broadcast_ether_addr(addr) ||
8026 is_multicast_ether_addr(addr)) {
8027 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8032 memset(&req, 0, sizeof(req));
8033 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8034 hclge_prepare_mac_addr(&req, addr, false);
8035 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8037 mutex_lock(&hdev->vport_lock);
8038 hclge_update_umv_space(vport, true);
8039 mutex_unlock(&hdev->vport_lock);
8040 } else if (ret == -ENOENT) {
8047 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8048 const unsigned char *addr)
8050 struct hclge_vport *vport = hclge_get_vport(handle);
8052 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8056 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8057 const unsigned char *addr)
8059 struct hclge_dev *hdev = vport->back;
8060 struct hclge_mac_vlan_tbl_entry_cmd req;
8061 struct hclge_desc desc[3];
8064 /* mac addr check */
8065 if (!is_multicast_ether_addr(addr)) {
8066 dev_err(&hdev->pdev->dev,
8067 "Add mc mac err! invalid mac:%pM.\n",
8071 memset(&req, 0, sizeof(req));
8072 hclge_prepare_mac_addr(&req, addr, true);
8073 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8075 /* This mac addr do not exist, add new entry for it */
8076 memset(desc[0].data, 0, sizeof(desc[0].data));
8077 memset(desc[1].data, 0, sizeof(desc[0].data));
8078 memset(desc[2].data, 0, sizeof(desc[0].data));
8080 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8083 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8085 /* if already overflow, not to print each time */
8086 if (status == -ENOSPC &&
8087 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8088 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8093 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8094 const unsigned char *addr)
8096 struct hclge_vport *vport = hclge_get_vport(handle);
8098 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8102 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8103 const unsigned char *addr)
8105 struct hclge_dev *hdev = vport->back;
8106 struct hclge_mac_vlan_tbl_entry_cmd req;
8107 enum hclge_cmd_status status;
8108 struct hclge_desc desc[3];
8110 /* mac addr check */
8111 if (!is_multicast_ether_addr(addr)) {
8112 dev_dbg(&hdev->pdev->dev,
8113 "Remove mc mac err! invalid mac:%pM.\n",
8118 memset(&req, 0, sizeof(req));
8119 hclge_prepare_mac_addr(&req, addr, true);
8120 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8122 /* This mac addr exist, remove this handle's VFID for it */
8123 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8127 if (hclge_is_all_function_id_zero(desc))
8128 /* All the vfid is zero, so need to delete this entry */
8129 status = hclge_remove_mac_vlan_tbl(vport, &req);
8131 /* Not all the vfid is zero, update the vfid */
8132 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8134 } else if (status == -ENOENT) {
8141 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8142 struct list_head *list,
8143 int (*sync)(struct hclge_vport *,
8144 const unsigned char *))
8146 struct hclge_mac_node *mac_node, *tmp;
8149 list_for_each_entry_safe(mac_node, tmp, list, node) {
8150 ret = sync(vport, mac_node->mac_addr);
8152 mac_node->state = HCLGE_MAC_ACTIVE;
8154 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8161 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8162 struct list_head *list,
8163 int (*unsync)(struct hclge_vport *,
8164 const unsigned char *))
8166 struct hclge_mac_node *mac_node, *tmp;
8169 list_for_each_entry_safe(mac_node, tmp, list, node) {
8170 ret = unsync(vport, mac_node->mac_addr);
8171 if (!ret || ret == -ENOENT) {
8172 list_del(&mac_node->node);
8175 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8182 static bool hclge_sync_from_add_list(struct list_head *add_list,
8183 struct list_head *mac_list)
8185 struct hclge_mac_node *mac_node, *tmp, *new_node;
8186 bool all_added = true;
8188 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8189 if (mac_node->state == HCLGE_MAC_TO_ADD)
8192 /* if the mac address from tmp_add_list is not in the
8193 * uc/mc_mac_list, it means have received a TO_DEL request
8194 * during the time window of adding the mac address into mac
8195 * table. if mac_node state is ACTIVE, then change it to TO_DEL,
8196 * then it will be removed at next time. else it must be TO_ADD,
8197 * this address hasn't been added into mac table,
8198 * so just remove the mac node.
8200 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8202 hclge_update_mac_node(new_node, mac_node->state);
8203 list_del(&mac_node->node);
8205 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8206 mac_node->state = HCLGE_MAC_TO_DEL;
8207 list_del(&mac_node->node);
8208 list_add_tail(&mac_node->node, mac_list);
8210 list_del(&mac_node->node);
8218 static void hclge_sync_from_del_list(struct list_head *del_list,
8219 struct list_head *mac_list)
8221 struct hclge_mac_node *mac_node, *tmp, *new_node;
8223 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8224 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8226 /* If the mac addr exists in the mac list, it means
8227 * received a new TO_ADD request during the time window
8228 * of configuring the mac address. For the mac node
8229 * state is TO_ADD, and the address is already in the
8230 * in the hardware(due to delete fail), so we just need
8231 * to change the mac node state to ACTIVE.
8233 new_node->state = HCLGE_MAC_ACTIVE;
8234 list_del(&mac_node->node);
8237 list_del(&mac_node->node);
8238 list_add_tail(&mac_node->node, mac_list);
8243 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8244 enum HCLGE_MAC_ADDR_TYPE mac_type,
8247 if (mac_type == HCLGE_MAC_ADDR_UC) {
8249 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8251 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8254 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8256 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8260 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8261 enum HCLGE_MAC_ADDR_TYPE mac_type)
8263 struct hclge_mac_node *mac_node, *tmp, *new_node;
8264 struct list_head tmp_add_list, tmp_del_list;
8265 struct list_head *list;
8268 INIT_LIST_HEAD(&tmp_add_list);
8269 INIT_LIST_HEAD(&tmp_del_list);
8271 /* move the mac addr to the tmp_add_list and tmp_del_list, then
8272 * we can add/delete these mac addr outside the spin lock
8274 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8275 &vport->uc_mac_list : &vport->mc_mac_list;
8277 spin_lock_bh(&vport->mac_list_lock);
8279 list_for_each_entry_safe(mac_node, tmp, list, node) {
8280 switch (mac_node->state) {
8281 case HCLGE_MAC_TO_DEL:
8282 list_del(&mac_node->node);
8283 list_add_tail(&mac_node->node, &tmp_del_list);
8285 case HCLGE_MAC_TO_ADD:
8286 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8289 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8290 new_node->state = mac_node->state;
8291 list_add_tail(&new_node->node, &tmp_add_list);
8299 spin_unlock_bh(&vport->mac_list_lock);
8301 /* delete first, in order to get max mac table space for adding */
8302 if (mac_type == HCLGE_MAC_ADDR_UC) {
8303 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8304 hclge_rm_uc_addr_common);
8305 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8306 hclge_add_uc_addr_common);
8308 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8309 hclge_rm_mc_addr_common);
8310 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8311 hclge_add_mc_addr_common);
8314 /* if some mac addresses were added/deleted fail, move back to the
8315 * mac_list, and retry at next time.
8317 spin_lock_bh(&vport->mac_list_lock);
8319 hclge_sync_from_del_list(&tmp_del_list, list);
8320 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8322 spin_unlock_bh(&vport->mac_list_lock);
8324 hclge_update_overflow_flags(vport, mac_type, all_added);
8327 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8329 struct hclge_dev *hdev = vport->back;
8331 if (test_bit(vport->vport_id, hdev->vport_config_block))
8334 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8340 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8344 for (i = 0; i < hdev->num_alloc_vport; i++) {
8345 struct hclge_vport *vport = &hdev->vport[i];
8347 if (!hclge_need_sync_mac_table(vport))
8350 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8351 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8355 static void hclge_build_del_list(struct list_head *list,
8357 struct list_head *tmp_del_list)
8359 struct hclge_mac_node *mac_cfg, *tmp;
8361 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8362 switch (mac_cfg->state) {
8363 case HCLGE_MAC_TO_DEL:
8364 case HCLGE_MAC_ACTIVE:
8365 list_del(&mac_cfg->node);
8366 list_add_tail(&mac_cfg->node, tmp_del_list);
8368 case HCLGE_MAC_TO_ADD:
8370 list_del(&mac_cfg->node);
8378 static void hclge_unsync_del_list(struct hclge_vport *vport,
8379 int (*unsync)(struct hclge_vport *vport,
8380 const unsigned char *addr),
8382 struct list_head *tmp_del_list)
8384 struct hclge_mac_node *mac_cfg, *tmp;
8387 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8388 ret = unsync(vport, mac_cfg->mac_addr);
8389 if (!ret || ret == -ENOENT) {
8390 /* clear all mac addr from hardware, but remain these
8391 * mac addr in the mac list, and restore them after
8392 * vf reset finished.
8395 mac_cfg->state == HCLGE_MAC_ACTIVE) {
8396 mac_cfg->state = HCLGE_MAC_TO_ADD;
8398 list_del(&mac_cfg->node);
8401 } else if (is_del_list) {
8402 mac_cfg->state = HCLGE_MAC_TO_DEL;
8407 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8408 enum HCLGE_MAC_ADDR_TYPE mac_type)
8410 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8411 struct hclge_dev *hdev = vport->back;
8412 struct list_head tmp_del_list, *list;
8414 if (mac_type == HCLGE_MAC_ADDR_UC) {
8415 list = &vport->uc_mac_list;
8416 unsync = hclge_rm_uc_addr_common;
8418 list = &vport->mc_mac_list;
8419 unsync = hclge_rm_mc_addr_common;
8422 INIT_LIST_HEAD(&tmp_del_list);
8425 set_bit(vport->vport_id, hdev->vport_config_block);
8427 spin_lock_bh(&vport->mac_list_lock);
8429 hclge_build_del_list(list, is_del_list, &tmp_del_list);
8431 spin_unlock_bh(&vport->mac_list_lock);
8433 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8435 spin_lock_bh(&vport->mac_list_lock);
8437 hclge_sync_from_del_list(&tmp_del_list, list);
8439 spin_unlock_bh(&vport->mac_list_lock);
8442 /* remove all mac address when uninitailize */
8443 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8444 enum HCLGE_MAC_ADDR_TYPE mac_type)
8446 struct hclge_mac_node *mac_node, *tmp;
8447 struct hclge_dev *hdev = vport->back;
8448 struct list_head tmp_del_list, *list;
8450 INIT_LIST_HEAD(&tmp_del_list);
8452 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8453 &vport->uc_mac_list : &vport->mc_mac_list;
8455 spin_lock_bh(&vport->mac_list_lock);
8457 list_for_each_entry_safe(mac_node, tmp, list, node) {
8458 switch (mac_node->state) {
8459 case HCLGE_MAC_TO_DEL:
8460 case HCLGE_MAC_ACTIVE:
8461 list_del(&mac_node->node);
8462 list_add_tail(&mac_node->node, &tmp_del_list);
8464 case HCLGE_MAC_TO_ADD:
8465 list_del(&mac_node->node);
8471 spin_unlock_bh(&vport->mac_list_lock);
8473 if (mac_type == HCLGE_MAC_ADDR_UC)
8474 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8475 hclge_rm_uc_addr_common);
8477 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8478 hclge_rm_mc_addr_common);
8480 if (!list_empty(&tmp_del_list))
8481 dev_warn(&hdev->pdev->dev,
8482 "uninit %s mac list for vport %u not completely.\n",
8483 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8486 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8487 list_del(&mac_node->node);
8492 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8494 struct hclge_vport *vport;
8497 for (i = 0; i < hdev->num_alloc_vport; i++) {
8498 vport = &hdev->vport[i];
8499 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8500 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8504 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8505 u16 cmdq_resp, u8 resp_code)
8507 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
8508 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
8509 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
8510 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
8515 dev_err(&hdev->pdev->dev,
8516 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8521 switch (resp_code) {
8522 case HCLGE_ETHERTYPE_SUCCESS_ADD:
8523 case HCLGE_ETHERTYPE_ALREADY_ADD:
8526 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8527 dev_err(&hdev->pdev->dev,
8528 "add mac ethertype failed for manager table overflow.\n");
8529 return_status = -EIO;
8531 case HCLGE_ETHERTYPE_KEY_CONFLICT:
8532 dev_err(&hdev->pdev->dev,
8533 "add mac ethertype failed for key conflict.\n");
8534 return_status = -EIO;
8537 dev_err(&hdev->pdev->dev,
8538 "add mac ethertype failed for undefined, code=%u.\n",
8540 return_status = -EIO;
8543 return return_status;
8546 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8549 struct hclge_mac_vlan_tbl_entry_cmd req;
8550 struct hclge_dev *hdev = vport->back;
8551 struct hclge_desc desc;
8552 u16 egress_port = 0;
8555 if (is_zero_ether_addr(mac_addr))
8558 memset(&req, 0, sizeof(req));
8559 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8560 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8561 req.egress_port = cpu_to_le16(egress_port);
8562 hclge_prepare_mac_addr(&req, mac_addr, false);
8564 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8567 vf_idx += HCLGE_VF_VPORT_START_NUM;
8568 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8570 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8576 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8579 struct hclge_vport *vport = hclge_get_vport(handle);
8580 struct hclge_dev *hdev = vport->back;
8582 vport = hclge_get_vf_vport(hdev, vf);
8586 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8587 dev_info(&hdev->pdev->dev,
8588 "Specified MAC(=%pM) is same as before, no change committed!\n",
8593 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8594 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8599 ether_addr_copy(vport->vf_info.mac, mac_addr);
8601 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8602 dev_info(&hdev->pdev->dev,
8603 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8605 return hclge_inform_reset_assert_to_vf(vport);
8608 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8613 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8614 const struct hclge_mac_mgr_tbl_entry_cmd *req)
8616 struct hclge_desc desc;
8621 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8622 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8624 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8626 dev_err(&hdev->pdev->dev,
8627 "add mac ethertype failed for cmd_send, ret =%d.\n",
8632 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8633 retval = le16_to_cpu(desc.retval);
8635 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8638 static int init_mgr_tbl(struct hclge_dev *hdev)
8643 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8644 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8646 dev_err(&hdev->pdev->dev,
8647 "add mac ethertype failed, ret =%d.\n",
8656 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8658 struct hclge_vport *vport = hclge_get_vport(handle);
8659 struct hclge_dev *hdev = vport->back;
8661 ether_addr_copy(p, hdev->hw.mac.mac_addr);
8664 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8665 const u8 *old_addr, const u8 *new_addr)
8667 struct list_head *list = &vport->uc_mac_list;
8668 struct hclge_mac_node *old_node, *new_node;
8670 new_node = hclge_find_mac_node(list, new_addr);
8672 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8676 new_node->state = HCLGE_MAC_TO_ADD;
8677 ether_addr_copy(new_node->mac_addr, new_addr);
8678 list_add(&new_node->node, list);
8680 if (new_node->state == HCLGE_MAC_TO_DEL)
8681 new_node->state = HCLGE_MAC_ACTIVE;
8683 /* make sure the new addr is in the list head, avoid dev
8684 * addr may be not re-added into mac table for the umv space
8685 * limitation after global/imp reset which will clear mac
8686 * table by hardware.
8688 list_move(&new_node->node, list);
8691 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8692 old_node = hclge_find_mac_node(list, old_addr);
8694 if (old_node->state == HCLGE_MAC_TO_ADD) {
8695 list_del(&old_node->node);
8698 old_node->state = HCLGE_MAC_TO_DEL;
8703 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8708 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8711 const unsigned char *new_addr = (const unsigned char *)p;
8712 struct hclge_vport *vport = hclge_get_vport(handle);
8713 struct hclge_dev *hdev = vport->back;
8714 unsigned char *old_addr = NULL;
8717 /* mac addr check */
8718 if (is_zero_ether_addr(new_addr) ||
8719 is_broadcast_ether_addr(new_addr) ||
8720 is_multicast_ether_addr(new_addr)) {
8721 dev_err(&hdev->pdev->dev,
8722 "change uc mac err! invalid mac: %pM.\n",
8727 ret = hclge_pause_addr_cfg(hdev, new_addr);
8729 dev_err(&hdev->pdev->dev,
8730 "failed to configure mac pause address, ret = %d\n",
8736 old_addr = hdev->hw.mac.mac_addr;
8738 spin_lock_bh(&vport->mac_list_lock);
8739 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8741 dev_err(&hdev->pdev->dev,
8742 "failed to change the mac addr:%pM, ret = %d\n",
8744 spin_unlock_bh(&vport->mac_list_lock);
8747 hclge_pause_addr_cfg(hdev, old_addr);
8751 /* we must update dev addr with spin lock protect, preventing dev addr
8752 * being removed by set_rx_mode path.
8754 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8755 spin_unlock_bh(&vport->mac_list_lock);
8757 hclge_task_schedule(hdev, 0);
8762 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8765 struct hclge_vport *vport = hclge_get_vport(handle);
8766 struct hclge_dev *hdev = vport->back;
8768 if (!hdev->hw.mac.phydev)
8771 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8774 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8775 u8 fe_type, bool filter_en, u8 vf_id)
8777 struct hclge_vlan_filter_ctrl_cmd *req;
8778 struct hclge_desc desc;
8781 /* read current vlan filter parameter */
8782 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8783 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8784 req->vlan_type = vlan_type;
8787 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8789 dev_err(&hdev->pdev->dev,
8790 "failed to get vlan filter config, ret = %d.\n", ret);
8794 /* modify and write new config parameter */
8795 hclge_cmd_reuse_desc(&desc, false);
8796 req->vlan_fe = filter_en ?
8797 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8799 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8801 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8807 #define HCLGE_FILTER_TYPE_VF 0
8808 #define HCLGE_FILTER_TYPE_PORT 1
8809 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8810 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8811 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8812 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8813 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8814 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8815 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8816 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8817 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8819 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8821 struct hclge_vport *vport = hclge_get_vport(handle);
8822 struct hclge_dev *hdev = vport->back;
8824 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8825 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8826 HCLGE_FILTER_FE_EGRESS, enable, 0);
8827 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8828 HCLGE_FILTER_FE_INGRESS, enable, 0);
8830 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8831 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8835 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8837 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8840 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
8841 bool is_kill, u16 vlan,
8842 struct hclge_desc *desc)
8844 struct hclge_vlan_filter_vf_cfg_cmd *req0;
8845 struct hclge_vlan_filter_vf_cfg_cmd *req1;
8850 hclge_cmd_setup_basic_desc(&desc[0],
8851 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8852 hclge_cmd_setup_basic_desc(&desc[1],
8853 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8855 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8857 vf_byte_off = vfid / 8;
8858 vf_byte_val = 1 << (vfid % 8);
8860 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8861 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8863 req0->vlan_id = cpu_to_le16(vlan);
8864 req0->vlan_cfg = is_kill;
8866 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8867 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8869 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8871 ret = hclge_cmd_send(&hdev->hw, desc, 2);
8873 dev_err(&hdev->pdev->dev,
8874 "Send vf vlan command fail, ret =%d.\n",
8882 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
8883 bool is_kill, struct hclge_desc *desc)
8885 struct hclge_vlan_filter_vf_cfg_cmd *req;
8887 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8890 #define HCLGE_VF_VLAN_NO_ENTRY 2
8891 if (!req->resp_code || req->resp_code == 1)
8894 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8895 set_bit(vfid, hdev->vf_vlan_full);
8896 dev_warn(&hdev->pdev->dev,
8897 "vf vlan table is full, vf vlan filter is disabled\n");
8901 dev_err(&hdev->pdev->dev,
8902 "Add vf vlan filter fail, ret =%u.\n",
8905 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
8906 if (!req->resp_code)
8909 /* vf vlan filter is disabled when vf vlan table is full,
8910 * then new vlan id will not be added into vf vlan table.
8911 * Just return 0 without warning, avoid massive verbose
8912 * print logs when unload.
8914 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8917 dev_err(&hdev->pdev->dev,
8918 "Kill vf vlan filter fail, ret =%u.\n",
8925 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8926 bool is_kill, u16 vlan,
8929 struct hclge_vport *vport = &hdev->vport[vfid];
8930 struct hclge_desc desc[2];
8933 /* if vf vlan table is full, firmware will close vf vlan filter, it
8934 * is unable and unnecessary to add new vlan id to vf vlan filter.
8935 * If spoof check is enable, and vf vlan is full, it shouldn't add
8936 * new vlan, because tx packets with these vlan id will be dropped.
8938 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8939 if (vport->vf_info.spoofchk && vlan) {
8940 dev_err(&hdev->pdev->dev,
8941 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8947 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
8951 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
8954 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8955 u16 vlan_id, bool is_kill)
8957 struct hclge_vlan_filter_pf_cfg_cmd *req;
8958 struct hclge_desc desc;
8959 u8 vlan_offset_byte_val;
8960 u8 vlan_offset_byte;
8964 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8966 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8967 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8968 HCLGE_VLAN_BYTE_SIZE;
8969 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8971 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8972 req->vlan_offset = vlan_offset_160;
8973 req->vlan_cfg = is_kill;
8974 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8976 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8978 dev_err(&hdev->pdev->dev,
8979 "port vlan command, send fail, ret =%d.\n", ret);
8983 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8984 u16 vport_id, u16 vlan_id,
8987 u16 vport_idx, vport_num = 0;
8990 if (is_kill && !vlan_id)
8993 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8996 dev_err(&hdev->pdev->dev,
8997 "Set %u vport vlan filter config fail, ret =%d.\n",
9002 /* vlan 0 may be added twice when 8021q module is enabled */
9003 if (!is_kill && !vlan_id &&
9004 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9007 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9008 dev_err(&hdev->pdev->dev,
9009 "Add port vlan failed, vport %u is already in vlan %u\n",
9015 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9016 dev_err(&hdev->pdev->dev,
9017 "Delete port vlan failed, vport %u is not in vlan %u\n",
9022 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9025 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9026 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9032 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9034 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9035 struct hclge_vport_vtag_tx_cfg_cmd *req;
9036 struct hclge_dev *hdev = vport->back;
9037 struct hclge_desc desc;
9041 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9043 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9044 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9045 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9046 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9047 vcfg->accept_tag1 ? 1 : 0);
9048 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9049 vcfg->accept_untag1 ? 1 : 0);
9050 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9051 vcfg->accept_tag2 ? 1 : 0);
9052 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9053 vcfg->accept_untag2 ? 1 : 0);
9054 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9055 vcfg->insert_tag1_en ? 1 : 0);
9056 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9057 vcfg->insert_tag2_en ? 1 : 0);
9058 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9059 vcfg->tag_shift_mode_en ? 1 : 0);
9060 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9062 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9063 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9064 HCLGE_VF_NUM_PER_BYTE;
9065 req->vf_bitmap[bmap_index] =
9066 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9068 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9070 dev_err(&hdev->pdev->dev,
9071 "Send port txvlan cfg command fail, ret =%d\n",
9077 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9079 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9080 struct hclge_vport_vtag_rx_cfg_cmd *req;
9081 struct hclge_dev *hdev = vport->back;
9082 struct hclge_desc desc;
9086 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9088 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9089 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9090 vcfg->strip_tag1_en ? 1 : 0);
9091 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9092 vcfg->strip_tag2_en ? 1 : 0);
9093 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9094 vcfg->vlan1_vlan_prionly ? 1 : 0);
9095 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9096 vcfg->vlan2_vlan_prionly ? 1 : 0);
9097 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9098 vcfg->strip_tag1_discard_en ? 1 : 0);
9099 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9100 vcfg->strip_tag2_discard_en ? 1 : 0);
9102 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9103 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9104 HCLGE_VF_NUM_PER_BYTE;
9105 req->vf_bitmap[bmap_index] =
9106 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9108 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9110 dev_err(&hdev->pdev->dev,
9111 "Send port rxvlan cfg command fail, ret =%d\n",
9117 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9118 u16 port_base_vlan_state,
9123 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9124 vport->txvlan_cfg.accept_tag1 = true;
9125 vport->txvlan_cfg.insert_tag1_en = false;
9126 vport->txvlan_cfg.default_tag1 = 0;
9128 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9130 vport->txvlan_cfg.accept_tag1 =
9131 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9132 vport->txvlan_cfg.insert_tag1_en = true;
9133 vport->txvlan_cfg.default_tag1 = vlan_tag;
9136 vport->txvlan_cfg.accept_untag1 = true;
9138 /* accept_tag2 and accept_untag2 are not supported on
9139 * pdev revision(0x20), new revision support them,
9140 * this two fields can not be configured by user.
9142 vport->txvlan_cfg.accept_tag2 = true;
9143 vport->txvlan_cfg.accept_untag2 = true;
9144 vport->txvlan_cfg.insert_tag2_en = false;
9145 vport->txvlan_cfg.default_tag2 = 0;
9146 vport->txvlan_cfg.tag_shift_mode_en = true;
9148 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9149 vport->rxvlan_cfg.strip_tag1_en = false;
9150 vport->rxvlan_cfg.strip_tag2_en =
9151 vport->rxvlan_cfg.rx_vlan_offload_en;
9152 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9154 vport->rxvlan_cfg.strip_tag1_en =
9155 vport->rxvlan_cfg.rx_vlan_offload_en;
9156 vport->rxvlan_cfg.strip_tag2_en = true;
9157 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9160 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9161 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9162 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9164 ret = hclge_set_vlan_tx_offload_cfg(vport);
9168 return hclge_set_vlan_rx_offload_cfg(vport);
9171 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9173 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9174 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9175 struct hclge_desc desc;
9178 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9179 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9180 rx_req->ot_fst_vlan_type =
9181 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9182 rx_req->ot_sec_vlan_type =
9183 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9184 rx_req->in_fst_vlan_type =
9185 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9186 rx_req->in_sec_vlan_type =
9187 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9189 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9191 dev_err(&hdev->pdev->dev,
9192 "Send rxvlan protocol type command fail, ret =%d\n",
9197 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9199 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9200 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9201 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9203 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9205 dev_err(&hdev->pdev->dev,
9206 "Send txvlan protocol type command fail, ret =%d\n",
9212 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9214 #define HCLGE_DEF_VLAN_TYPE 0x8100
9216 struct hnae3_handle *handle = &hdev->vport[0].nic;
9217 struct hclge_vport *vport;
9221 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9222 /* for revision 0x21, vf vlan filter is per function */
9223 for (i = 0; i < hdev->num_alloc_vport; i++) {
9224 vport = &hdev->vport[i];
9225 ret = hclge_set_vlan_filter_ctrl(hdev,
9226 HCLGE_FILTER_TYPE_VF,
9227 HCLGE_FILTER_FE_EGRESS,
9234 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9235 HCLGE_FILTER_FE_INGRESS, true,
9240 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9241 HCLGE_FILTER_FE_EGRESS_V1_B,
9247 handle->netdev_flags |= HNAE3_VLAN_FLTR;
9249 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9250 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9251 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9252 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9253 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9254 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9256 ret = hclge_set_vlan_protocol_type(hdev);
9260 for (i = 0; i < hdev->num_alloc_vport; i++) {
9263 vport = &hdev->vport[i];
9264 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9266 ret = hclge_vlan_offload_cfg(vport,
9267 vport->port_base_vlan_cfg.state,
9273 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9276 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9279 struct hclge_vport_vlan_cfg *vlan;
9281 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9285 vlan->hd_tbl_status = writen_to_tbl;
9286 vlan->vlan_id = vlan_id;
9288 list_add_tail(&vlan->node, &vport->vlan_list);
9291 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9293 struct hclge_vport_vlan_cfg *vlan, *tmp;
9294 struct hclge_dev *hdev = vport->back;
9297 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9298 if (!vlan->hd_tbl_status) {
9299 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9301 vlan->vlan_id, false);
9303 dev_err(&hdev->pdev->dev,
9304 "restore vport vlan list failed, ret=%d\n",
9309 vlan->hd_tbl_status = true;
9315 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9318 struct hclge_vport_vlan_cfg *vlan, *tmp;
9319 struct hclge_dev *hdev = vport->back;
9321 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9322 if (vlan->vlan_id == vlan_id) {
9323 if (is_write_tbl && vlan->hd_tbl_status)
9324 hclge_set_vlan_filter_hw(hdev,
9330 list_del(&vlan->node);
9337 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9339 struct hclge_vport_vlan_cfg *vlan, *tmp;
9340 struct hclge_dev *hdev = vport->back;
9342 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9343 if (vlan->hd_tbl_status)
9344 hclge_set_vlan_filter_hw(hdev,
9350 vlan->hd_tbl_status = false;
9352 list_del(&vlan->node);
9356 clear_bit(vport->vport_id, hdev->vf_vlan_full);
9359 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9361 struct hclge_vport_vlan_cfg *vlan, *tmp;
9362 struct hclge_vport *vport;
9365 for (i = 0; i < hdev->num_alloc_vport; i++) {
9366 vport = &hdev->vport[i];
9367 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9368 list_del(&vlan->node);
9374 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9376 struct hclge_vport_vlan_cfg *vlan, *tmp;
9377 struct hclge_dev *hdev = vport->back;
9383 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9384 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9385 state = vport->port_base_vlan_cfg.state;
9387 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9388 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9389 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9390 vport->vport_id, vlan_id,
9395 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9396 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9398 vlan->vlan_id, false);
9401 vlan->hd_tbl_status = true;
9405 /* For global reset and imp reset, hardware will clear the mac table,
9406 * so we change the mac address state from ACTIVE to TO_ADD, then they
9407 * can be restored in the service task after reset complete. Furtherly,
9408 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
9409 * be restored after reset, so just remove these mac nodes from mac_list.
9411 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9413 struct hclge_mac_node *mac_node, *tmp;
9415 list_for_each_entry_safe(mac_node, tmp, list, node) {
9416 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9417 mac_node->state = HCLGE_MAC_TO_ADD;
9418 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9419 list_del(&mac_node->node);
9425 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9427 spin_lock_bh(&vport->mac_list_lock);
9429 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9430 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9431 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9433 spin_unlock_bh(&vport->mac_list_lock);
9436 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9438 struct hclge_vport *vport = &hdev->vport[0];
9439 struct hnae3_handle *handle = &vport->nic;
9441 hclge_restore_mac_table_common(vport);
9442 hclge_restore_vport_vlan_table(vport);
9443 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9445 hclge_restore_fd_entries(handle);
9448 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9450 struct hclge_vport *vport = hclge_get_vport(handle);
9452 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9453 vport->rxvlan_cfg.strip_tag1_en = false;
9454 vport->rxvlan_cfg.strip_tag2_en = enable;
9455 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9457 vport->rxvlan_cfg.strip_tag1_en = enable;
9458 vport->rxvlan_cfg.strip_tag2_en = true;
9459 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9462 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9463 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9464 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9465 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9467 return hclge_set_vlan_rx_offload_cfg(vport);
9470 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9471 u16 port_base_vlan_state,
9472 struct hclge_vlan_info *new_info,
9473 struct hclge_vlan_info *old_info)
9475 struct hclge_dev *hdev = vport->back;
9478 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9479 hclge_rm_vport_all_vlan_table(vport, false);
9480 return hclge_set_vlan_filter_hw(hdev,
9481 htons(new_info->vlan_proto),
9487 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9488 vport->vport_id, old_info->vlan_tag,
9493 return hclge_add_vport_all_vlan_table(vport);
9496 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9497 struct hclge_vlan_info *vlan_info)
9499 struct hnae3_handle *nic = &vport->nic;
9500 struct hclge_vlan_info *old_vlan_info;
9501 struct hclge_dev *hdev = vport->back;
9504 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9506 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9510 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9511 /* add new VLAN tag */
9512 ret = hclge_set_vlan_filter_hw(hdev,
9513 htons(vlan_info->vlan_proto),
9515 vlan_info->vlan_tag,
9520 /* remove old VLAN tag */
9521 ret = hclge_set_vlan_filter_hw(hdev,
9522 htons(old_vlan_info->vlan_proto),
9524 old_vlan_info->vlan_tag,
9532 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9537 /* update state only when disable/enable port based VLAN */
9538 vport->port_base_vlan_cfg.state = state;
9539 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9540 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9542 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9545 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9546 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9547 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9552 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9553 enum hnae3_port_base_vlan_state state,
9556 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9558 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9560 return HNAE3_PORT_BASE_VLAN_ENABLE;
9563 return HNAE3_PORT_BASE_VLAN_DISABLE;
9564 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9565 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9567 return HNAE3_PORT_BASE_VLAN_MODIFY;
9571 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9572 u16 vlan, u8 qos, __be16 proto)
9574 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9575 struct hclge_vport *vport = hclge_get_vport(handle);
9576 struct hclge_dev *hdev = vport->back;
9577 struct hclge_vlan_info vlan_info;
9581 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9584 vport = hclge_get_vf_vport(hdev, vfid);
9588 /* qos is a 3 bits value, so can not be bigger than 7 */
9589 if (vlan > VLAN_N_VID - 1 || qos > 7)
9591 if (proto != htons(ETH_P_8021Q))
9592 return -EPROTONOSUPPORT;
9594 state = hclge_get_port_base_vlan_state(vport,
9595 vport->port_base_vlan_cfg.state,
9597 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9600 vlan_info.vlan_tag = vlan;
9601 vlan_info.qos = qos;
9602 vlan_info.vlan_proto = ntohs(proto);
9604 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9606 dev_err(&hdev->pdev->dev,
9607 "failed to update port base vlan for vf %d, ret = %d\n",
9612 /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
9615 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9616 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9617 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9618 vport->vport_id, state,
9625 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9627 struct hclge_vlan_info *vlan_info;
9628 struct hclge_vport *vport;
9632 /* clear port base vlan for all vf */
9633 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9634 vport = &hdev->vport[vf];
9635 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9637 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9639 vlan_info->vlan_tag, true);
9641 dev_err(&hdev->pdev->dev,
9642 "failed to clear vf vlan for vf%d, ret = %d\n",
9643 vf - HCLGE_VF_VPORT_START_NUM, ret);
9647 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9648 u16 vlan_id, bool is_kill)
9650 struct hclge_vport *vport = hclge_get_vport(handle);
9651 struct hclge_dev *hdev = vport->back;
9652 bool writen_to_tbl = false;
9655 /* When device is resetting or reset failed, firmware is unable to
9656 * handle mailbox. Just record the vlan id, and remove it after
9659 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9660 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9661 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9665 /* when port base vlan enabled, we use port base vlan as the vlan
9666 * filter entry. In this case, we don't update vlan filter table
9667 * when user add new vlan or remove exist vlan, just update the vport
9668 * vlan list. The vlan id in vlan list will be writen in vlan filter
9669 * table until port base vlan disabled
9671 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9672 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9674 writen_to_tbl = true;
9679 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9681 hclge_add_vport_vlan_table(vport, vlan_id,
9683 } else if (is_kill) {
9684 /* when remove hw vlan filter failed, record the vlan id,
9685 * and try to remove it from hw later, to be consistence
9688 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9693 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9695 #define HCLGE_MAX_SYNC_COUNT 60
9697 int i, ret, sync_cnt = 0;
9700 /* start from vport 1 for PF is always alive */
9701 for (i = 0; i < hdev->num_alloc_vport; i++) {
9702 struct hclge_vport *vport = &hdev->vport[i];
9704 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9706 while (vlan_id != VLAN_N_VID) {
9707 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9708 vport->vport_id, vlan_id,
9710 if (ret && ret != -EINVAL)
9713 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9714 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9717 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9720 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9726 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9728 struct hclge_config_max_frm_size_cmd *req;
9729 struct hclge_desc desc;
9731 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9733 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9734 req->max_frm_size = cpu_to_le16(new_mps);
9735 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9737 return hclge_cmd_send(&hdev->hw, &desc, 1);
/* hnae3 MTU hook: delegate to the per-vport MTU setter. */
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}
9747 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9749 struct hclge_dev *hdev = vport->back;
9750 int i, max_frm_size, ret;
9752 /* HW supprt 2 layer vlan */
9753 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9754 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9755 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
9758 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9759 mutex_lock(&hdev->vport_lock);
9760 /* VF's mps must fit within hdev->mps */
9761 if (vport->vport_id && max_frm_size > hdev->mps) {
9762 mutex_unlock(&hdev->vport_lock);
9764 } else if (vport->vport_id) {
9765 vport->mps = max_frm_size;
9766 mutex_unlock(&hdev->vport_lock);
9770 /* PF's mps must be greater then VF's mps */
9771 for (i = 1; i < hdev->num_alloc_vport; i++)
9772 if (max_frm_size < hdev->vport[i].mps) {
9773 mutex_unlock(&hdev->vport_lock);
9777 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9779 ret = hclge_set_mac_mtu(hdev, max_frm_size);
9781 dev_err(&hdev->pdev->dev,
9782 "Change mtu fail, ret =%d\n", ret);
9786 hdev->mps = max_frm_size;
9787 vport->mps = max_frm_size;
9789 ret = hclge_buffer_alloc(hdev);
9791 dev_err(&hdev->pdev->dev,
9792 "Allocate buffer fail, ret =%d\n", ret);
9795 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9796 mutex_unlock(&hdev->vport_lock);
9800 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9803 struct hclge_reset_tqp_queue_cmd *req;
9804 struct hclge_desc desc;
9807 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9809 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9810 req->tqp_id = cpu_to_le16(queue_id);
9812 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9814 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9816 dev_err(&hdev->pdev->dev,
9817 "Send tqp reset cmd error, status =%d\n", ret);
9824 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9826 struct hclge_reset_tqp_queue_cmd *req;
9827 struct hclge_desc desc;
9830 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9832 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9833 req->tqp_id = cpu_to_le16(queue_id);
9835 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9837 dev_err(&hdev->pdev->dev,
9838 "Get reset status error, status =%d\n", ret);
9842 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9845 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9847 struct hnae3_queue *queue;
9848 struct hclge_tqp *tqp;
9850 queue = handle->kinfo.tqp[queue_id];
9851 tqp = container_of(queue, struct hclge_tqp, q);
9856 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9858 struct hclge_vport *vport = hclge_get_vport(handle);
9859 struct hclge_dev *hdev = vport->back;
9860 int reset_try_times = 0;
9865 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9867 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9869 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9873 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9875 dev_err(&hdev->pdev->dev,
9876 "Send reset tqp cmd fail, ret = %d\n", ret);
9880 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9881 reset_status = hclge_get_reset_status(hdev, queue_gid);
9885 /* Wait for tqp hw reset */
9886 usleep_range(1000, 1200);
9889 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9890 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9894 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9896 dev_err(&hdev->pdev->dev,
9897 "Deassert the soft reset fail, ret = %d\n", ret);
9902 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9904 struct hnae3_handle *handle = &vport->nic;
9905 struct hclge_dev *hdev = vport->back;
9906 int reset_try_times = 0;
9911 if (queue_id >= handle->kinfo.num_tqps) {
9912 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
9917 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9919 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9921 dev_warn(&hdev->pdev->dev,
9922 "Send reset tqp cmd fail, ret = %d\n", ret);
9926 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9927 reset_status = hclge_get_reset_status(hdev, queue_gid);
9931 /* Wait for tqp hw reset */
9932 usleep_range(1000, 1200);
9935 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9936 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9940 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9942 dev_warn(&hdev->pdev->dev,
9943 "Deassert the soft reset fail, ret = %d\n", ret);
9946 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9948 struct hclge_vport *vport = hclge_get_vport(handle);
9949 struct hclge_dev *hdev = vport->back;
9951 return hdev->fw_version;
9954 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9956 struct phy_device *phydev = hdev->hw.mac.phydev;
9961 phy_set_asym_pause(phydev, rx_en, tx_en);
9964 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9968 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9971 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9973 dev_err(&hdev->pdev->dev,
9974 "configure pauseparam error, ret = %d.\n", ret);
9979 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9981 struct phy_device *phydev = hdev->hw.mac.phydev;
9982 u16 remote_advertising = 0;
9983 u16 local_advertising;
9984 u32 rx_pause, tx_pause;
9987 if (!phydev->link || !phydev->autoneg)
9990 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9993 remote_advertising = LPA_PAUSE_CAP;
9995 if (phydev->asym_pause)
9996 remote_advertising |= LPA_PAUSE_ASYM;
9998 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9999 remote_advertising);
10000 tx_pause = flowctl & FLOW_CTRL_TX;
10001 rx_pause = flowctl & FLOW_CTRL_RX;
10003 if (phydev->duplex == HCLGE_MAC_HALF) {
10008 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10011 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10012 u32 *rx_en, u32 *tx_en)
10014 struct hclge_vport *vport = hclge_get_vport(handle);
10015 struct hclge_dev *hdev = vport->back;
10016 struct phy_device *phydev = hdev->hw.mac.phydev;
10018 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
10020 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10026 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10029 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10032 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10041 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10042 u32 rx_en, u32 tx_en)
10044 if (rx_en && tx_en)
10045 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10046 else if (rx_en && !tx_en)
10047 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10048 else if (!rx_en && tx_en)
10049 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10051 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10053 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10056 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10057 u32 rx_en, u32 tx_en)
10059 struct hclge_vport *vport = hclge_get_vport(handle);
10060 struct hclge_dev *hdev = vport->back;
10061 struct phy_device *phydev = hdev->hw.mac.phydev;
10065 fc_autoneg = hclge_get_autoneg(handle);
10066 if (auto_neg != fc_autoneg) {
10067 dev_info(&hdev->pdev->dev,
10068 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10069 return -EOPNOTSUPP;
10073 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10074 dev_info(&hdev->pdev->dev,
10075 "Priority flow control enabled. Cannot set link flow control.\n");
10076 return -EOPNOTSUPP;
10079 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10081 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10084 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10087 return phy_start_aneg(phydev);
10089 return -EOPNOTSUPP;
10092 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10093 u8 *auto_neg, u32 *speed, u8 *duplex)
10095 struct hclge_vport *vport = hclge_get_vport(handle);
10096 struct hclge_dev *hdev = vport->back;
10099 *speed = hdev->hw.mac.speed;
10101 *duplex = hdev->hw.mac.duplex;
10103 *auto_neg = hdev->hw.mac.autoneg;
10106 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10109 struct hclge_vport *vport = hclge_get_vport(handle);
10110 struct hclge_dev *hdev = vport->back;
10112 /* When nic is down, the service task is not running, doesn't update
10113 * the port information per second. Query the port information before
10114 * return the media type, ensure getting the correct media information.
10116 hclge_update_port_info(hdev);
10119 *media_type = hdev->hw.mac.media_type;
10122 *module_type = hdev->hw.mac.module_type;
10125 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10126 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10128 struct hclge_vport *vport = hclge_get_vport(handle);
10129 struct hclge_dev *hdev = vport->back;
10130 struct phy_device *phydev = hdev->hw.mac.phydev;
10131 int mdix_ctrl, mdix, is_resolved;
10132 unsigned int retval;
10135 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10136 *tp_mdix = ETH_TP_MDI_INVALID;
10140 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10142 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10143 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10144 HCLGE_PHY_MDIX_CTRL_S);
10146 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10147 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10148 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10150 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10152 switch (mdix_ctrl) {
10154 *tp_mdix_ctrl = ETH_TP_MDI;
10157 *tp_mdix_ctrl = ETH_TP_MDI_X;
10160 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10163 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10168 *tp_mdix = ETH_TP_MDI_INVALID;
10170 *tp_mdix = ETH_TP_MDI_X;
10172 *tp_mdix = ETH_TP_MDI;
10175 static void hclge_info_show(struct hclge_dev *hdev)
10177 struct device *dev = &hdev->pdev->dev;
10179 dev_info(dev, "PF info begin:\n");
10181 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10182 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10183 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10184 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10185 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
10186 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10187 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10188 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10189 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10190 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10191 dev_info(dev, "This is %s PF\n",
10192 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10193 dev_info(dev, "DCB %s\n",
10194 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10195 dev_info(dev, "MQPRIO %s\n",
10196 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10198 dev_info(dev, "PF info end.\n");
10201 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10202 struct hclge_vport *vport)
10204 struct hnae3_client *client = vport->nic.client;
10205 struct hclge_dev *hdev = ae_dev->priv;
10206 int rst_cnt = hdev->rst_stats.reset_cnt;
10209 ret = client->ops->init_instance(&vport->nic);
10213 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10214 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10215 rst_cnt != hdev->rst_stats.reset_cnt) {
10220 /* Enable nic hw error interrupts */
10221 ret = hclge_config_nic_hw_error(hdev, true);
10223 dev_err(&ae_dev->pdev->dev,
10224 "fail(%d) to enable hw error interrupts\n", ret);
10228 hnae3_set_client_init_flag(client, ae_dev, 1);
10230 if (netif_msg_drv(&hdev->vport->nic))
10231 hclge_info_show(hdev);
10236 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10237 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10238 msleep(HCLGE_WAIT_RESET_DONE);
10240 client->ops->uninit_instance(&vport->nic, 0);
10245 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10246 struct hclge_vport *vport)
10248 struct hclge_dev *hdev = ae_dev->priv;
10249 struct hnae3_client *client;
10253 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10257 client = hdev->roce_client;
10258 ret = hclge_init_roce_base_info(vport);
10262 rst_cnt = hdev->rst_stats.reset_cnt;
10263 ret = client->ops->init_instance(&vport->roce);
10267 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10268 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10269 rst_cnt != hdev->rst_stats.reset_cnt) {
10271 goto init_roce_err;
10274 /* Enable roce ras interrupts */
10275 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10277 dev_err(&ae_dev->pdev->dev,
10278 "fail(%d) to enable roce ras interrupts\n", ret);
10279 goto init_roce_err;
10282 hnae3_set_client_init_flag(client, ae_dev, 1);
10287 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10288 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10289 msleep(HCLGE_WAIT_RESET_DONE);
10291 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10296 static int hclge_init_client_instance(struct hnae3_client *client,
10297 struct hnae3_ae_dev *ae_dev)
10299 struct hclge_dev *hdev = ae_dev->priv;
10300 struct hclge_vport *vport;
10303 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10304 vport = &hdev->vport[i];
10306 switch (client->type) {
10307 case HNAE3_CLIENT_KNIC:
10308 hdev->nic_client = client;
10309 vport->nic.client = client;
10310 ret = hclge_init_nic_client_instance(ae_dev, vport);
10314 ret = hclge_init_roce_client_instance(ae_dev, vport);
10319 case HNAE3_CLIENT_ROCE:
10320 if (hnae3_dev_roce_supported(hdev)) {
10321 hdev->roce_client = client;
10322 vport->roce.client = client;
10325 ret = hclge_init_roce_client_instance(ae_dev, vport);
10338 hdev->nic_client = NULL;
10339 vport->nic.client = NULL;
10342 hdev->roce_client = NULL;
10343 vport->roce.client = NULL;
10347 static void hclge_uninit_client_instance(struct hnae3_client *client,
10348 struct hnae3_ae_dev *ae_dev)
10350 struct hclge_dev *hdev = ae_dev->priv;
10351 struct hclge_vport *vport;
10354 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10355 vport = &hdev->vport[i];
10356 if (hdev->roce_client) {
10357 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10358 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10359 msleep(HCLGE_WAIT_RESET_DONE);
10361 hdev->roce_client->ops->uninit_instance(&vport->roce,
10363 hdev->roce_client = NULL;
10364 vport->roce.client = NULL;
10366 if (client->type == HNAE3_CLIENT_ROCE)
10368 if (hdev->nic_client && client->ops->uninit_instance) {
10369 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10370 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10371 msleep(HCLGE_WAIT_RESET_DONE);
10373 client->ops->uninit_instance(&vport->nic, 0);
10374 hdev->nic_client = NULL;
10375 vport->nic.client = NULL;
10380 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10382 #define HCLGE_MEM_BAR 4
10384 struct pci_dev *pdev = hdev->pdev;
10385 struct hclge_hw *hw = &hdev->hw;
10387 /* for device does not have device memory, return directly */
10388 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10391 hw->mem_base = devm_ioremap_wc(&pdev->dev,
10392 pci_resource_start(pdev, HCLGE_MEM_BAR),
10393 pci_resource_len(pdev, HCLGE_MEM_BAR));
10394 if (!hw->mem_base) {
10395 dev_err(&pdev->dev, "failed to map device memory\n");
10402 static int hclge_pci_init(struct hclge_dev *hdev)
10404 struct pci_dev *pdev = hdev->pdev;
10405 struct hclge_hw *hw;
10408 ret = pci_enable_device(pdev);
10410 dev_err(&pdev->dev, "failed to enable PCI device\n");
10414 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10416 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10418 dev_err(&pdev->dev,
10419 "can't set consistent PCI DMA");
10420 goto err_disable_device;
10422 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10425 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10427 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10428 goto err_disable_device;
10431 pci_set_master(pdev);
10433 hw->io_base = pcim_iomap(pdev, 2, 0);
10434 if (!hw->io_base) {
10435 dev_err(&pdev->dev, "Can't map configuration register space\n");
10437 goto err_clr_master;
10440 ret = hclge_dev_mem_map(hdev);
10442 goto err_unmap_io_base;
10444 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10449 pcim_iounmap(pdev, hdev->hw.io_base);
10451 pci_clear_master(pdev);
10452 pci_release_regions(pdev);
10453 err_disable_device:
10454 pci_disable_device(pdev);
10459 static void hclge_pci_uninit(struct hclge_dev *hdev)
10461 struct pci_dev *pdev = hdev->pdev;
10463 if (hdev->hw.mem_base)
10464 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10466 pcim_iounmap(pdev, hdev->hw.io_base);
10467 pci_free_irq_vectors(pdev);
10468 pci_clear_master(pdev);
10469 pci_release_mem_regions(pdev);
10470 pci_disable_device(pdev);
10473 static void hclge_state_init(struct hclge_dev *hdev)
10475 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10476 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10477 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10478 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10479 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10480 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10481 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10484 static void hclge_state_uninit(struct hclge_dev *hdev)
10486 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10487 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10489 if (hdev->reset_timer.function)
10490 del_timer_sync(&hdev->reset_timer);
10491 if (hdev->service_task.work.func)
10492 cancel_delayed_work_sync(&hdev->service_task);
10495 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10497 #define HCLGE_FLR_RETRY_WAIT_MS 500
10498 #define HCLGE_FLR_RETRY_CNT 5
10500 struct hclge_dev *hdev = ae_dev->priv;
10505 down(&hdev->reset_sem);
10506 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10507 hdev->reset_type = HNAE3_FLR_RESET;
10508 ret = hclge_reset_prepare(hdev);
10509 if (ret || hdev->reset_pending) {
10510 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10512 if (hdev->reset_pending ||
10513 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10514 dev_err(&hdev->pdev->dev,
10515 "reset_pending:0x%lx, retry_cnt:%d\n",
10516 hdev->reset_pending, retry_cnt);
10517 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10518 up(&hdev->reset_sem);
10519 msleep(HCLGE_FLR_RETRY_WAIT_MS);
10524 /* disable misc vector before FLR done */
10525 hclge_enable_vector(&hdev->misc_vector, false);
10526 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10527 hdev->rst_stats.flr_rst_cnt++;
10530 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10532 struct hclge_dev *hdev = ae_dev->priv;
10535 hclge_enable_vector(&hdev->misc_vector, true);
10537 ret = hclge_reset_rebuild(hdev);
10539 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10541 hdev->reset_type = HNAE3_NONE_RESET;
10542 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10543 up(&hdev->reset_sem);
10546 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10550 for (i = 0; i < hdev->num_alloc_vport; i++) {
10551 struct hclge_vport *vport = &hdev->vport[i];
10554 /* Send cmd to clear VF's FUNC_RST_ING */
10555 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10557 dev_warn(&hdev->pdev->dev,
10558 "clear vf(%u) rst failed %d!\n",
10559 vport->vport_id, ret);
10563 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10565 struct pci_dev *pdev = ae_dev->pdev;
10566 struct hclge_dev *hdev;
10569 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10574 hdev->ae_dev = ae_dev;
10575 hdev->reset_type = HNAE3_NONE_RESET;
10576 hdev->reset_level = HNAE3_FUNC_RESET;
10577 ae_dev->priv = hdev;
10579 /* HW supprt 2 layer vlan */
10580 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10582 mutex_init(&hdev->vport_lock);
10583 spin_lock_init(&hdev->fd_rule_lock);
10584 sema_init(&hdev->reset_sem, 1);
10586 ret = hclge_pci_init(hdev);
10590 /* Firmware command queue initialize */
10591 ret = hclge_cmd_queue_init(hdev);
10593 goto err_pci_uninit;
10595 /* Firmware command initialize */
10596 ret = hclge_cmd_init(hdev);
10598 goto err_cmd_uninit;
10600 ret = hclge_get_cap(hdev);
10602 goto err_cmd_uninit;
10604 ret = hclge_query_dev_specs(hdev);
10606 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10608 goto err_cmd_uninit;
10611 ret = hclge_configure(hdev);
10613 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10614 goto err_cmd_uninit;
10617 ret = hclge_init_msi(hdev);
10619 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10620 goto err_cmd_uninit;
10623 ret = hclge_misc_irq_init(hdev);
10625 goto err_msi_uninit;
10627 ret = hclge_alloc_tqps(hdev);
10629 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10630 goto err_msi_irq_uninit;
10633 ret = hclge_alloc_vport(hdev);
10635 goto err_msi_irq_uninit;
10637 ret = hclge_map_tqp(hdev);
10639 goto err_msi_irq_uninit;
10641 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10642 ret = hclge_mac_mdio_config(hdev);
10644 goto err_msi_irq_uninit;
10647 ret = hclge_init_umv_space(hdev);
10649 goto err_mdiobus_unreg;
10651 ret = hclge_mac_init(hdev);
10653 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10654 goto err_mdiobus_unreg;
10657 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10659 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10660 goto err_mdiobus_unreg;
10663 ret = hclge_config_gro(hdev, true);
10665 goto err_mdiobus_unreg;
10667 ret = hclge_init_vlan_config(hdev);
10669 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10670 goto err_mdiobus_unreg;
10673 ret = hclge_tm_schd_init(hdev);
10675 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10676 goto err_mdiobus_unreg;
10679 ret = hclge_rss_init_cfg(hdev);
10681 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
10682 goto err_mdiobus_unreg;
10685 ret = hclge_rss_init_hw(hdev);
10687 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10688 goto err_mdiobus_unreg;
10691 ret = init_mgr_tbl(hdev);
10693 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10694 goto err_mdiobus_unreg;
10697 ret = hclge_init_fd_config(hdev);
10699 dev_err(&pdev->dev,
10700 "fd table init fail, ret=%d\n", ret);
10701 goto err_mdiobus_unreg;
10704 INIT_KFIFO(hdev->mac_tnl_log);
10706 hclge_dcb_ops_set(hdev);
10708 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10709 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10711 /* Setup affinity after service timer setup because add_timer_on
10712 * is called in affinity notify.
10714 hclge_misc_affinity_setup(hdev);
10716 hclge_clear_all_event_cause(hdev);
10717 hclge_clear_resetting_state(hdev);
10719 /* Log and clear the hw errors those already occurred */
10720 hclge_handle_all_hns_hw_errors(ae_dev);
10722 /* request delayed reset for the error recovery because an immediate
10723 * global reset on a PF affecting pending initialization of other PFs
10725 if (ae_dev->hw_err_reset_req) {
10726 enum hnae3_reset_type reset_level;
10728 reset_level = hclge_get_reset_level(ae_dev,
10729 &ae_dev->hw_err_reset_req);
10730 hclge_set_def_reset_request(ae_dev, reset_level);
10731 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10734 /* Enable MISC vector(vector0) */
10735 hclge_enable_vector(&hdev->misc_vector, true);
10737 hclge_state_init(hdev);
10738 hdev->last_reset_time = jiffies;
10740 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10741 HCLGE_DRIVER_NAME);
10743 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10748 if (hdev->hw.mac.phydev)
10749 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10750 err_msi_irq_uninit:
10751 hclge_misc_irq_uninit(hdev);
10753 pci_free_irq_vectors(pdev);
10755 hclge_cmd_uninit(hdev);
10757 pcim_iounmap(pdev, hdev->hw.io_base);
10758 pci_clear_master(pdev);
10759 pci_release_regions(pdev);
10760 pci_disable_device(pdev);
10762 mutex_destroy(&hdev->vport_lock);
10766 static void hclge_stats_clear(struct hclge_dev *hdev)
10768 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10771 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10773 return hclge_config_switch_param(hdev, vf, enable,
10774 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10777 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10779 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10780 HCLGE_FILTER_FE_NIC_INGRESS_B,
10784 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10788 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10790 dev_err(&hdev->pdev->dev,
10791 "Set vf %d mac spoof check %s failed, ret=%d\n",
10792 vf, enable ? "on" : "off", ret);
10796 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10798 dev_err(&hdev->pdev->dev,
10799 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10800 vf, enable ? "on" : "off", ret);
10805 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10808 struct hclge_vport *vport = hclge_get_vport(handle);
10809 struct hclge_dev *hdev = vport->back;
10810 u32 new_spoofchk = enable ? 1 : 0;
10813 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10814 return -EOPNOTSUPP;
10816 vport = hclge_get_vf_vport(hdev, vf);
10820 if (vport->vf_info.spoofchk == new_spoofchk)
10823 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10824 dev_warn(&hdev->pdev->dev,
10825 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10827 else if (enable && hclge_is_umv_space_full(vport, true))
10828 dev_warn(&hdev->pdev->dev,
10829 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10832 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10836 vport->vf_info.spoofchk = new_spoofchk;
10840 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10842 struct hclge_vport *vport = hdev->vport;
10846 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10849 /* resume the vf spoof check state after reset */
10850 for (i = 0; i < hdev->num_alloc_vport; i++) {
10851 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10852 vport->vf_info.spoofchk);
10862 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10864 struct hclge_vport *vport = hclge_get_vport(handle);
10865 struct hclge_dev *hdev = vport->back;
10866 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10867 u32 new_trusted = enable ? 1 : 0;
10871 vport = hclge_get_vf_vport(hdev, vf);
10875 if (vport->vf_info.trusted == new_trusted)
10878 /* Disable promisc mode for VF if it is not trusted any more. */
10879 if (!enable && vport->vf_info.promisc_enable) {
10880 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10881 ret = hclge_set_vport_promisc_mode(vport, false, false,
10885 vport->vf_info.promisc_enable = 0;
10886 hclge_inform_vf_promisc_info(vport);
10889 vport->vf_info.trusted = new_trusted;
10894 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10899 /* reset vf rate to default value */
10900 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10901 struct hclge_vport *vport = &hdev->vport[vf];
10903 vport->vf_info.max_tx_rate = 0;
10904 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10906 dev_err(&hdev->pdev->dev,
10907 "vf%d failed to reset to default, ret=%d\n",
10908 vf - HCLGE_VF_VPORT_START_NUM, ret);
10912 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
10913 int min_tx_rate, int max_tx_rate)
10915 if (min_tx_rate != 0 ||
10916 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10917 dev_err(&hdev->pdev->dev,
10918 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10919 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10926 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10927 int min_tx_rate, int max_tx_rate, bool force)
10929 struct hclge_vport *vport = hclge_get_vport(handle);
10930 struct hclge_dev *hdev = vport->back;
10933 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
10937 vport = hclge_get_vf_vport(hdev, vf);
10941 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10944 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10948 vport->vf_info.max_tx_rate = max_tx_rate;
10953 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10955 struct hnae3_handle *handle = &hdev->vport->nic;
10956 struct hclge_vport *vport;
10960 /* resume the vf max_tx_rate after reset */
10961 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10962 vport = hclge_get_vf_vport(hdev, vf);
10966 /* zero means max rate, after reset, firmware already set it to
10967 * max rate, so just continue.
10969 if (!vport->vf_info.max_tx_rate)
10972 ret = hclge_set_vf_rate(handle, vf, 0,
10973 vport->vf_info.max_tx_rate, true);
10975 dev_err(&hdev->pdev->dev,
10976 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10977 vf, vport->vf_info.max_tx_rate, ret);
10985 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10987 struct hclge_vport *vport = hdev->vport;
10990 for (i = 0; i < hdev->num_alloc_vport; i++) {
10991 hclge_vport_stop(vport);
10996 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10998 struct hclge_dev *hdev = ae_dev->priv;
10999 struct pci_dev *pdev = ae_dev->pdev;
11002 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11004 hclge_stats_clear(hdev);
11005 /* NOTE: pf reset needn't to clear or restore pf and vf table entry.
11006 * so here should not clean table in memory.
11008 if (hdev->reset_type == HNAE3_IMP_RESET ||
11009 hdev->reset_type == HNAE3_GLOBAL_RESET) {
11010 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11011 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11012 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11013 hclge_reset_umv_space(hdev);
11016 ret = hclge_cmd_init(hdev);
11018 dev_err(&pdev->dev, "Cmd queue init failed\n");
11022 ret = hclge_map_tqp(hdev);
11024 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11028 ret = hclge_mac_init(hdev);
11030 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11034 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11036 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11040 ret = hclge_config_gro(hdev, true);
11044 ret = hclge_init_vlan_config(hdev);
11046 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11050 ret = hclge_tm_init_hw(hdev, true);
11052 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11056 ret = hclge_rss_init_hw(hdev);
11058 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11062 ret = init_mgr_tbl(hdev);
11064 dev_err(&pdev->dev,
11065 "failed to reinit manager table, ret = %d\n", ret);
11069 ret = hclge_init_fd_config(hdev);
11071 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11075 /* Log and clear the hw errors those already occurred */
11076 hclge_handle_all_hns_hw_errors(ae_dev);
11078 /* Re-enable the hw error interrupts because
11079 * the interrupts get disabled on global reset.
11081 ret = hclge_config_nic_hw_error(hdev, true);
11083 dev_err(&pdev->dev,
11084 "fail(%d) to re-enable NIC hw error interrupts\n",
11089 if (hdev->roce_client) {
11090 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11092 dev_err(&pdev->dev,
11093 "fail(%d) to re-enable roce ras interrupts\n",
11099 hclge_reset_vport_state(hdev);
11100 ret = hclge_reset_vport_spoofchk(hdev);
11104 ret = hclge_resume_vf_rate(hdev);
11108 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11109 HCLGE_DRIVER_NAME);
11114 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11116 struct hclge_dev *hdev = ae_dev->priv;
11117 struct hclge_mac *mac = &hdev->hw.mac;
11119 hclge_reset_vf_rate(hdev);
11120 hclge_clear_vf_vlan(hdev);
11121 hclge_misc_affinity_teardown(hdev);
11122 hclge_state_uninit(hdev);
11123 hclge_uninit_mac_table(hdev);
11126 mdiobus_unregister(mac->mdio_bus);
11128 /* Disable MISC vector(vector0) */
11129 hclge_enable_vector(&hdev->misc_vector, false);
11130 synchronize_irq(hdev->misc_vector.vector_irq);
11132 /* Disable all hw interrupts */
11133 hclge_config_mac_tnl_int(hdev, false);
11134 hclge_config_nic_hw_error(hdev, false);
11135 hclge_config_rocee_ras_interrupt(hdev, false);
11137 hclge_cmd_uninit(hdev);
11138 hclge_misc_irq_uninit(hdev);
11139 hclge_pci_uninit(hdev);
11140 mutex_destroy(&hdev->vport_lock);
11141 hclge_uninit_vport_vlan_table(hdev);
11142 ae_dev->priv = NULL;
11145 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11147 struct hclge_vport *vport = hclge_get_vport(handle);
11148 struct hclge_dev *hdev = vport->back;
11150 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11153 static void hclge_get_channels(struct hnae3_handle *handle,
11154 struct ethtool_channels *ch)
11156 ch->max_combined = hclge_get_max_channels(handle);
11157 ch->other_count = 1;
11159 ch->combined_count = handle->kinfo.rss_size;
11162 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11163 u16 *alloc_tqps, u16 *max_rss_size)
11165 struct hclge_vport *vport = hclge_get_vport(handle);
11166 struct hclge_dev *hdev = vport->back;
11168 *alloc_tqps = vport->alloc_tqps;
11169 *max_rss_size = hdev->pf_rss_size_max;
11172 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11173 bool rxfh_configured)
11175 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11176 struct hclge_vport *vport = hclge_get_vport(handle);
11177 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11178 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11179 struct hclge_dev *hdev = vport->back;
11180 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11181 u16 cur_rss_size = kinfo->rss_size;
11182 u16 cur_tqps = kinfo->num_tqps;
11183 u16 tc_valid[HCLGE_MAX_TC_NUM];
11189 kinfo->req_rss_size = new_tqps_num;
11191 ret = hclge_tm_vport_map_update(hdev);
11193 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11197 roundup_size = roundup_pow_of_two(kinfo->rss_size);
11198 roundup_size = ilog2(roundup_size);
11199 /* Set the RSS TC mode according to the new RSS size */
11200 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11203 if (!(hdev->hw_tc_map & BIT(i)))
11207 tc_size[i] = roundup_size;
11208 tc_offset[i] = kinfo->rss_size * i;
11210 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11214 /* RSS indirection table has been configuared by user */
11215 if (rxfh_configured)
11218 /* Reinitializes the rss indirect table according to the new RSS size */
11219 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11224 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11225 rss_indir[i] = i % kinfo->rss_size;
11227 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11229 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11236 dev_info(&hdev->pdev->dev,
11237 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
11238 cur_rss_size, kinfo->rss_size,
11239 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11244 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11245 u32 *regs_num_64_bit)
11247 struct hclge_desc desc;
11251 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11252 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11254 dev_err(&hdev->pdev->dev,
11255 "Query register number cmd failed, ret = %d.\n", ret);
11259 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
11260 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
11262 total_num = *regs_num_32_bit + *regs_num_64_bit;
11269 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11272 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11273 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11275 struct hclge_desc *desc;
11276 u32 *reg_val = data;
11286 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11287 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11288 HCLGE_32_BIT_REG_RTN_DATANUM);
11289 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11293 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11294 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11296 dev_err(&hdev->pdev->dev,
11297 "Query 32 bit register cmd failed, ret = %d.\n", ret);
11302 for (i = 0; i < cmd_num; i++) {
11304 desc_data = (__le32 *)(&desc[i].data[0]);
11305 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11307 desc_data = (__le32 *)(&desc[i]);
11308 n = HCLGE_32_BIT_REG_RTN_DATANUM;
11310 for (k = 0; k < n; k++) {
11311 *reg_val++ = le32_to_cpu(*desc_data++);
11323 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11326 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11327 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11329 struct hclge_desc *desc;
11330 u64 *reg_val = data;
11340 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11341 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11342 HCLGE_64_BIT_REG_RTN_DATANUM);
11343 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11347 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11348 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11350 dev_err(&hdev->pdev->dev,
11351 "Query 64 bit register cmd failed, ret = %d.\n", ret);
11356 for (i = 0; i < cmd_num; i++) {
11358 desc_data = (__le64 *)(&desc[i].data[0]);
11359 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11361 desc_data = (__le64 *)(&desc[i]);
11362 n = HCLGE_64_BIT_REG_RTN_DATANUM;
11364 for (k = 0; k < n; k++) {
11365 *reg_val++ = le64_to_cpu(*desc_data++);
11377 #define MAX_SEPARATE_NUM 4
11378 #define SEPARATOR_VALUE 0xFDFCFBFA
11379 #define REG_NUM_PER_LINE 4
11380 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
11381 #define REG_SEPARATOR_LINE 1
11382 #define REG_NUM_REMAIN_MASK 3
11383 #define BD_LIST_MAX_NUM 30
11385 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11389 /* initialize command BD except the last one */
11390 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11391 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11393 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11396 /* initialize the last command BD */
11397 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11399 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
11402 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11406 u32 entries_per_desc, desc_index, index, offset, i;
11407 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
11410 ret = hclge_query_bd_num_cmd_send(hdev, desc);
11412 dev_err(&hdev->pdev->dev,
11413 "Get dfx bd num fail, status is %d.\n", ret);
11417 entries_per_desc = ARRAY_SIZE(desc[0].data);
11418 for (i = 0; i < type_num; i++) {
11419 offset = hclge_dfx_bd_offset_list[i];
11420 index = offset % entries_per_desc;
11421 desc_index = offset / entries_per_desc;
11422 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
11428 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
11429 struct hclge_desc *desc_src, int bd_num,
11430 enum hclge_opcode_type cmd)
11432 struct hclge_desc *desc = desc_src;
11435 hclge_cmd_setup_basic_desc(desc, cmd, true);
11436 for (i = 0; i < bd_num - 1; i++) {
11437 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11439 hclge_cmd_setup_basic_desc(desc, cmd, true);
11443 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
11445 dev_err(&hdev->pdev->dev,
11446 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
11452 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
11455 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
11456 struct hclge_desc *desc = desc_src;
11459 entries_per_desc = ARRAY_SIZE(desc->data);
11460 reg_num = entries_per_desc * bd_num;
11461 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
11462 for (i = 0; i < reg_num; i++) {
11463 index = i % entries_per_desc;
11464 desc_index = i / entries_per_desc;
11465 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
11467 for (i = 0; i < separator_num; i++)
11468 *reg++ = SEPARATOR_VALUE;
11470 return reg_num + separator_num;
11473 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11475 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11476 int data_len_per_desc, bd_num, i;
11477 int bd_num_list[BD_LIST_MAX_NUM];
11481 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11483 dev_err(&hdev->pdev->dev,
11484 "Get dfx reg bd num fail, status is %d.\n", ret);
11488 data_len_per_desc = sizeof_field(struct hclge_desc, data);
11490 for (i = 0; i < dfx_reg_type_num; i++) {
11491 bd_num = bd_num_list[i];
11492 data_len = data_len_per_desc * bd_num;
11493 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11499 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11501 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11502 int bd_num, bd_num_max, buf_len, i;
11503 int bd_num_list[BD_LIST_MAX_NUM];
11504 struct hclge_desc *desc_src;
11508 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11510 dev_err(&hdev->pdev->dev,
11511 "Get dfx reg bd num fail, status is %d.\n", ret);
11515 bd_num_max = bd_num_list[0];
11516 for (i = 1; i < dfx_reg_type_num; i++)
11517 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11519 buf_len = sizeof(*desc_src) * bd_num_max;
11520 desc_src = kzalloc(buf_len, GFP_KERNEL);
11524 for (i = 0; i < dfx_reg_type_num; i++) {
11525 bd_num = bd_num_list[i];
11526 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11527 hclge_dfx_reg_opcode_list[i]);
11529 dev_err(&hdev->pdev->dev,
11530 "Get dfx reg fail, status is %d.\n", ret);
11534 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11541 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11542 struct hnae3_knic_private_info *kinfo)
11544 #define HCLGE_RING_REG_OFFSET 0x200
11545 #define HCLGE_RING_INT_REG_OFFSET 0x4
11547 int i, j, reg_num, separator_num;
11551 /* fetching per-PF registers valus from PF PCIe register space */
11552 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11553 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11554 for (i = 0; i < reg_num; i++)
11555 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11556 for (i = 0; i < separator_num; i++)
11557 *reg++ = SEPARATOR_VALUE;
11558 data_num_sum = reg_num + separator_num;
11560 reg_num = ARRAY_SIZE(common_reg_addr_list);
11561 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11562 for (i = 0; i < reg_num; i++)
11563 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11564 for (i = 0; i < separator_num; i++)
11565 *reg++ = SEPARATOR_VALUE;
11566 data_num_sum += reg_num + separator_num;
11568 reg_num = ARRAY_SIZE(ring_reg_addr_list);
11569 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11570 for (j = 0; j < kinfo->num_tqps; j++) {
11571 for (i = 0; i < reg_num; i++)
11572 *reg++ = hclge_read_dev(&hdev->hw,
11573 ring_reg_addr_list[i] +
11574 HCLGE_RING_REG_OFFSET * j);
11575 for (i = 0; i < separator_num; i++)
11576 *reg++ = SEPARATOR_VALUE;
11578 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11580 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11581 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11582 for (j = 0; j < hdev->num_msi_used - 1; j++) {
11583 for (i = 0; i < reg_num; i++)
11584 *reg++ = hclge_read_dev(&hdev->hw,
11585 tqp_intr_reg_addr_list[i] +
11586 HCLGE_RING_INT_REG_OFFSET * j);
11587 for (i = 0; i < separator_num; i++)
11588 *reg++ = SEPARATOR_VALUE;
11590 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11592 return data_num_sum;
11595 static int hclge_get_regs_len(struct hnae3_handle *handle)
11597 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11598 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11599 struct hclge_vport *vport = hclge_get_vport(handle);
11600 struct hclge_dev *hdev = vport->back;
11601 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11602 int regs_lines_32_bit, regs_lines_64_bit;
11605 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
11607 dev_err(&hdev->pdev->dev,
11608 "Get register number failed, ret = %d.\n", ret);
11612 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11614 dev_err(&hdev->pdev->dev,
11615 "Get dfx reg len failed, ret = %d.\n", ret);
11619 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11620 REG_SEPARATOR_LINE;
11621 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11622 REG_SEPARATOR_LINE;
11623 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11624 REG_SEPARATOR_LINE;
11625 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11626 REG_SEPARATOR_LINE;
11627 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11628 REG_SEPARATOR_LINE;
11629 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11630 REG_SEPARATOR_LINE;
11632 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11633 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11634 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11637 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11640 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11641 struct hclge_vport *vport = hclge_get_vport(handle);
11642 struct hclge_dev *hdev = vport->back;
11643 u32 regs_num_32_bit, regs_num_64_bit;
11644 int i, reg_num, separator_num, ret;
11647 *version = hdev->fw_version;
11649 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
11651 dev_err(&hdev->pdev->dev,
11652 "Get register number failed, ret = %d.\n", ret);
11656 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11658 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11660 dev_err(&hdev->pdev->dev,
11661 "Get 32 bit register failed, ret = %d.\n", ret);
11664 reg_num = regs_num_32_bit;
11666 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11667 for (i = 0; i < separator_num; i++)
11668 *reg++ = SEPARATOR_VALUE;
11670 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11672 dev_err(&hdev->pdev->dev,
11673 "Get 64 bit register failed, ret = %d.\n", ret);
11676 reg_num = regs_num_64_bit * 2;
11678 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11679 for (i = 0; i < separator_num; i++)
11680 *reg++ = SEPARATOR_VALUE;
11682 ret = hclge_get_dfx_reg(hdev, reg);
11684 dev_err(&hdev->pdev->dev,
11685 "Get dfx register failed, ret = %d.\n", ret);
11688 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11690 struct hclge_set_led_state_cmd *req;
11691 struct hclge_desc desc;
11694 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11696 req = (struct hclge_set_led_state_cmd *)desc.data;
11697 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11698 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11700 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11702 dev_err(&hdev->pdev->dev,
11703 "Send set led state cmd error, ret =%d\n", ret);
/* Locate-LED states passed to hclge_set_led_status().
 * NOTE(review): OFF/ON enumerators reconstructed as 0/1 — confirm against
 * the firmware interface definition.
 */
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
11714 static int hclge_set_led_id(struct hnae3_handle *handle,
11715 enum ethtool_phys_id_state status)
11717 struct hclge_vport *vport = hclge_get_vport(handle);
11718 struct hclge_dev *hdev = vport->back;
11721 case ETHTOOL_ID_ACTIVE:
11722 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11723 case ETHTOOL_ID_INACTIVE:
11724 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11730 static void hclge_get_link_mode(struct hnae3_handle *handle,
11731 unsigned long *supported,
11732 unsigned long *advertising)
11734 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11735 struct hclge_vport *vport = hclge_get_vport(handle);
11736 struct hclge_dev *hdev = vport->back;
11737 unsigned int idx = 0;
11739 for (; idx < size; idx++) {
11740 supported[idx] = hdev->hw.mac.supported[idx];
11741 advertising[idx] = hdev->hw.mac.advertising[idx];
11745 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11747 struct hclge_vport *vport = hclge_get_vport(handle);
11748 struct hclge_dev *hdev = vport->back;
11750 return hclge_config_gro(hdev, enable);
11753 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11755 struct hclge_vport *vport = &hdev->vport[0];
11756 struct hnae3_handle *handle = &vport->nic;
11760 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11761 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11762 vport->last_promisc_flags = vport->overflow_promisc_flags;
11765 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11766 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11767 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11768 tmp_flags & HNAE3_MPE);
11770 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11771 hclge_enable_vlan_filter(handle,
11772 tmp_flags & HNAE3_VLAN_FLTR);
11777 static bool hclge_module_existed(struct hclge_dev *hdev)
11779 struct hclge_desc desc;
11783 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11784 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11786 dev_err(&hdev->pdev->dev,
11787 "failed to get SFP exist state, ret = %d\n", ret);
11791 existed = le32_to_cpu(desc.data[0]);
11793 return existed != 0;
11796 /* need 6 bds(total 140 bytes) in one reading
11797 * return the number of bytes actually read, 0 means read failed.
11799 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11802 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11803 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11809 /* setup all 6 bds to read module eeprom info. */
11810 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11811 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11814 /* bd0~bd4 need next flag */
11815 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11816 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11819 /* setup bd0, this bd contains offset and read length. */
11820 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11821 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11822 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11823 sfp_info_bd0->read_len = cpu_to_le16(read_len);
11825 ret = hclge_cmd_send(&hdev->hw, desc, i);
11827 dev_err(&hdev->pdev->dev,
11828 "failed to get SFP eeprom info, ret = %d\n", ret);
11832 /* copy sfp info from bd0 to out buffer. */
11833 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11834 memcpy(data, sfp_info_bd0->data, copy_len);
11835 read_len = copy_len;
11837 /* copy sfp info from bd1~bd5 to out buffer if needed. */
11838 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11839 if (read_len >= len)
11842 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11843 memcpy(data + read_len, desc[i].data, copy_len);
11844 read_len += copy_len;
11850 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11853 struct hclge_vport *vport = hclge_get_vport(handle);
11854 struct hclge_dev *hdev = vport->back;
11858 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11859 return -EOPNOTSUPP;
11861 if (!hclge_module_existed(hdev))
11864 while (read_len < len) {
11865 data_len = hclge_get_sfp_eeprom_info(hdev,
11872 read_len += data_len;
11878 static const struct hnae3_ae_ops hclge_ops = {
11879 .init_ae_dev = hclge_init_ae_dev,
11880 .uninit_ae_dev = hclge_uninit_ae_dev,
11881 .flr_prepare = hclge_flr_prepare,
11882 .flr_done = hclge_flr_done,
11883 .init_client_instance = hclge_init_client_instance,
11884 .uninit_client_instance = hclge_uninit_client_instance,
11885 .map_ring_to_vector = hclge_map_ring_to_vector,
11886 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11887 .get_vector = hclge_get_vector,
11888 .put_vector = hclge_put_vector,
11889 .set_promisc_mode = hclge_set_promisc_mode,
11890 .request_update_promisc_mode = hclge_request_update_promisc_mode,
11891 .set_loopback = hclge_set_loopback,
11892 .start = hclge_ae_start,
11893 .stop = hclge_ae_stop,
11894 .client_start = hclge_client_start,
11895 .client_stop = hclge_client_stop,
11896 .get_status = hclge_get_status,
11897 .get_ksettings_an_result = hclge_get_ksettings_an_result,
11898 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11899 .get_media_type = hclge_get_media_type,
11900 .check_port_speed = hclge_check_port_speed,
11901 .get_fec = hclge_get_fec,
11902 .set_fec = hclge_set_fec,
11903 .get_rss_key_size = hclge_get_rss_key_size,
11904 .get_rss = hclge_get_rss,
11905 .set_rss = hclge_set_rss,
11906 .set_rss_tuple = hclge_set_rss_tuple,
11907 .get_rss_tuple = hclge_get_rss_tuple,
11908 .get_tc_size = hclge_get_tc_size,
11909 .get_mac_addr = hclge_get_mac_addr,
11910 .set_mac_addr = hclge_set_mac_addr,
11911 .do_ioctl = hclge_do_ioctl,
11912 .add_uc_addr = hclge_add_uc_addr,
11913 .rm_uc_addr = hclge_rm_uc_addr,
11914 .add_mc_addr = hclge_add_mc_addr,
11915 .rm_mc_addr = hclge_rm_mc_addr,
11916 .set_autoneg = hclge_set_autoneg,
11917 .get_autoneg = hclge_get_autoneg,
11918 .restart_autoneg = hclge_restart_autoneg,
11919 .halt_autoneg = hclge_halt_autoneg,
11920 .get_pauseparam = hclge_get_pauseparam,
11921 .set_pauseparam = hclge_set_pauseparam,
11922 .set_mtu = hclge_set_mtu,
11923 .reset_queue = hclge_reset_tqp,
11924 .get_stats = hclge_get_stats,
11925 .get_mac_stats = hclge_get_mac_stat,
11926 .update_stats = hclge_update_stats,
11927 .get_strings = hclge_get_strings,
11928 .get_sset_count = hclge_get_sset_count,
11929 .get_fw_version = hclge_get_fw_version,
11930 .get_mdix_mode = hclge_get_mdix_mode,
11931 .enable_vlan_filter = hclge_enable_vlan_filter,
11932 .set_vlan_filter = hclge_set_vlan_filter,
11933 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11934 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11935 .reset_event = hclge_reset_event,
11936 .get_reset_level = hclge_get_reset_level,
11937 .set_default_reset_request = hclge_set_def_reset_request,
11938 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11939 .set_channels = hclge_set_channels,
11940 .get_channels = hclge_get_channels,
11941 .get_regs_len = hclge_get_regs_len,
11942 .get_regs = hclge_get_regs,
11943 .set_led_id = hclge_set_led_id,
11944 .get_link_mode = hclge_get_link_mode,
11945 .add_fd_entry = hclge_add_fd_entry,
11946 .del_fd_entry = hclge_del_fd_entry,
11947 .del_all_fd_entries = hclge_del_all_fd_entries,
11948 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11949 .get_fd_rule_info = hclge_get_fd_rule_info,
11950 .get_fd_all_rules = hclge_get_all_rules,
11951 .enable_fd = hclge_enable_fd,
11952 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11953 .dbg_run_cmd = hclge_dbg_run_cmd,
11954 .dbg_read_cmd = hclge_dbg_read_cmd,
11955 .handle_hw_ras_error = hclge_handle_hw_ras_error,
11956 .get_hw_reset_stat = hclge_get_hw_reset_stat,
11957 .ae_dev_resetting = hclge_ae_dev_resetting,
11958 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11959 .set_gro_en = hclge_gro_en,
11960 .get_global_queue_id = hclge_covert_handle_qid_global,
11961 .set_timer_task = hclge_set_timer_task,
11962 .mac_connect_phy = hclge_mac_connect_phy,
11963 .mac_disconnect_phy = hclge_mac_disconnect_phy,
11964 .get_vf_config = hclge_get_vf_config,
11965 .set_vf_link_state = hclge_set_vf_link_state,
11966 .set_vf_spoofchk = hclge_set_vf_spoofchk,
11967 .set_vf_trust = hclge_set_vf_trust,
11968 .set_vf_rate = hclge_set_vf_rate,
11969 .set_vf_mac = hclge_set_vf_mac,
11970 .get_module_eeprom = hclge_get_module_eeprom,
11971 .get_cmdq_stat = hclge_get_cmdq_stat,
11972 .add_cls_flower = hclge_add_cls_flower,
11973 .del_cls_flower = hclge_del_cls_flower,
11974 .cls_flower_active = hclge_is_cls_flower_active,
11977 static struct hnae3_ae_algo ae_algo = {
11979 .pdev_id_table = ae_algo_pci_tbl,
11982 static int hclge_init(void)
11984 pr_info("%s is initializing\n", HCLGE_NAME);
11986 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11988 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11992 hnae3_register_ae_algo(&ae_algo);
11997 static void hclge_exit(void)
11999 hnae3_unregister_ae_algo(&ae_algo);
12000 destroy_workqueue(hclge_wq);
12002 module_init(hclge_init);
12003 module_exit(hclge_exit);
12005 MODULE_LICENSE("GPL");
12006 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12007 MODULE_DESCRIPTION("HCLGE Driver");
12008 MODULE_VERSION(HCLGE_MOD_VERSION);