// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_err.h"

#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
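
/* Illustrative sketch (not part of the driver): the two macros above
 * combine into a generic "read a u64 field by byte offset" accessor, so a
 * single const table of {name, offset} pairs can drive both string and
 * value reporting. Assuming hdev->mac_stats has a mac_tx_good_pkt_num
 * member,
 *
 *      u64 off = HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num);
 *      u64 val = HCLGE_STATS_READ(&hdev->mac_stats, off);
 *
 * reads the same value as hdev->mac_stats.mac_tx_good_pkt_num directly,
 * but the offset form can be stored in g_mac_stats_string below and
 * iterated at runtime (see hclge_comm_get_stats()).
 */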

#define HCLGE_BUF_SIZE_UNIT 256U
#define HCLGE_BUF_MUL_BY 2
#define HCLGE_BUF_DIV_BY 2
#define NEED_RESERVE_TC_NUM 2
#define BUF_MAX_PERCENT 100
#define BUF_RESERVE_PERCENT 90

#define HCLGE_RESET_MAX_FAIL_CNT 5
#define HCLGE_RESET_SYNC_TIME 100
#define HCLGE_PF_RESET_SYNC_TIME 20
#define HCLGE_PF_RESET_SYNC_CNT 1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET 1
#define HCLGE_DFX_SSU_0_BD_OFFSET 2
#define HCLGE_DFX_SSU_1_BD_OFFSET 3
#define HCLGE_DFX_IGU_BD_OFFSET 4
#define HCLGE_DFX_RPU_0_BD_OFFSET 5
#define HCLGE_DFX_RPU_1_BD_OFFSET 6
#define HCLGE_DFX_NCSI_BD_OFFSET 7
#define HCLGE_DFX_RTC_BD_OFFSET 8
#define HCLGE_DFX_PPP_BD_OFFSET 9
#define HCLGE_DFX_RCB_BD_OFFSET 10
#define HCLGE_DFX_TQP_BD_OFFSET 11
#define HCLGE_DFX_SSU_2_BD_OFFSET 12

#define HCLGE_LINK_STATUS_MS 10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,

static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
        /* required last entry */

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
        HCLGE_CMDQ_TX_ADDR_H_REG,
        HCLGE_CMDQ_TX_DEPTH_REG,
        HCLGE_CMDQ_TX_TAIL_REG,
        HCLGE_CMDQ_TX_HEAD_REG,
        HCLGE_CMDQ_RX_ADDR_L_REG,
        HCLGE_CMDQ_RX_ADDR_H_REG,
        HCLGE_CMDQ_RX_DEPTH_REG,
        HCLGE_CMDQ_RX_TAIL_REG,
        HCLGE_CMDQ_RX_HEAD_REG,
        HCLGE_VECTOR0_CMDQ_SRC_REG,
        HCLGE_CMDQ_INTR_STS_REG,
        HCLGE_CMDQ_INTR_EN_REG,
        HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
        HCLGE_VECTOR0_OTER_EN_REG,
        HCLGE_MISC_RESET_STS_REG,
        HCLGE_MISC_VECTOR_INT_STS,
        HCLGE_GLOBAL_RESET_REG,

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
        HCLGE_RING_RX_ADDR_H_REG,
        HCLGE_RING_RX_BD_NUM_REG,
        HCLGE_RING_RX_BD_LENGTH_REG,
        HCLGE_RING_RX_MERGE_EN_REG,
        HCLGE_RING_RX_TAIL_REG,
        HCLGE_RING_RX_HEAD_REG,
        HCLGE_RING_RX_FBD_NUM_REG,
        HCLGE_RING_RX_OFFSET_REG,
        HCLGE_RING_RX_FBD_OFFSET_REG,
        HCLGE_RING_RX_STASH_REG,
        HCLGE_RING_RX_BD_ERR_REG,
        HCLGE_RING_TX_ADDR_L_REG,
        HCLGE_RING_TX_ADDR_H_REG,
        HCLGE_RING_TX_BD_NUM_REG,
        HCLGE_RING_TX_PRIORITY_REG,
        HCLGE_RING_TX_TC_REG,
        HCLGE_RING_TX_MERGE_EN_REG,
        HCLGE_RING_TX_TAIL_REG,
        HCLGE_RING_TX_HEAD_REG,
        HCLGE_RING_TX_FBD_NUM_REG,
        HCLGE_RING_TX_OFFSET_REG,
        HCLGE_RING_TX_EBD_NUM_REG,
        HCLGE_RING_TX_EBD_OFFSET_REG,
        HCLGE_RING_TX_BD_ERR_REG,

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
        HCLGE_TQP_INTR_GL0_REG,
        HCLGE_TQP_INTR_GL1_REG,
        HCLGE_TQP_INTR_GL2_REG,
        HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
        "Serdes serial Loopback test",
        "Serdes parallel Loopback test",

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
        {"mac_tx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
        {"mac_rx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
        {"mac_tx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
        {"mac_rx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
        {"mac_tx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
        {"mac_tx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
        {"mac_tx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
        {"mac_tx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
        {"mac_tx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
        {"mac_tx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
        {"mac_tx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
        {"mac_tx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
        {"mac_tx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
        {"mac_rx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
        {"mac_rx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
        {"mac_rx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
        {"mac_rx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
        {"mac_rx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
        {"mac_rx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
        {"mac_rx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
        {"mac_rx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
        {"mac_rx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
        {"mac_tx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
        {"mac_tx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
        {"mac_tx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
        {"mac_tx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
        {"mac_tx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
        {"mac_tx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
        {"mac_tx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
        {"mac_tx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
        {"mac_tx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
        {"mac_tx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
        {"mac_tx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
        {"mac_tx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
        {"mac_tx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
        {"mac_tx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
        {"mac_tx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
        {"mac_tx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
        {"mac_tx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
        {"mac_tx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
        {"mac_tx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
        {"mac_tx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
        {"mac_tx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
        {"mac_tx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
        {"mac_tx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
        {"mac_tx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
        {"mac_tx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
        {"mac_rx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
        {"mac_rx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
        {"mac_rx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
        {"mac_rx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
        {"mac_rx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
        {"mac_rx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
        {"mac_rx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
        {"mac_rx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
        {"mac_rx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
        {"mac_rx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
        {"mac_rx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
        {"mac_rx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
        {"mac_rx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
        {"mac_rx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
        {"mac_rx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
        {"mac_rx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
        {"mac_rx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
        {"mac_rx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
        {"mac_rx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
        {"mac_rx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
        {"mac_rx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
        {"mac_rx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
        {"mac_rx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
        {"mac_rx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
        {"mac_rx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

        {"mac_tx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
        {"mac_tx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
        {"mac_tx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
        {"mac_tx_err_all_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
        {"mac_tx_from_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
        {"mac_tx_from_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
        {"mac_rx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
        {"mac_rx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
        {"mac_rx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
        {"mac_rx_fcs_err_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
        {"mac_rx_send_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
        {"mac_rx_send_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {

                .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
                .ethter_type = cpu_to_le16(ETH_P_LLDP),
                .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
                .i_port_bitmap = 0x1,

static const u8 hclge_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA

static const u32 hclge_dfx_bd_offset_list[] = {
        HCLGE_DFX_BIOS_BD_OFFSET,
        HCLGE_DFX_SSU_0_BD_OFFSET,
        HCLGE_DFX_SSU_1_BD_OFFSET,
        HCLGE_DFX_IGU_BD_OFFSET,
        HCLGE_DFX_RPU_0_BD_OFFSET,
        HCLGE_DFX_RPU_1_BD_OFFSET,
        HCLGE_DFX_NCSI_BD_OFFSET,
        HCLGE_DFX_RTC_BD_OFFSET,
        HCLGE_DFX_PPP_BD_OFFSET,
        HCLGE_DFX_RCB_BD_OFFSET,
        HCLGE_DFX_TQP_BD_OFFSET,
        HCLGE_DFX_SSU_2_BD_OFFSET

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
        HCLGE_OPC_DFX_BIOS_COMMON_REG,
        HCLGE_OPC_DFX_SSU_REG_0,
        HCLGE_OPC_DFX_SSU_REG_1,
        HCLGE_OPC_DFX_IGU_EGU_REG,
        HCLGE_OPC_DFX_RPU_REG_0,
        HCLGE_OPC_DFX_RPU_REG_1,
        HCLGE_OPC_DFX_NCSI_REG,
        HCLGE_OPC_DFX_RTC_REG,
        HCLGE_OPC_DFX_PPP_REG,
        HCLGE_OPC_DFX_RCB_REG,
        HCLGE_OPC_DFX_TQP_REG,
        HCLGE_OPC_DFX_SSU_REG_2

static const struct key_info meta_data_key_info[] = {
        { PACKET_TYPE_ID, 6},

static const struct key_info tuple_key_info[] = {
        { OUTER_DST_MAC, 48},
        { OUTER_SRC_MAC, 48},
        { OUTER_VLAN_TAG_FST, 16},
        { OUTER_VLAN_TAG_SEC, 16},
        { OUTER_ETH_TYPE, 16},
        { OUTER_IP_PROTO, 8},
        { OUTER_SRC_PORT, 16},
        { OUTER_DST_PORT, 16},
        { OUTER_TUN_VNI, 24},
        { OUTER_TUN_FLOW_ID, 8},
        { INNER_DST_MAC, 48},
        { INNER_SRC_MAC, 48},
        { INNER_VLAN_TAG_FST, 16},
        { INNER_VLAN_TAG_SEC, 16},
        { INNER_ETH_TYPE, 16},
        { INNER_IP_PROTO, 8},
        { INNER_SRC_PORT, 16},
        { INNER_DST_PORT, 16},

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
#define HCLGE_MAC_CMD_NUM 21

        u64 *data = (u64 *)(&hdev->mac_stats);
        struct hclge_desc desc[HCLGE_MAC_CMD_NUM];

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);

                dev_err(&hdev->pdev->dev,
                        "Get MAC pkt stats fail, status = %d.\n", ret);

        for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
                /* for special opcode 0032, only the first desc has the head */
                if (unlikely(i == 0)) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;

                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)

        u64 *data = (u64 *)(&hdev->mac_stats);
        struct hclge_desc *desc;

        /* This may be called inside atomic sections,
         * so GFP_ATOMIC is more suitable here
         */
        desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
        ret = hclge_cmd_send(&hdev->hw, desc, desc_num);

        for (i = 0; i < desc_num; i++) {
                /* for special opcode 0034, only the first desc has the head */

                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;

                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)

        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);

        desc_data = (__le32 *)(&desc.data[0]);
        reg_num = le32_to_cpu(*desc_data);

        *desc_num = 1 + ((reg_num - 3) >> 2) +
                    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
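
/* Worked example (sketch): the first descriptor carries the command head
 * plus the first batch of counters, and each following descriptor is
 * reinterpreted wholesale as u64 counters, mirroring how the two update
 * helpers above walk the buffer. Assuming HCLGE_RD_FIRST_STATS_NUM is 3
 * and HCLGE_RD_OTHER_STATS_NUM is 4, the computation above is
 *
 *      desc_num = 1 + ceil((reg_num - 3) / 4)
 *
 * e.g. reg_num = 83 gives 1 + (80 >> 2) + 0 = 21 descriptors.
 */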

static int hclge_mac_update_stats(struct hclge_dev *hdev)

        ret = hclge_mac_query_reg_num(hdev, &desc_num);

        /* The firmware supports the new statistics acquisition method */
                ret = hclge_mac_update_stats_complete(hdev, desc_num);
        else if (ret == -EOPNOTSUPP)
                ret = hclge_mac_update_stats_defective(hdev);

                dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

static int hclge_tqps_update_stats(struct hnae3_handle *handle)

        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct hnae3_queue *queue;
        struct hclge_desc desc[1];
        struct hclge_tqp *tqp;

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command: HCLGE_OPC_QUERY_RX_STATS */
                hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,

                desc[0].data[0] = cpu_to_le32(tqp->index);
                ret = hclge_cmd_send(&hdev->hw, desc, 1);

                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",

                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command: HCLGE_OPC_QUERY_TX_STATS */
                hclge_cmd_setup_basic_desc(&desc[0],
                                           HCLGE_OPC_QUERY_TX_STATS,

                desc[0].data[0] = cpu_to_le32(tqp->index);
                ret = hclge_cmd_send(&hdev->hw, desc, 1);

                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",

                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)

        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_tqp *tqp;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)

        struct hnae3_knic_private_info *kinfo = &handle->kinfo;

        /* each TQP has two queues: one TX and one RX */
        return kinfo->num_tqps * 2;

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)

        struct hnae3_knic_private_info *kinfo = &handle->kinfo;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
                                                     struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",

                buff = buff + ETH_GSTRING_LEN;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
                                                     struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",

                buff = buff + ETH_GSTRING_LEN;

static u64 *hclge_comm_get_stats(const void *comm_stats,
                                 const struct hclge_comm_stats_str strs[],

        for (i = 0; i < size; i++)
                buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

static u8 *hclge_comm_get_strings(u32 stringset,
                                  const struct hclge_comm_stats_str strs[],

        char *buff = (char *)data;

        if (stringset != ETH_SS_STATS)

        for (i = 0; i < size; i++) {
                snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
                buff = buff + ETH_GSTRING_LEN;

static void hclge_update_stats_for_all(struct hclge_dev *hdev)

        struct hnae3_handle *handle;

        handle = &hdev->vport[0].nic;
        if (handle->client) {
                status = hclge_tqps_update_stats(handle);

                        dev_err(&hdev->pdev->dev,
                                "Update TQPS stats fail, status = %d.\n",

        status = hclge_mac_update_stats(hdev);

                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n", status);

static void hclge_update_stats(struct hnae3_handle *handle,
                               struct net_device_stats *net_stats)

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))

        status = hclge_mac_update_stats(hdev);

                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n",

        status = hclge_tqps_update_stats(handle);

                dev_err(&hdev->pdev->dev,
                        "Update TQPS stats fail, status = %d.\n",

        clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
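
/* The update path above uses a classic non-blocking guard:
 * test_and_set_bit() atomically returns the previous bit value, so only
 * one caller wins and performs the refresh, while concurrent callers
 * return immediately with slightly stale counters. A minimal sketch of
 * the same idiom (illustrative only, hypothetical helper name):
 */
#if 0
static void example_guarded_refresh(struct hclge_dev *hdev)
{
        if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
                return;         /* another context is already updating */
        /* ... expensive firmware queries go here ... */
        clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
#endif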

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
                                   HNAE3_SUPPORT_PHY_LOOPBACK |\
                                   HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
                                   HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        /* Loopback test support rules:
         * mac: supported only in GE mode
         * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
         * phy: supported only when a PHY device exists on the board
         */
        if (stringset == ETH_SS_TEST) {
                /* clear the loopback bit flags first */
                handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
                if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {

                        handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;

                handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
                handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

                if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
                    hdev->hw.mac.phydev->drv->set_loopback) {

                        handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;

        } else if (stringset == ETH_SS_STATS) {
                count = ARRAY_SIZE(g_mac_stats_string) +
                        hclge_tqps_get_sset_count(handle, stringset);

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,

        u8 *p = (char *)data;

        if (stringset == ETH_SS_STATS) {
                size = ARRAY_SIZE(g_mac_stats_string);
                p = hclge_comm_get_strings(stringset, g_mac_stats_string,

                p = hclge_tqps_get_strings(handle, p);
        } else if (stringset == ETH_SS_TEST) {
                if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],

                        p += ETH_GSTRING_LEN;

                if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],

                        p += ETH_GSTRING_LEN;

                if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {

                               hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],

                        p += ETH_GSTRING_LEN;

                if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],

                        p += ETH_GSTRING_LEN;

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
                                 ARRAY_SIZE(g_mac_stats_string), data);
        p = hclge_tqps_get_stats(handle, p);

static void hclge_get_mac_stat(struct hnae3_handle *handle,
                               struct hns3_mac_stats *mac_stats)

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        hclge_update_stats(handle, NULL);

        mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
        mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;

static int hclge_parse_func_status(struct hclge_dev *hdev,
                                   struct hclge_func_status_cmd *status)
#define HCLGE_MAC_ID_MASK 0xF

        if (!(status->pf_state & HCLGE_PF_STATE_DONE))

        /* Record whether this PF is the main PF */
        if (status->pf_state & HCLGE_PF_STATE_MAIN)
                hdev->flag |= HCLGE_FLAG_MAIN;

                hdev->flag &= ~HCLGE_FLAG_MAIN;

        hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;

static int hclge_query_function_status(struct hclge_dev *hdev)
#define HCLGE_QUERY_MAX_CNT 5

        struct hclge_func_status_cmd *req;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
        req = (struct hclge_func_status_cmd *)desc.data;

                ret = hclge_cmd_send(&hdev->hw, &desc, 1);

                        dev_err(&hdev->pdev->dev,
                                "query function status failed %d.\n", ret);

                /* Check if the PF reset is done */

                usleep_range(1000, 2000);
        } while (timeout++ < HCLGE_QUERY_MAX_CNT);

        return hclge_parse_func_status(hdev, req);

static int hclge_query_pf_resource(struct hclge_dev *hdev)

        struct hclge_pf_res_cmd *req;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);

                dev_err(&hdev->pdev->dev,
                        "query pf resource failed %d.\n", ret);

        req = (struct hclge_pf_res_cmd *)desc.data;
        hdev->num_tqps = le16_to_cpu(req->tqp_num) +
                         le16_to_cpu(req->ext_tqp_num);
        hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

        if (req->tx_buf_size)

                        le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;

                hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

        hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

        if (req->dv_buf_size)

                        le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;

                hdev->dv_buf_size = HCLGE_DEFAULT_DV;

        hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

        hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
        if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
                dev_err(&hdev->pdev->dev,
                        "only %u msi resources available, not enough for pf(min:2).\n",

        if (hnae3_dev_roce_supported(hdev)) {

                        le16_to_cpu(req->pf_intr_vector_number_roce);

                /* The PF should have both NIC and RoCE vectors;
                 * the NIC vectors are laid out before the RoCE vectors.
                 */
                hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;

                hdev->num_msi = hdev->num_nic_msi;
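
/* Vector accounting sketch: when RoCE is supported, the MSI-X space is
 * carved up as [ NIC vectors | RoCE vectors ]. With illustrative counts
 * num_nic_msi = 32 and num_roce_msi = 16, the device ends up requesting
 * num_msi = 48, and RoCE interrupt indices start at offset 32. The
 * numbers are made up for the arithmetic, not read from any hardware.
 */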

static int hclge_parse_speed(u8 speed_cmd, u32 *speed)

                *speed = HCLGE_MAC_SPEED_10M;

                *speed = HCLGE_MAC_SPEED_100M;

                *speed = HCLGE_MAC_SPEED_1G;

                *speed = HCLGE_MAC_SPEED_10G;

                *speed = HCLGE_MAC_SPEED_25G;

                *speed = HCLGE_MAC_SPEED_40G;

                *speed = HCLGE_MAC_SPEED_50G;

                *speed = HCLGE_MAC_SPEED_100G;

                *speed = HCLGE_MAC_SPEED_200G;

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 speed_ability = hdev->hw.mac.speed_ability;

        case HCLGE_MAC_SPEED_10M:
                speed_bit = HCLGE_SUPPORT_10M_BIT;

        case HCLGE_MAC_SPEED_100M:
                speed_bit = HCLGE_SUPPORT_100M_BIT;

        case HCLGE_MAC_SPEED_1G:
                speed_bit = HCLGE_SUPPORT_1G_BIT;

        case HCLGE_MAC_SPEED_10G:
                speed_bit = HCLGE_SUPPORT_10G_BIT;

        case HCLGE_MAC_SPEED_25G:
                speed_bit = HCLGE_SUPPORT_25G_BIT;

        case HCLGE_MAC_SPEED_40G:
                speed_bit = HCLGE_SUPPORT_40G_BIT;

        case HCLGE_MAC_SPEED_50G:
                speed_bit = HCLGE_SUPPORT_50G_BIT;

        case HCLGE_MAC_SPEED_100G:
                speed_bit = HCLGE_SUPPORT_100G_BIT;

        case HCLGE_MAC_SPEED_200G:
                speed_bit = HCLGE_SUPPORT_200G_BIT;

        if (speed_bit & speed_ability)

static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)

        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,

static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)

        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_200G_BIT)

                        ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,

static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)

        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,

static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,

static void hclge_convert_setting_fec(struct hclge_mac *mac)

        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

        switch (mac->speed) {
        case HCLGE_MAC_SPEED_10G:
        case HCLGE_MAC_SPEED_40G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,

                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);

        case HCLGE_MAC_SPEED_25G:
        case HCLGE_MAC_SPEED_50G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,

                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
                        BIT(HNAE3_FEC_AUTO);

        case HCLGE_MAC_SPEED_100G:
        case HCLGE_MAC_SPEED_200G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
                mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);

                mac->fec_ability = 0;

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,

        struct hclge_mac *mac = &hdev->hw.mac;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,

        hclge_convert_setting_sr(mac, speed_ability);
        hclge_convert_setting_lr(mac, speed_ability);
        hclge_convert_setting_cr(mac, speed_ability);
        if (hnae3_dev_fec_supported(hdev))
                hclge_convert_setting_fec(mac);

        linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,

        struct hclge_mac *mac = &hdev->hw.mac;

        hclge_convert_setting_kr(mac, speed_ability);
        if (hnae3_dev_fec_supported(hdev))
                hclge_convert_setting_fec(mac);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,

        unsigned long *supported = hdev->hw.mac.supported;

        /* default to supporting all speeds for a GE port */

                speed_ability = HCLGE_SUPPORT_GE;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,

        if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,

                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,

        if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);

        linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)

        u8 media_type = hdev->hw.mac.media_type;

        if (media_type == HNAE3_MEDIA_TYPE_FIBER)
                hclge_parse_fiber_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
                hclge_parse_copper_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
                hclge_parse_backplane_link_mode(hdev, speed_ability);

static u32 hclge_get_max_speed(u16 speed_ability)

        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                return HCLGE_MAC_SPEED_200G;

        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                return HCLGE_MAC_SPEED_100G;

        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                return HCLGE_MAC_SPEED_50G;

        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                return HCLGE_MAC_SPEED_40G;

        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                return HCLGE_MAC_SPEED_25G;

        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                return HCLGE_MAC_SPEED_10G;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                return HCLGE_MAC_SPEED_1G;

        if (speed_ability & HCLGE_SUPPORT_100M_BIT)
                return HCLGE_MAC_SPEED_100M;

        if (speed_ability & HCLGE_SUPPORT_10M_BIT)
                return HCLGE_MAC_SPEED_10M;

        return HCLGE_MAC_SPEED_1G;

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
#define SPEED_ABILITY_EXT_SHIFT 8

        struct hclge_cfg_param_cmd *req;
        u64 mac_addr_tmp_high;
        u16 speed_ability_ext;

        req = (struct hclge_cfg_param_cmd *)desc[0].data;

        /* get the configuration */
        cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),

        cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
        cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                            HCLGE_CFG_TQP_DESC_N_M,
                                            HCLGE_CFG_TQP_DESC_N_S);

        cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                        HCLGE_CFG_PHY_ADDR_M,
                                        HCLGE_CFG_PHY_ADDR_S);
        cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_MEDIA_TP_M,
                                          HCLGE_CFG_MEDIA_TP_S);
        cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_RX_BUF_LEN_M,
                                          HCLGE_CFG_RX_BUF_LEN_S);
        /* get mac_address */
        mac_addr_tmp = __le32_to_cpu(req->param[2]);
        mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_MAC_ADDR_H_M,
                                            HCLGE_CFG_MAC_ADDR_H_S);

        mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

        cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                             HCLGE_CFG_DEFAULT_SPEED_M,
                                             HCLGE_CFG_DEFAULT_SPEED_S);
        cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                               HCLGE_CFG_RSS_SIZE_M,
                                               HCLGE_CFG_RSS_SIZE_S);

        for (i = 0; i < ETH_ALEN; i++)
                cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
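
        /* Layout sketch: the 48-bit MAC address is split across the config
         * words, with the low 32 bits in param[2] and the high 16 bits
         * packed into param[3]; the ((high << 31) << 1) form above is simply
         * "high << 32" written in two steps. With illustrative raw values
         *
         *      param[2] = 0x33221100, high half = 0x5544
         *      mac_addr_tmp = 0x0000554433221100
         *
         * the byte loop emits the address least-significant byte first:
         * cfg->mac_addr = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}.
         */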

        req = (struct hclge_cfg_param_cmd *)desc[1].data;
        cfg->numa_node_map = __le32_to_cpu(req->param[0]);

        cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                             HCLGE_CFG_SPEED_ABILITY_M,
                                             HCLGE_CFG_SPEED_ABILITY_S);
        speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                            HCLGE_CFG_SPEED_ABILITY_EXT_M,
                                            HCLGE_CFG_SPEED_ABILITY_EXT_S);
        cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

        cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                         HCLGE_CFG_UMV_TBL_SPACE_M,
                                         HCLGE_CFG_UMV_TBL_SPACE_S);
        if (!cfg->umv_space)
                cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;

        cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
                                               HCLGE_CFG_PF_RSS_SIZE_M,
                                               HCLGE_CFG_PF_RSS_SIZE_S);

        /* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power of 2
         * rather than the size itself, which is more flexible for future
         * changes and expansions. When the VF max rss size field is
         * HCLGE_CFG_RSS_SIZE_S, a PF field of 0 does not make sense; in
         * that case the PF and VF share the same max rss size field,
         * HCLGE_CFG_RSS_SIZE_S.
         */
        cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
                               1U << cfg->pf_rss_size_max :
                               cfg->vf_rss_size_max;
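
        /* Decode sketch: since the PF rss-size field stores an exponent
         * rather than a size, an illustrative raw value of 7 yields
         * 1U << 7 = 128, while a raw value of 0 means the field is unused
         * and the VF maximum (vf_rss_size_max) is inherited instead.
         */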

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)

        struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
        struct hclge_cfg_param_cmd *req;

        for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {

                req = (struct hclge_cfg_param_cmd *)desc[i].data;
                hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,

                hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
                                HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
                /* the length must be in units of 4 bytes when sent to
                 * the hardware
                 */
                hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
                                HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
                req->offset = cpu_to_le32(offset);

        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);

                dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);

        hclge_parse_cfg(hcfg, desc);
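
/* Request layout sketch: each descriptor's offset word packs both the byte
 * offset of the chunk (HCLGE_CFG_OFFSET_*) and the chunk length in 4-byte
 * units (HCLGE_CFG_RD_LEN_*). Descriptor i therefore asks the firmware for
 * bytes [i * HCLGE_CFG_RD_LEN_BYTES, (i + 1) * HCLGE_CFG_RD_LEN_BYTES) of
 * the flash config area, and hclge_parse_cfg() decodes the concatenated
 * result.
 */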

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
#define HCLGE_MAX_NON_TSO_BD_NUM 8U

        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

        ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
        ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
        ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
        ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
        ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
        ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
        ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
                                  struct hclge_desc *desc)

        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
        struct hclge_dev_specs_0_cmd *req0;
        struct hclge_dev_specs_1_cmd *req1;

        req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
        req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

        ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
        ae_dev->dev_specs.rss_ind_tbl_size =
                le16_to_cpu(req0->rss_ind_tbl_size);
        ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
        ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
        ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
        ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
        ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
        ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);

static void hclge_check_dev_specs(struct hclge_dev *hdev)

        struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

        if (!dev_specs->max_non_tso_bd_num)
                dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
        if (!dev_specs->rss_ind_tbl_size)
                dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
        if (!dev_specs->rss_key_size)
                dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
        if (!dev_specs->max_tm_rate)
                dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
        if (!dev_specs->max_qset_num)
                dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
        if (!dev_specs->max_int_gl)
                dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
        if (!dev_specs->max_frm_size)
                dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;

static int hclge_query_dev_specs(struct hclge_dev *hdev)

        struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];

        /* set default specifications as devices lower than version V3 do not
         * support querying specifications from firmware.
         */
        if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
                hclge_set_default_dev_specs(hdev);

        for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
                hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,

                desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

        hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);

        hclge_parse_dev_specs(hdev, desc);
        hclge_check_dev_specs(hdev);

static int hclge_get_cap(struct hclge_dev *hdev)

        ret = hclge_query_function_status(hdev);

                dev_err(&hdev->pdev->dev,
                        "query function status error %d.\n", ret);

        /* get pf resource */
        return hclge_query_pf_resource(hdev);

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
#define HCLGE_MIN_TX_DESC 64
#define HCLGE_MIN_RX_DESC 64

        if (!is_kdump_kernel())

        dev_info(&hdev->pdev->dev,
                 "Running kdump kernel. Using minimal resources\n");

        /* the minimum number of queue pairs equals the number of vports */
        hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
        hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
        hdev->num_rx_desc = HCLGE_MIN_RX_DESC;

static int hclge_configure(struct hclge_dev *hdev)

        struct hclge_cfg cfg;

        ret = hclge_get_cfg(hdev, &cfg);

        hdev->num_vmdq_vport = cfg.vmdq_vport_num;
        hdev->base_tqp_pid = 0;
        hdev->vf_rss_size_max = cfg.vf_rss_size_max;
        hdev->pf_rss_size_max = cfg.pf_rss_size_max;
        hdev->rx_buf_len = cfg.rx_buf_len;
        ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
        hdev->hw.mac.media_type = cfg.media_type;
        hdev->hw.mac.phy_addr = cfg.phy_addr;
        hdev->num_tx_desc = cfg.tqp_desc_num;
        hdev->num_rx_desc = cfg.tqp_desc_num;
        hdev->tm_info.num_pg = 1;
        hdev->tc_max = cfg.tc_num;
        hdev->tm_info.hw_pfc_map = 0;
        hdev->wanted_umv_size = cfg.umv_space;

        if (hnae3_dev_fd_supported(hdev)) {

                hdev->fd_active_type = HCLGE_FD_RULE_NONE;

        ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);

                dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
                        cfg.default_speed, ret);

        hclge_parse_link_mode(hdev, cfg.speed_ability);

        hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

        if ((hdev->tc_max > HNAE3_MAX_TC) ||
            (hdev->tc_max < 1)) {
                dev_warn(&hdev->pdev->dev, "TC num = %u.\n",

        /* The device does not support DCB */
        if (!hnae3_dev_dcb_supported(hdev)) {

                hdev->pfc_max = hdev->tc_max;

        hdev->tm_info.num_tc = 1;

        /* non-contiguous TCs are currently not supported */
        for (i = 0; i < hdev->tm_info.num_tc; i++)
                hnae3_set_bit(hdev->hw_tc_map, i, 1);

        hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

        hclge_init_kdump_kernel_config(hdev);

        /* Set the initial affinity based on the PCI function number */
        i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
        i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
        cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
                        &hdev->affinity_mask);
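
        /* Affinity sketch: with, say, 24 CPUs on the local NUMA node and
         * PCI function number 5, the code above picks index 5 % 24 = 5,
         * and cpumask_local_spread() maps that index to the sixth CPU of
         * the node, so different PFs land on different local CPUs. The
         * counts are illustrative only.
         */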

static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,

        struct hclge_cfg_tso_status_cmd *req;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

        req = (struct hclge_cfg_tso_status_cmd *)desc.data;
        req->tso_mss_min = cpu_to_le16(tso_mss_min);
        req->tso_mss_max = cpu_to_le16(tso_mss_max);

        return hclge_cmd_send(&hdev->hw, &desc, 1);

static int hclge_config_gro(struct hclge_dev *hdev, bool en)

        struct hclge_cfg_gro_status_cmd *req;
        struct hclge_desc desc;

        if (!hnae3_dev_gro_supported(hdev))

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
        req = (struct hclge_cfg_gro_status_cmd *)desc.data;

        req->gro_en = en ? 1 : 0;

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);

                dev_err(&hdev->pdev->dev,
                        "GRO hardware config cmd failed, ret = %d\n", ret);

static int hclge_alloc_tqps(struct hclge_dev *hdev)

        struct hclge_tqp *tqp;

        hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
                                  sizeof(struct hclge_tqp), GFP_KERNEL);

        for (i = 0; i < hdev->num_tqps; i++) {
                tqp->dev = &hdev->pdev->dev;

                tqp->q.ae_algo = &ae_algo;
                tqp->q.buf_size = hdev->rx_buf_len;
                tqp->q.tx_desc_num = hdev->num_tx_desc;
                tqp->q.rx_desc_num = hdev->num_rx_desc;

                /* need an extended offset to configure queues >=
                 * HCLGE_TQP_MAX_SIZE_DEV_V2
                 */
                if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
                        tqp->q.io_base = hdev->hw.io_base +
                                         HCLGE_TQP_REG_OFFSET +
                                         i * HCLGE_TQP_REG_SIZE;

                        tqp->q.io_base = hdev->hw.io_base +
                                         HCLGE_TQP_REG_OFFSET +
                                         HCLGE_TQP_EXT_REG_OFFSET +
                                         (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
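
/* Address math sketch: queue registers live in two windows. For a
 * hypothetical index i = HCLGE_TQP_MAX_SIZE_DEV_V2 + 2, the else branch
 * above yields
 *
 *      io_base = hw.io_base + HCLGE_TQP_REG_OFFSET
 *                + HCLGE_TQP_EXT_REG_OFFSET
 *                + 2 * (per-queue register stride);
 *
 * while indexes below HCLGE_TQP_MAX_SIZE_DEV_V2 stay in the first window
 * at io_base + HCLGE_TQP_REG_OFFSET + i * HCLGE_TQP_REG_SIZE.
 */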

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
                                  u16 tqp_pid, u16 tqp_vid, bool is_pf)

        struct hclge_tqp_map_cmd *req;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

        req = (struct hclge_tqp_map_cmd *)desc.data;
        req->tqp_id = cpu_to_le16(tqp_pid);
        req->tqp_vf = func_id;
        req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;

                req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
        req->tqp_vid = cpu_to_le16(tqp_vid);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);

                dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)

        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;

        for (i = 0, alloced = 0; i < hdev->num_tqps &&
             alloced < num_tqps; i++) {
                if (!hdev->htqp[i].alloced) {
                        hdev->htqp[i].q.handle = &vport->nic;
                        hdev->htqp[i].q.tqp_index = alloced;
                        hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
                        hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
                        kinfo->tqp[alloced] = &hdev->htqp[i].q;
                        hdev->htqp[i].alloced = true;

        vport->alloc_tqps = alloced;
        kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
                                vport->alloc_tqps / hdev->tm_info.num_tc);

        /* ensure a one-to-one mapping between IRQs and queues by default */
        kinfo->rss_size = min_t(u16, kinfo->rss_size,
                                (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
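
        /* Sizing sketch with illustrative numbers pf_rss_size_max = 64,
         * alloc_tqps = 16, num_tc = 4 and num_nic_msi = 33:
         *
         *      rss_size = min(64, 16 / 4)      = 4
         *      rss_size = min(4, (33 - 1) / 4) = 4
         *
         * The "- 1" appears to reserve one MSI vector for the misc
         * interrupt, leaving one vector per queue per TC.
         */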

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
                            u16 num_tx_desc, u16 num_rx_desc)

        struct hnae3_handle *nic = &vport->nic;
        struct hnae3_knic_private_info *kinfo = &nic->kinfo;
        struct hclge_dev *hdev = vport->back;

        kinfo->num_tx_desc = num_tx_desc;
        kinfo->num_rx_desc = num_rx_desc;

        kinfo->rx_buf_len = hdev->rx_buf_len;

        kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
                                  sizeof(struct hnae3_queue *), GFP_KERNEL);

        ret = hclge_assign_tqp(vport, num_tqps);

                dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
                                  struct hclge_vport *vport)

        struct hnae3_handle *nic = &vport->nic;
        struct hnae3_knic_private_info *kinfo;

        kinfo = &nic->kinfo;
        for (i = 0; i < vport->alloc_tqps; i++) {
                struct hclge_tqp *q =
                        container_of(kinfo->tqp[i], struct hclge_tqp, q);

                is_pf = !(vport->vport_id);
                ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,

static int hclge_map_tqp(struct hclge_dev *hdev)

        struct hclge_vport *vport = hdev->vport;

        num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
        for (i = 0; i < num_vport; i++) {

                ret = hclge_map_tqp_to_vport(hdev, vport);

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)

        struct hnae3_handle *nic = &vport->nic;
        struct hclge_dev *hdev = vport->back;

        nic->pdev = hdev->pdev;
        nic->ae_algo = &ae_algo;
        nic->numa_node_mask = hdev->numa_node_mask;

        ret = hclge_knic_setup(vport, num_tqps,
                               hdev->num_tx_desc, hdev->num_rx_desc);

                dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

static int hclge_alloc_vport(struct hclge_dev *hdev)

        struct pci_dev *pdev = hdev->pdev;
        struct hclge_vport *vport;

        /* We need to alloc a vport for the main NIC of the PF */
        num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

        if (hdev->num_tqps < num_vport) {
                dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
                        hdev->num_tqps, num_vport);

        /* Alloc the same number of TQPs for every vport */
        tqp_per_vport = hdev->num_tqps / num_vport;
        tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
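
        /* Distribution sketch: the remainder of the integer division goes
         * to the main (PF) vport. E.g. with num_tqps = 67 and num_vport = 5:
         *
         *      tqp_per_vport  = 67 / 5        = 13
         *      tqp_main_vport = 13 + (67 % 5) = 15
         *
         * so the PF vport gets 15 queue pairs and each VF vport gets 13.
         */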

        vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),

        hdev->vport = vport;
        hdev->num_alloc_vport = num_vport;

        if (IS_ENABLED(CONFIG_PCI_IOV))
                hdev->num_alloc_vfs = hdev->num_req_vfs;

        for (i = 0; i < num_vport; i++) {

                vport->vport_id = i;
                vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
                vport->mps = HCLGE_MAC_DEFAULT_FRAME;
                vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
                vport->rxvlan_cfg.rx_vlan_offload_en = true;
                INIT_LIST_HEAD(&vport->vlan_list);
                INIT_LIST_HEAD(&vport->uc_mac_list);
                INIT_LIST_HEAD(&vport->mc_mac_list);
                spin_lock_init(&vport->mac_list_lock);

                        ret = hclge_vport_setup(vport, tqp_main_vport);

                        ret = hclge_vport_setup(vport, tqp_per_vport);

                                "vport setup failed for vport %d, %d\n",

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
                                   struct hclge_pkt_buf_alloc *buf_alloc)
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)

        struct hclge_tx_buff_alloc_cmd *req;
        struct hclge_desc desc;

        req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

                req->tx_pkt_buff[i] =
                        cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
                                    HCLGE_BUF_SIZE_UPDATE_EN_MSK);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);

                dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
                                 struct hclge_pkt_buf_alloc *buf_alloc)

        int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

                dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

static u32 hclge_get_tc_num(struct hclge_dev *hdev)

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
                if (hdev->hw_tc_map & BIT(i))

/* Get the number of PFC-enabled TCs, which have a private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
                                  struct hclge_pkt_buf_alloc *buf_alloc)

        struct hclge_priv_buf *priv;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                priv = &buf_alloc->priv_buf[i];
                if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&

/* Get the number of PFC-disabled TCs, which have a private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
                                     struct hclge_pkt_buf_alloc *buf_alloc)

        struct hclge_priv_buf *priv;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                priv = &buf_alloc->priv_buf[i];
                if (hdev->hw_tc_map & BIT(i) &&
                    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)

        struct hclge_priv_buf *priv;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                priv = &buf_alloc->priv_buf[i];

                        rx_priv += priv->buf_size;

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)

        u32 i, total_tx_size = 0;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
                total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

        return total_tx_size;

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
                               struct hclge_pkt_buf_alloc *buf_alloc,

        u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
        u32 tc_num = hclge_get_tc_num(hdev);
        u32 shared_buf, aligned_mps;

        aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

        if (hnae3_dev_dcb_supported(hdev))
                shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +

                shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
                                 + hdev->dv_buf_size;

        shared_buf_tc = tc_num * aligned_mps + aligned_mps;
        shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
                             HCLGE_BUF_SIZE_UNIT);

        rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
        if (rx_all < rx_priv + shared_std)

        shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
        buf_alloc->s_buf.buf_size = shared_buf;
        if (hnae3_dev_dcb_supported(hdev)) {
                buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
                buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
                        - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
                                  HCLGE_BUF_SIZE_UNIT);

                buf_alloc->s_buf.self.high = aligned_mps +
                                             HCLGE_NON_DCB_ADDITIONAL_BUF;
                buf_alloc->s_buf.self.low = aligned_mps;

        if (hnae3_dev_dcb_supported(hdev)) {
                hi_thrd = shared_buf - hdev->dv_buf_size;

                if (tc_num <= NEED_RESERVE_TC_NUM)
                        hi_thrd = hi_thrd * BUF_RESERVE_PERCENT

                        hi_thrd = hi_thrd / tc_num;

                hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
                hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
                lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;

                hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
                lo_thrd = aligned_mps;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
                buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
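
/* Waterline sketch with made-up DCB numbers: mps = 1500 rounds up to
 * aligned_mps = 1536 (HCLGE_BUF_SIZE_UNIT = 256), and with, say,
 * dv_buf_size = 8192, tc_num = 4 and shared_buf = 40960:
 *
 *      hi_thrd = (40960 - 8192) / 4   = 8192
 *      hi_thrd = max(8192, 2 * 1536)  = 8192
 *      hi_thrd = rounddown(8192, 256) = 8192
 *      lo_thrd = 8192 - 1536 / 2      = 7424
 *
 * and every TC then shares the same high/low thresholds on the shared
 * buffer. All values are invented purely for the arithmetic.
 */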
2017 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2018 struct hclge_pkt_buf_alloc *buf_alloc)
2022 total_size = hdev->pkt_buf_size;
2024 /* alloc tx buffer for all enabled tc */
2025 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2026 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2028 if (hdev->hw_tc_map & BIT(i)) {
2029 if (total_size < hdev->tx_buf_size)
2032 priv->tx_buf_size = hdev->tx_buf_size;
2034 priv->tx_buf_size = 0;
2037 total_size -= priv->tx_buf_size;
2043 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2044 struct hclge_pkt_buf_alloc *buf_alloc)
2046 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2047 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2050 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2051 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2058 if (!(hdev->hw_tc_map & BIT(i)))
2063 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2064 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2065 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2066 HCLGE_BUF_SIZE_UNIT);
2069 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2073 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2076 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2079 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2080 struct hclge_pkt_buf_alloc *buf_alloc)
2082 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2083 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2086 /* let the last to be cleared first */
2087 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2088 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2089 unsigned int mask = BIT((unsigned int)i);
2091 if (hdev->hw_tc_map & mask &&
2092 !(hdev->tm_info.hw_pfc_map & mask)) {
2093 /* Clear the no pfc TC private buffer */
2101 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2102 no_pfc_priv_num == 0)
2106 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2109 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2110 struct hclge_pkt_buf_alloc *buf_alloc)
2112 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2113 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2116 /* let the last TC be cleared first */
2117 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2118 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2119 unsigned int mask = BIT((unsigned int)i);
2121 if (hdev->hw_tc_map & mask &&
2122 hdev->tm_info.hw_pfc_map & mask) {
2123 /* Reduce the number of PFC TCs with private buffer */
2131 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2132 pfc_priv_num == 0)
2133 return true;
2136 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2139 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2140 struct hclge_pkt_buf_alloc *buf_alloc)
2142 #define COMPENSATE_BUFFER 0x3C00
2143 #define COMPENSATE_HALF_MPS_NUM 5
2144 #define PRIV_WL_GAP 0x1800
2146 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2147 u32 tc_num = hclge_get_tc_num(hdev);
2148 u32 half_mps = hdev->mps >> 1;
2153 rx_priv = rx_priv / tc_num;
2155 if (tc_num <= NEED_RESERVE_TC_NUM)
2156 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2158 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2159 COMPENSATE_HALF_MPS_NUM * half_mps;
2160 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2161 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2163 if (rx_priv < min_rx_priv)
2166 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2167 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2174 if (!(hdev->hw_tc_map & BIT(i)))
2178 priv->buf_size = rx_priv;
2179 priv->wl.high = rx_priv - hdev->dv_buf_size;
2180 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2183 buf_alloc->s_buf.buf_size = 0;
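/* Example of the floor check above (hypothetical mps = 1500, so
 * half_mps = 750): the minimum per-TC private buffer is
 *   min_rx_priv = dv_buf_size + COMPENSATE_BUFFER (0x3C00)
 *                 + COMPENSATE_HALF_MPS_NUM * 750
 * rounded up to HCLGE_BUF_SIZE_UNIT; if the per-TC share rx_priv is
 * below this floor, the private-buffer-only strategy is rejected and
 * the caller falls back to the shared-buffer strategies.
 */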
2188 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2189 * @hdev: pointer to struct hclge_dev
2190 * @buf_alloc: pointer to buffer calculation data
2191 * @return: 0: calculation successful, negative: fail
2193 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2194 struct hclge_pkt_buf_alloc *buf_alloc)
2196 /* When DCB is not supported, rx private buffer is not allocated. */
2197 if (!hnae3_dev_dcb_supported(hdev)) {
2198 u32 rx_all = hdev->pkt_buf_size;
2200 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2201 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2207 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2210 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2213 /* try to decrease the buffer size */
2214 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2217 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2220 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
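/* Summary of the fallback order tried above: private-buffer-only
 * allocation, maximal waterlines, reduced waterlines, dropping the
 * private buffers of non-PFC TCs, and finally dropping the private
 * buffers of PFC TCs; if none of these fits into the packet buffer,
 * the calculation fails and an error is returned.
 */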
2226 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2227 struct hclge_pkt_buf_alloc *buf_alloc)
2229 struct hclge_rx_priv_buff_cmd *req;
2230 struct hclge_desc desc;
2234 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2235 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2237 /* Allocate private buffers for TCs */
2238 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2239 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2242 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2244 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2248 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2249 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2251 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2253 dev_err(&hdev->pdev->dev,
2254 "rx private buffer alloc cmd failed %d\n", ret);
2259 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2260 struct hclge_pkt_buf_alloc *buf_alloc)
2262 struct hclge_rx_priv_wl_buf *req;
2263 struct hclge_priv_buf *priv;
2264 struct hclge_desc desc[2];
2268 for (i = 0; i < 2; i++) {
2269 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2271 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2273 /* The first descriptor sets the NEXT bit to 1 */
2275 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2277 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2279 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2280 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2282 priv = &buf_alloc->priv_buf[idx];
2283 req->tc_wl[j].high =
2284 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2285 req->tc_wl[j].high |=
2286 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2287 req->tc_wl[j].low =
2288 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2289 req->tc_wl[j].low |=
2290 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2294 /* Send 2 descriptors at one time */
2295 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2297 dev_err(&hdev->pdev->dev,
2298 "rx private waterline config cmd failed %d\n",
2303 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2304 struct hclge_pkt_buf_alloc *buf_alloc)
2306 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2307 struct hclge_rx_com_thrd *req;
2308 struct hclge_desc desc[2];
2309 struct hclge_tc_thrd *tc;
2313 for (i = 0; i < 2; i++) {
2314 hclge_cmd_setup_basic_desc(&desc[i],
2315 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2316 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2318 /* The first descriptor sets the NEXT bit to 1 */
2320 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2322 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2324 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2325 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2327 req->com_thrd[j].high =
2328 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2329 req->com_thrd[j].high |=
2330 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2331 req->com_thrd[j].low =
2332 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2333 req->com_thrd[j].low |=
2334 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2338 /* Send 2 descriptors at one time */
2339 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2341 dev_err(&hdev->pdev->dev,
2342 "common threshold config cmd failed %d\n", ret);
2346 static int hclge_common_wl_config(struct hclge_dev *hdev,
2347 struct hclge_pkt_buf_alloc *buf_alloc)
2349 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2350 struct hclge_rx_com_wl *req;
2351 struct hclge_desc desc;
2354 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2356 req = (struct hclge_rx_com_wl *)desc.data;
2357 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2358 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2360 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2361 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2363 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2365 dev_err(&hdev->pdev->dev,
2366 "common waterline config cmd failed %d\n", ret);
2371 int hclge_buffer_alloc(struct hclge_dev *hdev)
2373 struct hclge_pkt_buf_alloc *pkt_buf;
2376 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2380 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2382 dev_err(&hdev->pdev->dev,
2383 "could not calc tx buffer size for all TCs %d\n", ret);
2387 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2389 dev_err(&hdev->pdev->dev,
2390 "could not alloc tx buffers %d\n", ret);
2394 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2396 dev_err(&hdev->pdev->dev,
2397 "could not calc rx priv buffer size for all TCs %d\n",
2402 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2404 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2409 if (hnae3_dev_dcb_supported(hdev)) {
2410 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2412 dev_err(&hdev->pdev->dev,
2413 "could not configure rx private waterline %d\n",
2418 ret = hclge_common_thrd_config(hdev, pkt_buf);
2420 dev_err(&hdev->pdev->dev,
2421 "could not configure common threshold %d\n",
2427 ret = hclge_common_wl_config(hdev, pkt_buf);
2429 dev_err(&hdev->pdev->dev,
2430 "could not configure common waterline %d\n", ret);
2437 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2439 struct hnae3_handle *roce = &vport->roce;
2440 struct hnae3_handle *nic = &vport->nic;
2441 struct hclge_dev *hdev = vport->back;
2443 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2445 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2448 roce->rinfo.base_vector = hdev->roce_base_vector;
2450 roce->rinfo.netdev = nic->kinfo.netdev;
2451 roce->rinfo.roce_io_base = hdev->hw.io_base;
2452 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2454 roce->pdev = nic->pdev;
2455 roce->ae_algo = nic->ae_algo;
2456 roce->numa_node_mask = nic->numa_node_mask;
2461 static int hclge_init_msi(struct hclge_dev *hdev)
2463 struct pci_dev *pdev = hdev->pdev;
2467 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2469 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2472 "failed(%d) to allocate MSI/MSI-X vectors\n",
2476 if (vectors < hdev->num_msi)
2477 dev_warn(&hdev->pdev->dev,
2478 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2479 hdev->num_msi, vectors);
2481 hdev->num_msi = vectors;
2482 hdev->num_msi_left = vectors;
2484 hdev->base_msi_vector = pdev->irq;
2485 hdev->roce_base_vector = hdev->base_msi_vector +
2488 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2489 sizeof(u16), GFP_KERNEL);
2490 if (!hdev->vector_status) {
2491 pci_free_irq_vectors(pdev);
2495 for (i = 0; i < hdev->num_msi; i++)
2496 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2498 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2499 sizeof(int), GFP_KERNEL);
2500 if (!hdev->vector_irq) {
2501 pci_free_irq_vectors(pdev);
2508 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2510 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2511 duplex = HCLGE_MAC_FULL;
2516 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2519 struct hclge_config_mac_speed_dup_cmd *req;
2520 struct hclge_desc desc;
2523 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2525 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2528 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2531 case HCLGE_MAC_SPEED_10M:
2532 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2533 HCLGE_CFG_SPEED_S, 6);
2535 case HCLGE_MAC_SPEED_100M:
2536 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2537 HCLGE_CFG_SPEED_S, 7);
2539 case HCLGE_MAC_SPEED_1G:
2540 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2541 HCLGE_CFG_SPEED_S, 0);
2543 case HCLGE_MAC_SPEED_10G:
2544 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2545 HCLGE_CFG_SPEED_S, 1);
2547 case HCLGE_MAC_SPEED_25G:
2548 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2549 HCLGE_CFG_SPEED_S, 2);
2551 case HCLGE_MAC_SPEED_40G:
2552 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2553 HCLGE_CFG_SPEED_S, 3);
2555 case HCLGE_MAC_SPEED_50G:
2556 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2557 HCLGE_CFG_SPEED_S, 4);
2559 case HCLGE_MAC_SPEED_100G:
2560 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2561 HCLGE_CFG_SPEED_S, 5);
2563 case HCLGE_MAC_SPEED_200G:
2564 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2565 HCLGE_CFG_SPEED_S, 8);
2568 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2572 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2575 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2577 dev_err(&hdev->pdev->dev,
2578 "mac speed/duplex config cmd failed %d.\n", ret);
2585 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2587 struct hclge_mac *mac = &hdev->hw.mac;
2590 duplex = hclge_check_speed_dup(duplex, speed);
2591 if (!mac->support_autoneg && mac->speed == speed &&
2592 mac->duplex == duplex)
2595 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2599 hdev->hw.mac.speed = speed;
2600 hdev->hw.mac.duplex = duplex;
2605 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2608 struct hclge_vport *vport = hclge_get_vport(handle);
2609 struct hclge_dev *hdev = vport->back;
2611 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2614 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2616 struct hclge_config_auto_neg_cmd *req;
2617 struct hclge_desc desc;
2621 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2623 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2625 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2626 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2628 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2630 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2636 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2638 struct hclge_vport *vport = hclge_get_vport(handle);
2639 struct hclge_dev *hdev = vport->back;
2641 if (!hdev->hw.mac.support_autoneg) {
2643 dev_err(&hdev->pdev->dev,
2644 "autoneg is not supported by current port\n");
2651 return hclge_set_autoneg_en(hdev, enable);
2654 static int hclge_get_autoneg(struct hnae3_handle *handle)
2656 struct hclge_vport *vport = hclge_get_vport(handle);
2657 struct hclge_dev *hdev = vport->back;
2658 struct phy_device *phydev = hdev->hw.mac.phydev;
2661 return phydev->autoneg;
2663 return hdev->hw.mac.autoneg;
2666 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2668 struct hclge_vport *vport = hclge_get_vport(handle);
2669 struct hclge_dev *hdev = vport->back;
2672 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2674 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2677 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2680 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2682 struct hclge_vport *vport = hclge_get_vport(handle);
2683 struct hclge_dev *hdev = vport->back;
2685 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2686 return hclge_set_autoneg_en(hdev, !halt);
2691 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2693 struct hclge_config_fec_cmd *req;
2694 struct hclge_desc desc;
2697 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2699 req = (struct hclge_config_fec_cmd *)desc.data;
2700 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2701 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2702 if (fec_mode & BIT(HNAE3_FEC_RS))
2703 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2704 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2705 if (fec_mode & BIT(HNAE3_FEC_BASER))
2706 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2707 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2709 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2711 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2716 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2718 struct hclge_vport *vport = hclge_get_vport(handle);
2719 struct hclge_dev *hdev = vport->back;
2720 struct hclge_mac *mac = &hdev->hw.mac;
2723 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2724 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2728 ret = hclge_set_fec_hw(hdev, fec_mode);
2732 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2736 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2739 struct hclge_vport *vport = hclge_get_vport(handle);
2740 struct hclge_dev *hdev = vport->back;
2741 struct hclge_mac *mac = &hdev->hw.mac;
2744 *fec_ability = mac->fec_ability;
2746 *fec_mode = mac->fec_mode;
2749 static int hclge_mac_init(struct hclge_dev *hdev)
2751 struct hclge_mac *mac = &hdev->hw.mac;
2754 hdev->support_sfp_query = true;
2755 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2756 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2757 hdev->hw.mac.duplex);
2761 if (hdev->hw.mac.support_autoneg) {
2762 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2769 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2770 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2775 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2777 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2781 ret = hclge_set_default_loopback(hdev);
2785 ret = hclge_buffer_alloc(hdev);
2787 dev_err(&hdev->pdev->dev,
2788 "allocate buffer fail, ret=%d\n", ret);
2793 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2795 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2796 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2797 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2798 hclge_wq, &hdev->service_task, 0);
2801 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2803 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2804 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2805 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2806 hclge_wq, &hdev->service_task, 0);
2809 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2811 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2812 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2813 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2814 hclge_wq, &hdev->service_task,
2818 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2820 struct hclge_link_status_cmd *req;
2821 struct hclge_desc desc;
2824 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2825 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2827 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2832 req = (struct hclge_link_status_cmd *)desc.data;
2833 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2834 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2839 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2841 struct phy_device *phydev = hdev->hw.mac.phydev;
2843 *link_status = HCLGE_LINK_STATUS_DOWN;
2845 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2848 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2851 return hclge_get_mac_link_status(hdev, link_status);
2854 static void hclge_update_link_status(struct hclge_dev *hdev)
2856 struct hnae3_client *rclient = hdev->roce_client;
2857 struct hnae3_client *client = hdev->nic_client;
2858 struct hnae3_handle *rhandle;
2859 struct hnae3_handle *handle;
2867 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2870 ret = hclge_get_mac_phy_link(hdev, &state);
2872 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2876 if (state != hdev->hw.mac.link) {
2877 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2878 handle = &hdev->vport[i].nic;
2879 client->ops->link_status_change(handle, state);
2880 hclge_config_mac_tnl_int(hdev, state);
2881 rhandle = &hdev->vport[i].roce;
2882 if (rclient && rclient->ops->link_status_change)
2883 rclient->ops->link_status_change(rhandle,
2886 hdev->hw.mac.link = state;
2889 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2892 static void hclge_update_port_capability(struct hclge_dev *hdev,
2893 struct hclge_mac *mac)
2895 if (hnae3_dev_fec_supported(hdev))
2896 /* update fec ability by speed */
2897 hclge_convert_setting_fec(mac);
2899 /* firmware can not identify the backplane type; the media type
2900 * read from the configuration can help to deal with it
2902 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2903 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2904 mac->module_type = HNAE3_MODULE_TYPE_KR;
2905 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2906 mac->module_type = HNAE3_MODULE_TYPE_TP;
2908 if (mac->support_autoneg) {
2909 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2910 linkmode_copy(mac->advertising, mac->supported);
2912 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2914 linkmode_zero(mac->advertising);
2918 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2920 struct hclge_sfp_info_cmd *resp;
2921 struct hclge_desc desc;
2924 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2925 resp = (struct hclge_sfp_info_cmd *)desc.data;
2926 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2927 if (ret == -EOPNOTSUPP) {
2928 dev_warn(&hdev->pdev->dev,
2929 "IMP do not support get SFP speed %d\n", ret);
2932 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2936 *speed = le32_to_cpu(resp->speed);
2941 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2943 struct hclge_sfp_info_cmd *resp;
2944 struct hclge_desc desc;
2947 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2948 resp = (struct hclge_sfp_info_cmd *)desc.data;
2950 resp->query_type = QUERY_ACTIVE_SPEED;
2952 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2953 if (ret == -EOPNOTSUPP) {
2954 dev_warn(&hdev->pdev->dev,
2955 "IMP does not support get SFP info %d\n", ret);
2958 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2962 /* In some cases, the MAC speed queried from the IMP may be 0; it
2963 * should not be written to mac->speed.
2965 if (!le32_to_cpu(resp->speed))
2968 mac->speed = le32_to_cpu(resp->speed);
2969 /* if resp->speed_ability is 0, the firmware is an old version;
2970 * do not update these parameters
2972 if (resp->speed_ability) {
2973 mac->module_type = le32_to_cpu(resp->module_type);
2974 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2975 mac->autoneg = resp->autoneg;
2976 mac->support_autoneg = resp->autoneg_ability;
2977 mac->speed_type = QUERY_ACTIVE_SPEED;
2978 if (!resp->active_fec)
2981 mac->fec_mode = BIT(resp->active_fec);
2983 mac->speed_type = QUERY_SFP_SPEED;
2989 static int hclge_update_port_info(struct hclge_dev *hdev)
2991 struct hclge_mac *mac = &hdev->hw.mac;
2992 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2995 /* get the port info from SFP cmd if not copper port */
2996 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2999 /* if the IMP does not support getting SFP/qSFP info, return directly */
3000 if (!hdev->support_sfp_query)
3003 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3004 ret = hclge_get_sfp_info(hdev, mac);
3006 ret = hclge_get_sfp_speed(hdev, &speed);
3008 if (ret == -EOPNOTSUPP) {
3009 hdev->support_sfp_query = false;
3015 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3016 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3017 hclge_update_port_capability(hdev, mac);
3020 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3023 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3024 return 0; /* do nothing if no SFP */
3026 /* must configure full duplex for SFP */
3027 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3031 static int hclge_get_status(struct hnae3_handle *handle)
3033 struct hclge_vport *vport = hclge_get_vport(handle);
3034 struct hclge_dev *hdev = vport->back;
3036 hclge_update_link_status(hdev);
3038 return hdev->hw.mac.link;
3041 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3043 if (!pci_num_vf(hdev->pdev)) {
3044 dev_err(&hdev->pdev->dev,
3045 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3049 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3050 dev_err(&hdev->pdev->dev,
3051 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3052 vf, pci_num_vf(hdev->pdev));
3056 /* VFs start from 1 in vport */
3057 vf += HCLGE_VF_VPORT_START_NUM;
3058 return &hdev->vport[vf];
3061 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3062 struct ifla_vf_info *ivf)
3064 struct hclge_vport *vport = hclge_get_vport(handle);
3065 struct hclge_dev *hdev = vport->back;
3067 vport = hclge_get_vf_vport(hdev, vf);
3072 ivf->linkstate = vport->vf_info.link_state;
3073 ivf->spoofchk = vport->vf_info.spoofchk;
3074 ivf->trusted = vport->vf_info.trusted;
3075 ivf->min_tx_rate = 0;
3076 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3077 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3078 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3079 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3080 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3085 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3088 struct hclge_vport *vport = hclge_get_vport(handle);
3089 struct hclge_dev *hdev = vport->back;
3091 vport = hclge_get_vf_vport(hdev, vf);
3095 vport->vf_info.link_state = link_state;
3100 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3102 u32 cmdq_src_reg, msix_src_reg;
3104 /* fetch the events from their corresponding regs */
3105 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3106 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3108 /* Assumption: if by any chance reset and mailbox events are reported
3109 * together, we will only process the reset event in this pass and will
3110 * defer the processing of the mailbox events. Since we will not have
3111 * cleared the RX CMDQ event this time, we will receive another
3112 * interrupt from the hardware just for the mailbox.
3114 * check for vector0 reset event sources
3116 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3117 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3118 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3119 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3120 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3121 hdev->rst_stats.imp_rst_cnt++;
3122 return HCLGE_VECTOR0_EVENT_RST;
3125 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3126 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3127 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3128 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3129 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3130 hdev->rst_stats.global_rst_cnt++;
3131 return HCLGE_VECTOR0_EVENT_RST;
3134 /* check for vector0 msix event source */
3135 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3136 *clearval = msix_src_reg;
3137 return HCLGE_VECTOR0_EVENT_ERR;
3140 /* check for vector0 mailbox(=CMDQ RX) event source */
3141 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3142 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3143 *clearval = cmdq_src_reg;
3144 return HCLGE_VECTOR0_EVENT_MBX;
3147 /* print other vector0 event source */
3148 dev_info(&hdev->pdev->dev,
3149 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3150 cmdq_src_reg, msix_src_reg);
3151 *clearval = msix_src_reg;
3153 return HCLGE_VECTOR0_EVENT_OTHER;
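/* The checks above implement a fixed priority, highest first:
 * IMP reset, global reset, MSI-X hardware error, mailbox (CMDQ RX),
 * then "other". Reporting only the highest-priority source per
 * interrupt is safe because any source left uncleared simply raises
 * vector 0 again, as the comment at the top of this function notes.
 */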
3156 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3159 switch (event_type) {
3160 case HCLGE_VECTOR0_EVENT_RST:
3161 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3163 case HCLGE_VECTOR0_EVENT_MBX:
3164 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3171 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3173 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3174 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3175 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3176 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3177 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3180 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3182 writel(enable ? 1 : 0, vector->addr);
3185 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3187 struct hclge_dev *hdev = data;
3191 hclge_enable_vector(&hdev->misc_vector, false);
3192 event_cause = hclge_check_event_cause(hdev, &clearval);
3194 /* vector 0 interrupt is shared with reset and mailbox source events. */
3195 switch (event_cause) {
3196 case HCLGE_VECTOR0_EVENT_ERR:
3197 /* we do not know what type of reset is required now. This could
3198 * only be decided after we fetch the type of errors which
3199 * caused this event. Therefore, we will do the following for now:
3200 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3201 * have a deferred type of reset to be used.
3202 * 2. Schedule the reset service task.
3203 * 3. When the service task receives HNAE3_UNKNOWN_RESET type, it
3204 * will fetch the correct type of reset. This would be done
3205 * by first decoding the types of errors.
3207 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3209 case HCLGE_VECTOR0_EVENT_RST:
3210 hclge_reset_task_schedule(hdev);
3212 case HCLGE_VECTOR0_EVENT_MBX:
3213 /* If we are here then,
3214 * 1. either we are not handling any mbx task and we are not
3215 * scheduled as well,
3217 * 2. or we could be handling a mbx task but nothing more is
3218 * scheduled.
3219 * In both cases, we should schedule the mbx task, as there are more
3220 * mbx messages reported by this interrupt.
3222 hclge_mbx_task_schedule(hdev);
3225 dev_warn(&hdev->pdev->dev,
3226 "received unknown or unhandled event of vector0\n");
3230 hclge_clear_event_cause(hdev, event_cause, clearval);
3232 /* Enable the interrupt if it is not caused by reset. And when
3233 * clearval is equal to 0, the interrupt status may have been
3234 * cleared by hardware before the driver reads the status register.
3235 * For this case, the vector0 interrupt should also be enabled.
3238 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3239 hclge_enable_vector(&hdev->misc_vector, true);
3245 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3247 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3248 dev_warn(&hdev->pdev->dev,
3249 "vector(vector_id %d) has been freed.\n", vector_id);
3253 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3254 hdev->num_msi_left += 1;
3255 hdev->num_msi_used -= 1;
3258 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3260 struct hclge_misc_vector *vector = &hdev->misc_vector;
3262 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3264 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3265 hdev->vector_status[0] = 0;
3267 hdev->num_msi_left -= 1;
3268 hdev->num_msi_used += 1;
3271 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3272 const cpumask_t *mask)
3274 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3277 cpumask_copy(&hdev->affinity_mask, mask);
3280 static void hclge_irq_affinity_release(struct kref *ref)
3284 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3286 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3287 &hdev->affinity_mask);
3289 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3290 hdev->affinity_notify.release = hclge_irq_affinity_release;
3291 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3292 &hdev->affinity_notify);
3295 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3297 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3298 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3301 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3305 hclge_get_misc_vector(hdev);
3307 /* this will be freed explicitly at the end */
3308 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3309 HCLGE_NAME, pci_name(hdev->pdev));
3310 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3311 0, hdev->misc_vector.name, hdev);
3313 hclge_free_vector(hdev, 0);
3314 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3315 hdev->misc_vector.vector_irq);
3321 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3323 free_irq(hdev->misc_vector.vector_irq, hdev);
3324 hclge_free_vector(hdev, 0);
3327 int hclge_notify_client(struct hclge_dev *hdev,
3328 enum hnae3_reset_notify_type type)
3330 struct hnae3_client *client = hdev->nic_client;
3333 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3336 if (!client->ops->reset_notify)
3339 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3340 struct hnae3_handle *handle = &hdev->vport[i].nic;
3343 ret = client->ops->reset_notify(handle, type);
3345 dev_err(&hdev->pdev->dev,
3346 "notify nic client failed %d(%d)\n", type, ret);
3354 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3355 enum hnae3_reset_notify_type type)
3357 struct hnae3_client *client = hdev->roce_client;
3361 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3364 if (!client->ops->reset_notify)
3367 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3368 struct hnae3_handle *handle = &hdev->vport[i].roce;
3370 ret = client->ops->reset_notify(handle, type);
3372 dev_err(&hdev->pdev->dev,
3373 "notify roce client failed %d(%d)",
3382 static int hclge_reset_wait(struct hclge_dev *hdev)
3384 #define HCLGE_RESET_WAIT_MS 100
3385 #define HCLGE_RESET_WAIT_CNT 350
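/* Worst-case polling time below: HCLGE_RESET_WAIT_CNT iterations of
 * HCLGE_RESET_WAIT_MS, i.e. 350 * 100 ms = 35 seconds.
 */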
3387 u32 val, reg, reg_bit;
3390 switch (hdev->reset_type) {
3391 case HNAE3_IMP_RESET:
3392 reg = HCLGE_GLOBAL_RESET_REG;
3393 reg_bit = HCLGE_IMP_RESET_BIT;
3395 case HNAE3_GLOBAL_RESET:
3396 reg = HCLGE_GLOBAL_RESET_REG;
3397 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3399 case HNAE3_FUNC_RESET:
3400 reg = HCLGE_FUN_RST_ING;
3401 reg_bit = HCLGE_FUN_RST_ING_B;
3404 dev_err(&hdev->pdev->dev,
3405 "Wait for unsupported reset type: %d\n",
3410 val = hclge_read_dev(&hdev->hw, reg);
3411 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3412 msleep(HCLGE_RESET_WAIT_MS);
3413 val = hclge_read_dev(&hdev->hw, reg);
3417 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3418 dev_warn(&hdev->pdev->dev,
3419 "Wait for reset timeout: %d\n", hdev->reset_type);
3426 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3428 struct hclge_vf_rst_cmd *req;
3429 struct hclge_desc desc;
3431 req = (struct hclge_vf_rst_cmd *)desc.data;
3432 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3433 req->dest_vfid = func_id;
3438 return hclge_cmd_send(&hdev->hw, &desc, 1);
3441 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3445 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3446 struct hclge_vport *vport = &hdev->vport[i];
3449 /* Send cmd to set/clear VF's FUNC_RST_ING */
3450 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3452 dev_err(&hdev->pdev->dev,
3453 "set vf(%u) rst failed %d!\n",
3454 vport->vport_id, ret);
3458 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3461 /* Inform VF to process the reset.
3462 * hclge_inform_reset_assert_to_vf may fail if VF
3463 * driver is not loaded.
3465 ret = hclge_inform_reset_assert_to_vf(vport);
3467 dev_warn(&hdev->pdev->dev,
3468 "inform reset to vf(%u) failed %d!\n",
3469 vport->vport_id, ret);
3475 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3477 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3478 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3479 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3482 hclge_mbx_handler(hdev);
3484 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3487 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3489 struct hclge_pf_rst_sync_cmd *req;
3490 struct hclge_desc desc;
3494 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3495 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3498 /* the VF needs to bring its netdev down via mailbox during PF or FLR reset */
3499 hclge_mailbox_service_task(hdev);
3501 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3502 /* for compatibility with old firmware, wait
3503 * 100 ms for the VF to stop IO
3505 if (ret == -EOPNOTSUPP) {
3506 msleep(HCLGE_RESET_SYNC_TIME);
3509 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3512 } else if (req->all_vf_ready) {
3515 msleep(HCLGE_PF_RESET_SYNC_TIME);
3516 hclge_cmd_reuse_desc(&desc, true);
3517 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3519 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
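/* The loop above polls up to HCLGE_PF_RESET_SYNC_CNT times, sleeping
 * HCLGE_PF_RESET_SYNC_TIME ms per iteration, so VFs get roughly 30
 * seconds (with the defines at the top of this file) to report ready
 * before the timeout warning fires.
 */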
3522 void hclge_report_hw_error(struct hclge_dev *hdev,
3523 enum hnae3_hw_error_type type)
3525 struct hnae3_client *client = hdev->nic_client;
3528 if (!client || !client->ops->process_hw_error ||
3529 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3532 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3533 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3536 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3540 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3541 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3542 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3543 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3544 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3547 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3548 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3549 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3550 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3554 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3556 struct hclge_desc desc;
3557 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3560 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3561 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3562 req->fun_reset_vfid = func_id;
3564 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3566 dev_err(&hdev->pdev->dev,
3567 "send function reset cmd fail, status =%d\n", ret);
3572 static void hclge_do_reset(struct hclge_dev *hdev)
3574 struct hnae3_handle *handle = &hdev->vport[0].nic;
3575 struct pci_dev *pdev = hdev->pdev;
3578 if (hclge_get_hw_reset_stat(handle)) {
3579 dev_info(&pdev->dev, "hardware reset not finished\n");
3580 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3581 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3582 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3586 switch (hdev->reset_type) {
3587 case HNAE3_GLOBAL_RESET:
3588 dev_info(&pdev->dev, "global reset requested\n");
3589 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3590 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3591 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3593 case HNAE3_FUNC_RESET:
3594 dev_info(&pdev->dev, "PF reset requested\n");
3595 /* schedule again to check later */
3596 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3597 hclge_reset_task_schedule(hdev);
3600 dev_warn(&pdev->dev,
3601 "unsupported reset type: %d\n", hdev->reset_type);
3606 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3607 unsigned long *addr)
3609 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3610 struct hclge_dev *hdev = ae_dev->priv;
3612 /* first, resolve any unknown reset type to the known type(s) */
3613 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3614 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3615 HCLGE_MISC_VECTOR_INT_STS);
3616 /* we will intentionally ignore any errors from this function
3617 * as we will end up in *some* reset request in any case
3619 if (hclge_handle_hw_msix_error(hdev, addr))
3620 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3623 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3624 /* We deferred the clearing of the error event which caused the
3625 * interrupt since it was not possible to do that in
3626 * interrupt context (and this is the reason we introduced the
3627 * new UNKNOWN reset type). Now that the errors have been
3628 * handled and cleared in hardware, we can safely enable
3629 * interrupts. This is an exception to the norm.
3631 hclge_enable_vector(&hdev->misc_vector, true);
3634 /* return the highest priority reset level amongst all */
3635 if (test_bit(HNAE3_IMP_RESET, addr)) {
3636 rst_level = HNAE3_IMP_RESET;
3637 clear_bit(HNAE3_IMP_RESET, addr);
3638 clear_bit(HNAE3_GLOBAL_RESET, addr);
3639 clear_bit(HNAE3_FUNC_RESET, addr);
3640 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3641 rst_level = HNAE3_GLOBAL_RESET;
3642 clear_bit(HNAE3_GLOBAL_RESET, addr);
3643 clear_bit(HNAE3_FUNC_RESET, addr);
3644 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3645 rst_level = HNAE3_FUNC_RESET;
3646 clear_bit(HNAE3_FUNC_RESET, addr);
3647 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3648 rst_level = HNAE3_FLR_RESET;
3649 clear_bit(HNAE3_FLR_RESET, addr);
3652 if (hdev->reset_type != HNAE3_NONE_RESET &&
3653 rst_level < hdev->reset_type)
3654 return HNAE3_NONE_RESET;
3659 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3663 switch (hdev->reset_type) {
3664 case HNAE3_IMP_RESET:
3665 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3667 case HNAE3_GLOBAL_RESET:
3668 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3677 /* For revision 0x20, the reset interrupt source
3678 * can only be cleared after the hardware reset is done
3680 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3681 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3684 hclge_enable_vector(&hdev->misc_vector, true);
3687 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3691 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3693 reg_val |= HCLGE_NIC_SW_RST_RDY;
3695 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3697 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3700 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3704 ret = hclge_set_all_vf_rst(hdev, true);
3708 hclge_func_reset_sync_vf(hdev);
3713 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3718 switch (hdev->reset_type) {
3719 case HNAE3_FUNC_RESET:
3720 ret = hclge_func_reset_notify_vf(hdev);
3724 ret = hclge_func_reset_cmd(hdev, 0);
3726 dev_err(&hdev->pdev->dev,
3727 "asserting function reset fail %d!\n", ret);
3731 /* After performing a PF reset, it is not necessary to do the
3732 * mailbox handling or send any command to firmware, because
3733 * any mailbox handling or command to firmware is only valid
3734 * after hclge_cmd_init is called.
3736 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3737 hdev->rst_stats.pf_rst_cnt++;
3739 case HNAE3_FLR_RESET:
3740 ret = hclge_func_reset_notify_vf(hdev);
3744 case HNAE3_IMP_RESET:
3745 hclge_handle_imp_error(hdev);
3746 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3747 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3748 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3754 /* inform hardware that preparatory work is done */
3755 msleep(HCLGE_RESET_SYNC_TIME);
3756 hclge_reset_handshake(hdev, true);
3757 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3762 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3764 #define MAX_RESET_FAIL_CNT 5
3766 if (hdev->reset_pending) {
3767 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3768 hdev->reset_pending);
3770 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3771 HCLGE_RESET_INT_M) {
3772 dev_info(&hdev->pdev->dev,
3773 "reset failed because new reset interrupt\n");
3774 hclge_clear_reset_cause(hdev);
3776 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3777 hdev->rst_stats.reset_fail_cnt++;
3778 set_bit(hdev->reset_type, &hdev->reset_pending);
3779 dev_info(&hdev->pdev->dev,
3780 "re-schedule reset task(%u)\n",
3781 hdev->rst_stats.reset_fail_cnt);
3785 hclge_clear_reset_cause(hdev);
3787 /* recover the handshake status when the reset fails */
3788 hclge_reset_handshake(hdev, true);
3790 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3792 hclge_dbg_dump_rst_info(hdev);
3794 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3799 static int hclge_set_rst_done(struct hclge_dev *hdev)
3801 struct hclge_pf_rst_done_cmd *req;
3802 struct hclge_desc desc;
3805 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3806 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3807 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3809 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3810 /* To be compatible with the old firmware, which does not support
3811 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3812 * return success
3813 */
3814 if (ret == -EOPNOTSUPP) {
3815 dev_warn(&hdev->pdev->dev,
3816 "current firmware does not support command(0x%x)!\n",
3817 HCLGE_OPC_PF_RST_DONE);
3820 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3827 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3831 switch (hdev->reset_type) {
3832 case HNAE3_FUNC_RESET:
3833 case HNAE3_FLR_RESET:
3834 ret = hclge_set_all_vf_rst(hdev, false);
3836 case HNAE3_GLOBAL_RESET:
3837 case HNAE3_IMP_RESET:
3838 ret = hclge_set_rst_done(hdev);
3844 /* clear the handshake status after re-initialization is done */
3845 hclge_reset_handshake(hdev, false);
3850 static int hclge_reset_stack(struct hclge_dev *hdev)
3854 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3858 ret = hclge_reset_ae_dev(hdev->ae_dev);
3862 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3865 static int hclge_reset_prepare(struct hclge_dev *hdev)
3869 hdev->rst_stats.reset_cnt++;
3870 /* perform reset of the stack & ae device for a client */
3871 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3876 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3881 return hclge_reset_prepare_wait(hdev);
3884 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3886 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3887 enum hnae3_reset_type reset_level;
3890 hdev->rst_stats.hw_reset_done_cnt++;
3892 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3897 ret = hclge_reset_stack(hdev);
3902 hclge_clear_reset_cause(hdev);
3904 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3905 /* ignore the RoCE notify error if it has failed fewer than
3906 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3907 */
3908 if (ret &&
3909 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3912 ret = hclge_reset_prepare_up(hdev);
3917 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3922 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3926 hdev->last_reset_time = jiffies;
3927 hdev->rst_stats.reset_fail_cnt = 0;
3928 hdev->rst_stats.reset_done_cnt++;
3929 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3931 /* if default_reset_request has a higher-level reset request,
3932 * it should be handled as soon as possible, since some errors
3933 * need this kind of reset to be fixed.
3935 reset_level = hclge_get_reset_level(ae_dev,
3936 &hdev->default_reset_request);
3937 if (reset_level != HNAE3_NONE_RESET)
3938 set_bit(reset_level, &hdev->reset_request);
3943 static void hclge_reset(struct hclge_dev *hdev)
3945 if (hclge_reset_prepare(hdev))
3948 if (hclge_reset_wait(hdev))
3951 if (hclge_reset_rebuild(hdev))
3957 if (hclge_reset_err_handle(hdev))
3958 hclge_reset_task_schedule(hdev);
3961 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3963 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3964 struct hclge_dev *hdev = ae_dev->priv;
3966 /* We might end up getting called broadly because of the two cases below:
3967 * 1. A recoverable error was conveyed through APEI and the only way to
3968 * bring back normalcy is to reset.
3969 * 2. A new reset request from the stack due to a timeout.
3971 * For the first case, the error event might not have an ae handle
3972 * available. Check if this is a new reset request and we are not here
3973 * just because the last reset attempt did not succeed and the watchdog
3974 * hit us again. We will know this if the last reset request did not
3975 * occur very recently (watchdog timer = 5*HZ; let us check after a
3976 * sufficiently large time, say 4*5*HZ). In case of a new request we
3977 * reset the "reset level" to PF reset. And if it is a repeat reset
3978 * request of the most recent one, we want to make sure we throttle it.
3979 * Therefore, we will not allow it again before 3*HZ has elapsed.
3982 handle = &hdev->vport[0].nic;
3984 if (time_before(jiffies, (hdev->last_reset_time +
3985 HCLGE_RESET_INTERVAL))) {
3986 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3988 } else if (hdev->default_reset_request) {
3990 hclge_get_reset_level(ae_dev,
3991 &hdev->default_reset_request);
3992 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3993 hdev->reset_level = HNAE3_FUNC_RESET;
3996 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3999 /* request reset & schedule reset task */
4000 set_bit(hdev->reset_level, &hdev->reset_request);
4001 hclge_reset_task_schedule(hdev);
4003 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4004 hdev->reset_level++;
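/* Escalation sketch: a first stack-timeout event requests
 * HNAE3_FUNC_RESET and then bumps reset_level, so a repeated failure
 * escalates the next request up to (but never past) a global reset.
 */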
4007 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4008 enum hnae3_reset_type rst_type)
4010 struct hclge_dev *hdev = ae_dev->priv;
4012 set_bit(rst_type, &hdev->default_reset_request);
4015 static void hclge_reset_timer(struct timer_list *t)
4017 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4019 /* if default_reset_request has no value, it means that this reset
4020 * request has already been handled, so just return here
4022 if (!hdev->default_reset_request)
4025 dev_info(&hdev->pdev->dev,
4026 "triggering reset in reset timer\n");
4027 hclge_reset_event(hdev->pdev, NULL);
4030 static void hclge_reset_subtask(struct hclge_dev *hdev)
4032 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4034 /* check if there is any ongoing reset in the hardware. This status can
4035 * be checked from reset_pending. If there is one, we need to wait for
4036 * the hardware to complete the reset.
4037 * a. If we are able to figure out in reasonable time that the hardware
4038 * has fully reset, then we can proceed with the driver and client
4039 * reset.
4040 * b. else, we can come back later to check this status, so re-sched
4041 * now.
4043 hdev->last_reset_time = jiffies;
4044 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4045 if (hdev->reset_type != HNAE3_NONE_RESET)
4048 /* check if we got any *new* reset requests to be honored */
4049 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4050 if (hdev->reset_type != HNAE3_NONE_RESET)
4051 hclge_do_reset(hdev);
4053 hdev->reset_type = HNAE3_NONE_RESET;
4056 static void hclge_reset_service_task(struct hclge_dev *hdev)
4058 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4061 down(&hdev->reset_sem);
4062 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4064 hclge_reset_subtask(hdev);
4066 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4067 up(&hdev->reset_sem);
4070 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4074 /* start from vport 1, since the PF is always alive */
4075 for (i = 1; i < hdev->num_alloc_vport; i++) {
4076 struct hclge_vport *vport = &hdev->vport[i];
4078 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4079 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4081 /* If the VF is not alive, set its MPS to the default value */
4082 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4083 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4087 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4089 unsigned long delta = round_jiffies_relative(HZ);
4091 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4094 /* Always handle the link updating to make sure link state is
4095 * updated when it is triggered by mbx.
4097 hclge_update_link_status(hdev);
4098 hclge_sync_mac_table(hdev);
4099 hclge_sync_promisc_mode(hdev);
4101 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4102 delta = jiffies - hdev->last_serv_processed;
4104 if (delta < round_jiffies_relative(HZ)) {
4105 delta = round_jiffies_relative(HZ) - delta;
4110 hdev->serv_processed_cnt++;
4111 hclge_update_vport_alive(hdev);
4113 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4114 hdev->last_serv_processed = jiffies;
4118 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4119 hclge_update_stats_for_all(hdev);
4121 hclge_update_port_info(hdev);
4122 hclge_sync_vlan_filter(hdev);
4124 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4125 hclge_rfs_filter_expire(hdev);
4127 hdev->last_serv_processed = jiffies;
4130 hclge_task_schedule(hdev, delta);
4133 static void hclge_service_task(struct work_struct *work)
4135 struct hclge_dev *hdev =
4136 container_of(work, struct hclge_dev, service_task.work);
4138 hclge_reset_service_task(hdev);
4139 hclge_mailbox_service_task(hdev);
4140 hclge_periodic_service_task(hdev);
4142 /* Handle reset and mbx again in case the periodic task delays the
4143 * handling by calling hclge_task_schedule() in
4144 * hclge_periodic_service_task().
4146 hclge_reset_service_task(hdev);
4147 hclge_mailbox_service_task(hdev);
4150 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4152 /* VF handle has no client */
4153 if (!handle->client)
4154 return container_of(handle, struct hclge_vport, nic);
4155 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4156 return container_of(handle, struct hclge_vport, roce);
4158 return container_of(handle, struct hclge_vport, nic);
4161 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4162 struct hnae3_vector_info *vector_info)
4164 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4166 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4168 /* need an extended offset to configure vectors >= 64 */
4169 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4170 vector_info->io_addr = hdev->hw.io_base +
4171 HCLGE_VECTOR_REG_BASE +
4172 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4174 vector_info->io_addr = hdev->hw.io_base +
4175 HCLGE_VECTOR_EXT_REG_BASE +
4176 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4177 HCLGE_VECTOR_REG_OFFSET_H +
4178 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4179 HCLGE_VECTOR_REG_OFFSET;
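/* Worked example of the extended mapping above (hypothetical idx = 65,
 * i.e. idx - 1 = 64): io_addr = io_base + HCLGE_VECTOR_EXT_REG_BASE +
 * (64 / 64) * HCLGE_VECTOR_REG_OFFSET_H +
 * (64 % 64) * HCLGE_VECTOR_REG_OFFSET, so vector 65 is the first
 * entry of the first high-offset group.
 */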
4181 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4182 hdev->vector_irq[idx] = vector_info->vector;
4185 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4186 struct hnae3_vector_info *vector_info)
4188 struct hclge_vport *vport = hclge_get_vport(handle);
4189 struct hnae3_vector_info *vector = vector_info;
4190 struct hclge_dev *hdev = vport->back;
4195 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4196 vector_num = min(hdev->num_msi_left, vector_num);
4198 for (j = 0; j < vector_num; j++) {
4199 while (++i < hdev->num_nic_msi) {
4200 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4201 hclge_get_vector_info(hdev, i, vector);
4209 hdev->num_msi_left -= alloc;
4210 hdev->num_msi_used += alloc;
4215 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4219 for (i = 0; i < hdev->num_msi; i++)
4220 if (vector == hdev->vector_irq[i])
4226 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4228 struct hclge_vport *vport = hclge_get_vport(handle);
4229 struct hclge_dev *hdev = vport->back;
4232 vector_id = hclge_get_vector_index(hdev, vector);
4233 if (vector_id < 0) {
4234 dev_err(&hdev->pdev->dev,
4235 "Get vector index fail. vector = %d\n", vector);
4239 hclge_free_vector(hdev, vector_id);
4244 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4246 return HCLGE_RSS_KEY_SIZE;
4249 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4250 const u8 hfunc, const u8 *key)
4252 struct hclge_rss_config_cmd *req;
4253 unsigned int key_offset = 0;
4254 struct hclge_desc desc;
4259 key_counts = HCLGE_RSS_KEY_SIZE;
4260 req = (struct hclge_rss_config_cmd *)desc.data;
4262 while (key_counts) {
4263 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4266 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4267 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4269 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4270 memcpy(req->hash_key,
4271 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4273 key_counts -= key_size;
4275 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4277 dev_err(&hdev->pdev->dev,
4278 "Configure RSS config fail, status = %d\n",
4286 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4288 struct hclge_rss_indirection_table_cmd *req;
4289 struct hclge_desc desc;
4290 int rss_cfg_tbl_num;
4298 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4299 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4300 HCLGE_RSS_CFG_TBL_SIZE;
4302 for (i = 0; i < rss_cfg_tbl_num; i++) {
4303 hclge_cmd_setup_basic_desc
4304 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4306 req->start_table_index =
4307 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4308 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4309 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4310 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4311 req->rss_qid_l[j] = qid & 0xff;
4313 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4314 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4315 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4316 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
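/* Sketch of the packing above: the low 8 bits of each queue id go
 * into rss_qid_l[j] (the "& 0xff"); the bit above that is extracted
 * by the HCLGE_RSS_CFG_TBL_BW_L shift and packed into rss_qid_h at
 * byte (j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE), bit
 * (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE); the exact field
 * widths are assumptions read off these shifts, not separate facts.
 */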
4318 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4320 dev_err(&hdev->pdev->dev,
4321 "Configure rss indir table fail,status = %d\n",
4329 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4330 u16 *tc_size, u16 *tc_offset)
4332 struct hclge_rss_tc_mode_cmd *req;
4333 struct hclge_desc desc;
4337 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4338 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4340 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4343 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4344 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4345 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4346 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4347 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4348 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4349 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4351 req->rss_tc_mode[i] = cpu_to_le16(mode);
4354 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4356 dev_err(&hdev->pdev->dev,
4357 "Configure rss tc mode fail, status = %d\n", ret);
4362 static void hclge_get_rss_type(struct hclge_vport *vport)
4364 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4365 vport->rss_tuple_sets.ipv4_udp_en ||
4366 vport->rss_tuple_sets.ipv4_sctp_en ||
4367 vport->rss_tuple_sets.ipv6_tcp_en ||
4368 vport->rss_tuple_sets.ipv6_udp_en ||
4369 vport->rss_tuple_sets.ipv6_sctp_en)
4370 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4371 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4372 vport->rss_tuple_sets.ipv6_fragment_en)
4373 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4375 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4378 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4380 struct hclge_rss_input_tuple_cmd *req;
4381 struct hclge_desc desc;
4384 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4386 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4388 /* Get the tuple config from the PF (vport 0) */
4389 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4390 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4391 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4392 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4393 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4394 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4395 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4396 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4397 hclge_get_rss_type(&hdev->vport[0]);
4398 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4400 dev_err(&hdev->pdev->dev,
4401 "Configure rss input fail, status = %d\n", ret);
4405 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4408 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4409 struct hclge_vport *vport = hclge_get_vport(handle);
4412 /* Get hash algorithm */
4414 switch (vport->rss_algo) {
4415 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4416 *hfunc = ETH_RSS_HASH_TOP;
4418 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4419 *hfunc = ETH_RSS_HASH_XOR;
4422 *hfunc = ETH_RSS_HASH_UNKNOWN;
4427 /* Copy out the RSS key, if requested by the user */
4429 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4431 /* Get indirect table */
4433 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4434 indir[i] = vport->rss_indirection_tbl[i];
4439 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4440 const u8 *key, const u8 hfunc)
4442 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4443 struct hclge_vport *vport = hclge_get_vport(handle);
4444 struct hclge_dev *hdev = vport->back;
4448 /* Set the RSS hash key if specified by the user */
4451 case ETH_RSS_HASH_TOP:
4452 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4454 case ETH_RSS_HASH_XOR:
4455 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4457 case ETH_RSS_HASH_NO_CHANGE:
4458 hash_algo = vport->rss_algo;
4464 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4468 /* Update the shadow RSS key with the user specified key */
4469 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4470 vport->rss_algo = hash_algo;
4473 /* Update the shadow RSS table with user specified qids */
4474 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4475 vport->rss_indirection_tbl[i] = indir[i];
4477 /* Update the hardware */
4478 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4481 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4483 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4485 if (nfc->data & RXH_L4_B_2_3)
4486 hash_sets |= HCLGE_D_PORT_BIT;
4488 hash_sets &= ~HCLGE_D_PORT_BIT;
4490 if (nfc->data & RXH_IP_SRC)
4491 hash_sets |= HCLGE_S_IP_BIT;
4493 hash_sets &= ~HCLGE_S_IP_BIT;
4495 if (nfc->data & RXH_IP_DST)
4496 hash_sets |= HCLGE_D_IP_BIT;
4498 hash_sets &= ~HCLGE_D_IP_BIT;
4500 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4501 hash_sets |= HCLGE_V_TAG_BIT;
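/* Worked example: for TCP_V4_FLOW with nfc->data = RXH_IP_SRC | RXH_IP_DST |
 * RXH_L4_B_0_1 | RXH_L4_B_2_3, hclge_get_rss_hash_bits() returns
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT;
 * SCTP flows additionally get HCLGE_V_TAG_BIT, presumably so the SCTP
 * verification tag is hashed as well.
 */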
4506 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4507 struct ethtool_rxnfc *nfc,
4508 struct hclge_rss_input_tuple_cmd *req)
4510 struct hclge_dev *hdev = vport->back;
4513 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4514 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4515 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4516 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4517 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4518 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4519 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4520 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4522 tuple_sets = hclge_get_rss_hash_bits(nfc);
4523 switch (nfc->flow_type) {
4525 req->ipv4_tcp_en = tuple_sets;
4528 req->ipv6_tcp_en = tuple_sets;
4531 req->ipv4_udp_en = tuple_sets;
4534 req->ipv6_udp_en = tuple_sets;
4537 req->ipv4_sctp_en = tuple_sets;
4540 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4541 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4544 req->ipv6_sctp_en = tuple_sets;
4547 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4550 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4559 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4560 struct ethtool_rxnfc *nfc)
4562 struct hclge_vport *vport = hclge_get_vport(handle);
4563 struct hclge_dev *hdev = vport->back;
4564 struct hclge_rss_input_tuple_cmd *req;
4565 struct hclge_desc desc;
4568 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4569 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4572 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4573 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4575 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4577 dev_err(&hdev->pdev->dev,
4578 "failed to init rss tuple cmd, ret = %d\n", ret);
4582 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4584 dev_err(&hdev->pdev->dev,
4585 "Set rss tuple fail, status = %d\n", ret);
4589 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4590 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4591 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4592 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4593 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4594 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4595 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4596 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4597 hclge_get_rss_type(vport);
4601 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4604 switch (flow_type) {
4606 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4609 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4612 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4615 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4618 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4621 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4625 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4634 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4638 if (tuple_sets & HCLGE_D_PORT_BIT)
4639 tuple_data |= RXH_L4_B_2_3;
4640 if (tuple_sets & HCLGE_S_PORT_BIT)
4641 tuple_data |= RXH_L4_B_0_1;
4642 if (tuple_sets & HCLGE_D_IP_BIT)
4643 tuple_data |= RXH_IP_DST;
4644 if (tuple_sets & HCLGE_S_IP_BIT)
4645 tuple_data |= RXH_IP_SRC;
4650 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4651 struct ethtool_rxnfc *nfc)
4653 struct hclge_vport *vport = hclge_get_vport(handle);
4659 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4660 if (ret || !tuple_sets)
4663 nfc->data = hclge_convert_rss_tuple(tuple_sets);
4668 static int hclge_get_tc_size(struct hnae3_handle *handle)
4670 struct hclge_vport *vport = hclge_get_vport(handle);
4671 struct hclge_dev *hdev = vport->back;
4673 return hdev->pf_rss_size_max;
4676 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4678 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4679 struct hclge_vport *vport = hdev->vport;
4680 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4681 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4682 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4683 struct hnae3_tc_info *tc_info;
4688 tc_info = &vport->nic.kinfo.tc_info;
4689 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4690 rss_size = tc_info->tqp_count[i];
4693 if (!(hdev->hw_tc_map & BIT(i)))
4696 /* tc_size set to hardware is the log2 of roundup power of two
4697 * of rss_size, the actual queue size is limited by the indirection
4698 * table.
4699 */
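/* e.g. rss_size = 10: roundup_pow_of_two(10) = 16 and ilog2(16) = 4,
 * so tc_size = 4 and the hardware spreads the hash over 2^4 = 16 slots
 * for this TC.
 */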
4700 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4702 dev_err(&hdev->pdev->dev,
4703 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4708 roundup_size = roundup_pow_of_two(rss_size);
4709 roundup_size = ilog2(roundup_size);
4712 tc_size[i] = roundup_size;
4713 tc_offset[i] = tc_info->tqp_offset[i];
4716 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4719 int hclge_rss_init_hw(struct hclge_dev *hdev)
4721 struct hclge_vport *vport = hdev->vport;
4722 u16 *rss_indir = vport[0].rss_indirection_tbl;
4723 u8 *key = vport[0].rss_hash_key;
4724 u8 hfunc = vport[0].rss_algo;
4727 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4731 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4735 ret = hclge_set_rss_input_tuple(hdev);
4739 return hclge_init_rss_tc_mode(hdev);
4742 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4744 struct hclge_vport *vport = hdev->vport;
4747 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4748 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4749 vport[j].rss_indirection_tbl[i] =
4750 i % vport[j].alloc_rss_size;
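/* e.g. with alloc_rss_size = 8 the table becomes 0, 1, ..., 7, 0, 1, ...,
 * so hash results are spread round-robin over the vport's RSS queues.
 */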
4754 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4756 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4757 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4758 struct hclge_vport *vport = hdev->vport;
4760 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4761 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4763 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4766 vport[i].rss_tuple_sets.ipv4_tcp_en =
4767 HCLGE_RSS_INPUT_TUPLE_OTHER;
4768 vport[i].rss_tuple_sets.ipv4_udp_en =
4769 HCLGE_RSS_INPUT_TUPLE_OTHER;
4770 vport[i].rss_tuple_sets.ipv4_sctp_en =
4771 HCLGE_RSS_INPUT_TUPLE_SCTP;
4772 vport[i].rss_tuple_sets.ipv4_fragment_en =
4773 HCLGE_RSS_INPUT_TUPLE_OTHER;
4774 vport[i].rss_tuple_sets.ipv6_tcp_en =
4775 HCLGE_RSS_INPUT_TUPLE_OTHER;
4776 vport[i].rss_tuple_sets.ipv6_udp_en =
4777 HCLGE_RSS_INPUT_TUPLE_OTHER;
4778 vport[i].rss_tuple_sets.ipv6_sctp_en =
4779 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4780 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4781 HCLGE_RSS_INPUT_TUPLE_SCTP;
4782 vport[i].rss_tuple_sets.ipv6_fragment_en =
4783 HCLGE_RSS_INPUT_TUPLE_OTHER;
4785 vport[i].rss_algo = rss_algo;
4787 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4788 sizeof(*rss_ind_tbl), GFP_KERNEL);
4792 vport[i].rss_indirection_tbl = rss_ind_tbl;
4793 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4794 HCLGE_RSS_KEY_SIZE);
4797 hclge_rss_indir_init_cfg(hdev);
4802 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4803 int vector_id, bool en,
4804 struct hnae3_ring_chain_node *ring_chain)
4806 struct hclge_dev *hdev = vport->back;
4807 struct hnae3_ring_chain_node *node;
4808 struct hclge_desc desc;
4809 struct hclge_ctrl_vector_chain_cmd *req =
4810 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4811 enum hclge_cmd_status status;
4812 enum hclge_opcode_type op;
4813 u16 tqp_type_and_id;
4816 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4817 hclge_cmd_setup_basic_desc(&desc, op, false);
4818 req->int_vector_id_l = hnae3_get_field(vector_id,
4819 HCLGE_VECTOR_ID_L_M,
4820 HCLGE_VECTOR_ID_L_S);
4821 req->int_vector_id_h = hnae3_get_field(vector_id,
4822 HCLGE_VECTOR_ID_H_M,
4823 HCLGE_VECTOR_ID_H_S);
4826 for (node = ring_chain; node; node = node->next) {
4827 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4828 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4830 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4831 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4832 HCLGE_TQP_ID_S, node->tqp_index);
4833 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4835 hnae3_get_field(node->int_gl_idx,
4836 HNAE3_RING_GL_IDX_M,
4837 HNAE3_RING_GL_IDX_S));
4838 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4839 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4840 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4841 req->vfid = vport->vport_id;
4843 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4845 dev_err(&hdev->pdev->dev,
4846 "Map TQP fail, status is %d.\n",
4852 hclge_cmd_setup_basic_desc(&desc,
4855 req->int_vector_id_l =
4856 hnae3_get_field(vector_id,
4857 HCLGE_VECTOR_ID_L_M,
4858 HCLGE_VECTOR_ID_L_S);
4859 req->int_vector_id_h =
4860 hnae3_get_field(vector_id,
4861 HCLGE_VECTOR_ID_H_M,
4862 HCLGE_VECTOR_ID_H_S);
4867 req->int_cause_num = i;
4868 req->vfid = vport->vport_id;
4869 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4871 dev_err(&hdev->pdev->dev,
4872 "Map TQP fail, status is %d.\n", status);
4880 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4881 struct hnae3_ring_chain_node *ring_chain)
4883 struct hclge_vport *vport = hclge_get_vport(handle);
4884 struct hclge_dev *hdev = vport->back;
4887 vector_id = hclge_get_vector_index(hdev, vector);
4888 if (vector_id < 0) {
4889 dev_err(&hdev->pdev->dev,
4890 "failed to get vector index. vector=%d\n", vector);
4894 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4897 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4898 struct hnae3_ring_chain_node *ring_chain)
4900 struct hclge_vport *vport = hclge_get_vport(handle);
4901 struct hclge_dev *hdev = vport->back;
4904 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4907 vector_id = hclge_get_vector_index(hdev, vector);
4908 if (vector_id < 0) {
4909 dev_err(&handle->pdev->dev,
4910 "Get vector index fail. ret =%d\n", vector_id);
4914 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4916 dev_err(&handle->pdev->dev,
4917 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4923 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
4924 bool en_uc, bool en_mc, bool en_bc)
4926 struct hclge_vport *vport = &hdev->vport[vf_id];
4927 struct hnae3_handle *handle = &vport->nic;
4928 struct hclge_promisc_cfg_cmd *req;
4929 struct hclge_desc desc;
4930 bool uc_tx_en = en_uc;
4934 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4936 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4939 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
4942 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
4943 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
4944 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
4945 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
4946 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
4947 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
4948 req->extend_promisc = promisc_cfg;
4950 /* to be compatible with DEVICE_VERSION_V1/2 */
4952 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
4953 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
4954 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
4955 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
4956 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
4957 req->promisc = promisc_cfg;
4959 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4961 dev_err(&hdev->pdev->dev,
4962 "failed to set vport %u promisc mode, ret = %d.\n",
4968 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4969 bool en_mc_pmc, bool en_bc_pmc)
4971 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
4972 en_uc_pmc, en_mc_pmc, en_bc_pmc);
4975 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4978 struct hclge_vport *vport = hclge_get_vport(handle);
4979 struct hclge_dev *hdev = vport->back;
4980 bool en_bc_pmc = true;
4982 /* For devices whose version is below V2, if broadcast promisc is
4983 * enabled, the vlan filter is always bypassed. So broadcast promisc
4984 * should stay disabled until the user enables promisc mode.
4986 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4987 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4989 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4993 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4995 struct hclge_vport *vport = hclge_get_vport(handle);
4996 struct hclge_dev *hdev = vport->back;
4998 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
5001 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5003 struct hclge_get_fd_mode_cmd *req;
5004 struct hclge_desc desc;
5007 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5009 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5011 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5013 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5017 *fd_mode = req->mode;
5022 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5023 u32 *stage1_entry_num,
5024 u32 *stage2_entry_num,
5025 u16 *stage1_counter_num,
5026 u16 *stage2_counter_num)
5028 struct hclge_get_fd_allocation_cmd *req;
5029 struct hclge_desc desc;
5032 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5034 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5036 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5038 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5043 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5044 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5045 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5046 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5051 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5052 enum HCLGE_FD_STAGE stage_num)
5054 struct hclge_set_fd_key_config_cmd *req;
5055 struct hclge_fd_key_cfg *stage;
5056 struct hclge_desc desc;
5059 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5061 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5062 stage = &hdev->fd_cfg.key_cfg[stage_num];
5063 req->stage = stage_num;
5064 req->key_select = stage->key_sel;
5065 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5066 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5067 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5068 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5069 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5070 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5072 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5074 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5079 static int hclge_init_fd_config(struct hclge_dev *hdev)
5081 #define LOW_2_WORDS 0x03
5082 struct hclge_fd_key_cfg *key_cfg;
5085 if (!hnae3_dev_fd_supported(hdev))
5088 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5092 switch (hdev->fd_cfg.fd_mode) {
5093 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5094 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5096 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5097 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5100 dev_err(&hdev->pdev->dev,
5101 "Unsupported flow director mode %u\n",
5102 hdev->fd_cfg.fd_mode);
5106 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5107 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5108 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5109 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5110 key_cfg->outer_sipv6_word_en = 0;
5111 key_cfg->outer_dipv6_word_en = 0;
5113 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5114 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5115 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5116 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5118 /* If the max 400-bit key is used, tuples for the ether type can also be supported */
5119 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5120 key_cfg->tuple_active |=
5121 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5123 /* roce_type is used to filter roce frames
5124 * dst_vport is used to specify the rule
5126 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5128 ret = hclge_get_fd_allocation(hdev,
5129 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5130 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5131 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5132 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5136 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5139 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5140 int loc, u8 *key, bool is_add)
5142 struct hclge_fd_tcam_config_1_cmd *req1;
5143 struct hclge_fd_tcam_config_2_cmd *req2;
5144 struct hclge_fd_tcam_config_3_cmd *req3;
5145 struct hclge_desc desc[3];
5148 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5149 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5150 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5151 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5152 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5154 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5155 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5156 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5158 req1->stage = stage;
5159 req1->xy_sel = sel_x ? 1 : 0;
5160 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5161 req1->index = cpu_to_le32(loc);
5162 req1->entry_vld = sel_x ? is_add : 0;
5165 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5166 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5167 sizeof(req2->tcam_data));
5168 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5169 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5172 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5174 dev_err(&hdev->pdev->dev,
5175 "config tcam key fail, ret=%d\n",
5181 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5182 struct hclge_fd_ad_data *action)
5184 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5185 struct hclge_fd_ad_config_cmd *req;
5186 struct hclge_desc desc;
5190 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5192 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5193 req->index = cpu_to_le32(loc);
5196 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5197 action->write_rule_id_to_bd);
5198 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5200 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5201 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5202 action->override_tc);
5203 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5204 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5207 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5208 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5209 action->forward_to_direct_queue);
5210 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5212 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5213 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5214 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5215 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5216 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5217 action->next_input_key);
5219 req->ad_data = cpu_to_le64(ad_data);
5220 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5222 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5227 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5228 struct hclge_fd_rule *rule)
5230 u16 tmp_x_s, tmp_y_s;
5231 u32 tmp_x_l, tmp_y_l;
5234 if (rule->unused_tuple & tuple_bit)
5237 switch (tuple_bit) {
5238 case BIT(INNER_DST_MAC):
5239 for (i = 0; i < ETH_ALEN; i++) {
5240 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5241 rule->tuples_mask.dst_mac[i]);
5242 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5243 rule->tuples_mask.dst_mac[i]);
5247 case BIT(INNER_SRC_MAC):
5248 for (i = 0; i < ETH_ALEN; i++) {
5249 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5250 rule->tuples_mask.src_mac[i]);
5251 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5252 rule->tuples_mask.src_mac[i]);
5256 case BIT(INNER_VLAN_TAG_FST):
5257 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5258 rule->tuples_mask.vlan_tag1);
5259 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5260 rule->tuples_mask.vlan_tag1);
5261 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5262 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5265 case BIT(INNER_ETH_TYPE):
5266 calc_x(tmp_x_s, rule->tuples.ether_proto,
5267 rule->tuples_mask.ether_proto);
5268 calc_y(tmp_y_s, rule->tuples.ether_proto,
5269 rule->tuples_mask.ether_proto);
5270 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5271 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5274 case BIT(INNER_IP_TOS):
5275 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5276 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5279 case BIT(INNER_IP_PROTO):
5280 calc_x(*key_x, rule->tuples.ip_proto,
5281 rule->tuples_mask.ip_proto);
5282 calc_y(*key_y, rule->tuples.ip_proto,
5283 rule->tuples_mask.ip_proto);
5286 case BIT(INNER_SRC_IP):
5287 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5288 rule->tuples_mask.src_ip[IPV4_INDEX]);
5289 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5290 rule->tuples_mask.src_ip[IPV4_INDEX]);
5291 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5292 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5295 case BIT(INNER_DST_IP):
5296 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5297 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5298 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5299 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5300 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5301 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5304 case BIT(INNER_SRC_PORT):
5305 calc_x(tmp_x_s, rule->tuples.src_port,
5306 rule->tuples_mask.src_port);
5307 calc_y(tmp_y_s, rule->tuples.src_port,
5308 rule->tuples_mask.src_port);
5309 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5310 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5313 case BIT(INNER_DST_PORT):
5314 calc_x(tmp_x_s, rule->tuples.dst_port,
5315 rule->tuples_mask.dst_port);
5316 calc_y(tmp_y_s, rule->tuples.dst_port,
5317 rule->tuples_mask.dst_port);
5318 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5319 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
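/* calc_x()/calc_y() build the usual TCAM X/Y encoding (a sketch, assuming
 * x = value & mask and y = ~value & mask): per bit, (1, 0) matches a set
 * bit, (0, 1) matches a clear bit, and (0, 0) means "don't care", which is
 * why tuples flagged unused can simply be skipped above.
 */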
5327 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5328 u8 vf_id, u8 network_port_id)
5330 u32 port_number = 0;
5332 if (port_type == HOST_PORT) {
5333 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5335 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5337 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5339 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5340 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5341 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5347 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5348 __le32 *key_x, __le32 *key_y,
5349 struct hclge_fd_rule *rule)
5351 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5352 u8 cur_pos = 0, tuple_size, shift_bits;
5355 for (i = 0; i < MAX_META_DATA; i++) {
5356 tuple_size = meta_data_key_info[i].key_length;
5357 tuple_bit = key_cfg->meta_data_active & BIT(i);
5359 switch (tuple_bit) {
5360 case BIT(ROCE_TYPE):
5361 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5362 cur_pos += tuple_size;
5364 case BIT(DST_VPORT):
5365 port_number = hclge_get_port_number(HOST_PORT, 0,
5367 hnae3_set_field(meta_data,
5368 GENMASK(cur_pos + tuple_size, cur_pos),
5369 cur_pos, port_number);
5370 cur_pos += tuple_size;
5377 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5378 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5379 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5381 *key_x = cpu_to_le32(tmp_x << shift_bits);
5382 *key_y = cpu_to_le32(tmp_y << shift_bits);
5385 /* A complete key consists of a meta data key and a tuple key.
5386 * The meta data key is stored in the MSB region, the tuple key in
5387 * the LSB region, and unused bits are filled with 0.
5388 */
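/* Layout sketch, assuming the 400-bit key mode with 32 bits of meta data
 * (MAX_META_DATA_LENGTH): the 50-byte key holds tuple bytes at offsets
 * 0..45 and the meta data words in the last 4 bytes, which matches the
 * meta_data_region computation below.
 */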
5389 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5390 struct hclge_fd_rule *rule)
5392 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5393 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5394 u8 *cur_key_x, *cur_key_y;
5395 u8 meta_data_region;
5400 memset(key_x, 0, sizeof(key_x));
5401 memset(key_y, 0, sizeof(key_y));
5405 for (i = 0; i < MAX_TUPLE; i++) {
5409 tuple_size = tuple_key_info[i].key_length / 8;
5410 check_tuple = key_cfg->tuple_active & BIT(i);
5412 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5415 cur_key_x += tuple_size;
5416 cur_key_y += tuple_size;
5420 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5421 MAX_META_DATA_LENGTH / 8;
5423 hclge_fd_convert_meta_data(key_cfg,
5424 (__le32 *)(key_x + meta_data_region),
5425 (__le32 *)(key_y + meta_data_region),
5428 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5431 dev_err(&hdev->pdev->dev,
5432 "fd key_y config fail, loc=%u, ret=%d\n",
5433 rule->location, ret);
5437 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5440 dev_err(&hdev->pdev->dev,
5441 "fd key_x config fail, loc=%u, ret=%d\n",
5442 rule->location, ret);
5446 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5447 struct hclge_fd_rule *rule)
5449 struct hclge_vport *vport = hdev->vport;
5450 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5451 struct hclge_fd_ad_data ad_data;
5453 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5454 ad_data.ad_id = rule->location;
5456 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5457 ad_data.drop_packet = true;
5458 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5459 ad_data.override_tc = true;
5460 ad_data.queue_id =
5461 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5462 ad_data.tc_size =
5463 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5465 ad_data.forward_to_direct_queue = true;
5466 ad_data.queue_id = rule->queue_id;
5469 ad_data.use_counter = false;
5470 ad_data.counter_id = 0;
5472 ad_data.use_next_stage = false;
5473 ad_data.next_input_key = 0;
5475 ad_data.write_rule_id_to_bd = true;
5476 ad_data.rule_id = rule->location;
5478 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
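/* write_rule_id_to_bd asks the hardware to tag matched packets with
 * rule->location in the RX buffer descriptor, presumably so software can
 * tell which filter a received packet hit.
 */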
5481 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5484 if (!spec || !unused_tuple)
5487 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5490 *unused_tuple |= BIT(INNER_SRC_IP);
5493 *unused_tuple |= BIT(INNER_DST_IP);
5496 *unused_tuple |= BIT(INNER_SRC_PORT);
5499 *unused_tuple |= BIT(INNER_DST_PORT);
5502 *unused_tuple |= BIT(INNER_IP_TOS);
5507 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5510 if (!spec || !unused_tuple)
5513 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5514 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5517 *unused_tuple |= BIT(INNER_SRC_IP);
5520 *unused_tuple |= BIT(INNER_DST_IP);
5523 *unused_tuple |= BIT(INNER_IP_TOS);
5526 *unused_tuple |= BIT(INNER_IP_PROTO);
5528 if (spec->l4_4_bytes)
5531 if (spec->ip_ver != ETH_RX_NFC_IP4)
5537 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5540 if (!spec || !unused_tuple)
5543 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5546 /* check whether src/dst ip address used */
5547 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5548 *unused_tuple |= BIT(INNER_SRC_IP);
5550 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5551 *unused_tuple |= BIT(INNER_DST_IP);
5554 *unused_tuple |= BIT(INNER_SRC_PORT);
5557 *unused_tuple |= BIT(INNER_DST_PORT);
5565 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5568 if (!spec || !unused_tuple)
5571 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5572 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5574 /* check whether src/dst ip address used */
5575 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5576 *unused_tuple |= BIT(INNER_SRC_IP);
5578 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5579 *unused_tuple |= BIT(INNER_DST_IP);
5581 if (!spec->l4_proto)
5582 *unused_tuple |= BIT(INNER_IP_PROTO);
5587 if (spec->l4_4_bytes)
5593 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5595 if (!spec || !unused_tuple)
5598 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5599 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5600 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5602 if (is_zero_ether_addr(spec->h_source))
5603 *unused_tuple |= BIT(INNER_SRC_MAC);
5605 if (is_zero_ether_addr(spec->h_dest))
5606 *unused_tuple |= BIT(INNER_DST_MAC);
5609 *unused_tuple |= BIT(INNER_ETH_TYPE);
5614 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5615 struct ethtool_rx_flow_spec *fs,
5618 if (fs->flow_type & FLOW_EXT) {
5619 if (fs->h_ext.vlan_etype) {
5620 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5624 if (!fs->h_ext.vlan_tci)
5625 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5627 if (fs->m_ext.vlan_tci &&
5628 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5629 dev_err(&hdev->pdev->dev,
5630 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5631 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5635 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5638 if (fs->flow_type & FLOW_MAC_EXT) {
5639 if (hdev->fd_cfg.fd_mode !=
5640 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5641 dev_err(&hdev->pdev->dev,
5642 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5646 if (is_zero_ether_addr(fs->h_ext.h_dest))
5647 *unused_tuple |= BIT(INNER_DST_MAC);
5649 *unused_tuple &= ~BIT(INNER_DST_MAC);
5655 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5656 struct ethtool_rx_flow_spec *fs,
5662 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5663 dev_err(&hdev->pdev->dev,
5664 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5666 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5670 if ((fs->flow_type & FLOW_EXT) &&
5671 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5672 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5676 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5677 switch (flow_type) {
5681 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5685 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5691 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5694 case IPV6_USER_FLOW:
5695 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5699 if (hdev->fd_cfg.fd_mode !=
5700 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5701 dev_err(&hdev->pdev->dev,
5702 "ETHER_FLOW is not supported in current fd mode!\n");
5706 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5710 dev_err(&hdev->pdev->dev,
5711 "unsupported protocol type, protocol type = %#x\n",
5717 dev_err(&hdev->pdev->dev,
5718 "failed to check flow union tuple, ret = %d\n",
5723 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5726 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5728 struct hclge_fd_rule *rule = NULL;
5729 struct hlist_node *node2;
5731 spin_lock_bh(&hdev->fd_rule_lock);
5732 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5733 if (rule->location >= location)
5737 spin_unlock_bh(&hdev->fd_rule_lock);
5739 return rule && rule->location == location;
5742 /* the caller must hold fd_rule_lock */
5743 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5744 struct hclge_fd_rule *new_rule,
5748 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5749 struct hlist_node *node2;
5751 if (is_add && !new_rule)
5754 hlist_for_each_entry_safe(rule, node2,
5755 &hdev->fd_rule_list, rule_node) {
5756 if (rule->location >= location)
5761 if (rule && rule->location == location) {
5762 hlist_del(&rule->rule_node);
5764 hdev->hclge_fd_rule_num--;
5767 if (!hdev->hclge_fd_rule_num)
5768 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5769 clear_bit(location, hdev->fd_bmap);
5773 } else if (!is_add) {
5774 dev_err(&hdev->pdev->dev,
5775 "delete fail, rule %u is inexistent\n",
5780 INIT_HLIST_NODE(&new_rule->rule_node);
5783 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5785 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5787 set_bit(location, hdev->fd_bmap);
5788 hdev->hclge_fd_rule_num++;
5789 hdev->fd_active_type = new_rule->rule_type;
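/* The rule list is kept sorted by location: the walk above stops at the
 * first rule at or beyond the target, and the new node is then inserted
 * behind its predecessor (or at the head), so lookups stay ordered.
 */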
5794 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5795 struct ethtool_rx_flow_spec *fs,
5796 struct hclge_fd_rule *rule)
5798 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5800 switch (flow_type) {
5804 rule->tuples.src_ip[IPV4_INDEX] =
5805 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5806 rule->tuples_mask.src_ip[IPV4_INDEX] =
5807 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5809 rule->tuples.dst_ip[IPV4_INDEX] =
5810 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5811 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5812 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5814 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5815 rule->tuples_mask.src_port =
5816 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5818 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5819 rule->tuples_mask.dst_port =
5820 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5822 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5823 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5825 rule->tuples.ether_proto = ETH_P_IP;
5826 rule->tuples_mask.ether_proto = 0xFFFF;
5830 rule->tuples.src_ip[IPV4_INDEX] =
5831 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5832 rule->tuples_mask.src_ip[IPV4_INDEX] =
5833 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5835 rule->tuples.dst_ip[IPV4_INDEX] =
5836 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5837 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5838 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5840 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5841 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5843 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5844 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5846 rule->tuples.ether_proto = ETH_P_IP;
5847 rule->tuples_mask.ether_proto = 0xFFFF;
5853 be32_to_cpu_array(rule->tuples.src_ip,
5854 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5855 be32_to_cpu_array(rule->tuples_mask.src_ip,
5856 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5858 be32_to_cpu_array(rule->tuples.dst_ip,
5859 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5860 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5861 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5863 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5864 rule->tuples_mask.src_port =
5865 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5867 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5868 rule->tuples_mask.dst_port =
5869 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5871 rule->tuples.ether_proto = ETH_P_IPV6;
5872 rule->tuples_mask.ether_proto = 0xFFFF;
5875 case IPV6_USER_FLOW:
5876 be32_to_cpu_array(rule->tuples.src_ip,
5877 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5878 be32_to_cpu_array(rule->tuples_mask.src_ip,
5879 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5881 be32_to_cpu_array(rule->tuples.dst_ip,
5882 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5883 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5884 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5886 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5887 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5889 rule->tuples.ether_proto = ETH_P_IPV6;
5890 rule->tuples_mask.ether_proto = 0xFFFF;
5894 ether_addr_copy(rule->tuples.src_mac,
5895 fs->h_u.ether_spec.h_source);
5896 ether_addr_copy(rule->tuples_mask.src_mac,
5897 fs->m_u.ether_spec.h_source);
5899 ether_addr_copy(rule->tuples.dst_mac,
5900 fs->h_u.ether_spec.h_dest);
5901 ether_addr_copy(rule->tuples_mask.dst_mac,
5902 fs->m_u.ether_spec.h_dest);
5904 rule->tuples.ether_proto =
5905 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5906 rule->tuples_mask.ether_proto =
5907 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5914 switch (flow_type) {
5917 rule->tuples.ip_proto = IPPROTO_SCTP;
5918 rule->tuples_mask.ip_proto = 0xFF;
5922 rule->tuples.ip_proto = IPPROTO_TCP;
5923 rule->tuples_mask.ip_proto = 0xFF;
5927 rule->tuples.ip_proto = IPPROTO_UDP;
5928 rule->tuples_mask.ip_proto = 0xFF;
5934 if (fs->flow_type & FLOW_EXT) {
5935 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5936 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5939 if (fs->flow_type & FLOW_MAC_EXT) {
5940 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5941 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5947 /* the caller must hold fd_rule_lock */
5948 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5949 struct hclge_fd_rule *rule)
5954 dev_err(&hdev->pdev->dev,
5955 "The flow director rule is NULL\n");
5959 /* it never fails here, so there is no need to check the return value */
5960 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5962 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5966 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5973 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5977 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
5979 struct hclge_vport *vport = hclge_get_vport(handle);
5980 struct hclge_dev *hdev = vport->back;
5982 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
5985 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5986 struct ethtool_rxnfc *cmd)
5988 struct hclge_vport *vport = hclge_get_vport(handle);
5989 struct hclge_dev *hdev = vport->back;
5990 u16 dst_vport_id = 0, q_index = 0;
5991 struct ethtool_rx_flow_spec *fs;
5992 struct hclge_fd_rule *rule;
5997 if (!hnae3_dev_fd_supported(hdev)) {
5998 dev_err(&hdev->pdev->dev,
5999 "flow table director is not supported\n");
6004 dev_err(&hdev->pdev->dev,
6005 "please enable flow director first\n");
6009 if (hclge_is_cls_flower_active(handle)) {
6010 dev_err(&hdev->pdev->dev,
6011 "please delete all exist cls flower rules first\n");
6015 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6017 ret = hclge_fd_check_spec(hdev, fs, &unused);
6021 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
6022 action = HCLGE_FD_ACTION_DROP_PACKET;
6024 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
6025 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
6028 if (vf > hdev->num_req_vfs) {
6029 dev_err(&hdev->pdev->dev,
6030 "Error: vf id (%u) > max vf num (%u)\n",
6031 vf, hdev->num_req_vfs);
6035 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6036 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
6039 dev_err(&hdev->pdev->dev,
6040 "Error: queue id (%u) > max tqp num (%u)\n",
6045 action = HCLGE_FD_ACTION_SELECT_QUEUE;
6049 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6053 ret = hclge_fd_get_tuple(hdev, fs, rule);
6059 rule->flow_type = fs->flow_type;
6060 rule->location = fs->location;
6061 rule->unused_tuple = unused;
6062 rule->vf_id = dst_vport_id;
6063 rule->queue_id = q_index;
6064 rule->action = action;
6065 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6067 /* to avoid rule conflicts, when the user configures a rule via
6068 * ethtool, we need to clear all arfs rules
6070 spin_lock_bh(&hdev->fd_rule_lock);
6071 hclge_clear_arfs_rules(handle);
6073 ret = hclge_fd_config_rule(hdev, rule);
6075 spin_unlock_bh(&hdev->fd_rule_lock);
6080 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6081 struct ethtool_rxnfc *cmd)
6083 struct hclge_vport *vport = hclge_get_vport(handle);
6084 struct hclge_dev *hdev = vport->back;
6085 struct ethtool_rx_flow_spec *fs;
6088 if (!hnae3_dev_fd_supported(hdev))
6091 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6093 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6096 if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6097 !hclge_fd_rule_exist(hdev, fs->location)) {
6098 dev_err(&hdev->pdev->dev,
6099 "Delete fail, rule %u is inexistent\n", fs->location);
6103 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6108 spin_lock_bh(&hdev->fd_rule_lock);
6109 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6111 spin_unlock_bh(&hdev->fd_rule_lock);
6116 /* the caller must hold fd_rule_lock */
6117 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6120 struct hclge_vport *vport = hclge_get_vport(handle);
6121 struct hclge_dev *hdev = vport->back;
6122 struct hclge_fd_rule *rule;
6123 struct hlist_node *node;
6126 if (!hnae3_dev_fd_supported(hdev))
6129 for_each_set_bit(location, hdev->fd_bmap,
6130 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6131 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6135 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6137 hlist_del(&rule->rule_node);
6140 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6141 hdev->hclge_fd_rule_num = 0;
6142 bitmap_zero(hdev->fd_bmap,
6143 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6147 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6149 struct hclge_vport *vport = hclge_get_vport(handle);
6150 struct hclge_dev *hdev = vport->back;
6151 struct hclge_fd_rule *rule;
6152 struct hlist_node *node;
6155 /* Return ok here, because reset error handling will check this
6156 * return value. If an error is returned here, the reset process
6157 * will fail.
6158 */
6159 if (!hnae3_dev_fd_supported(hdev))
6162 /* if fd is disabled, the rules should not be restored during reset */
6166 spin_lock_bh(&hdev->fd_rule_lock);
6167 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6168 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6170 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6173 dev_warn(&hdev->pdev->dev,
6174 "Restore rule %u failed, remove it\n",
6176 clear_bit(rule->location, hdev->fd_bmap);
6177 hlist_del(&rule->rule_node);
6179 hdev->hclge_fd_rule_num--;
6183 if (hdev->hclge_fd_rule_num)
6184 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6186 spin_unlock_bh(&hdev->fd_rule_lock);
6191 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6192 struct ethtool_rxnfc *cmd)
6194 struct hclge_vport *vport = hclge_get_vport(handle);
6195 struct hclge_dev *hdev = vport->back;
6197 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6200 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6201 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6206 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6207 struct ethtool_tcpip4_spec *spec,
6208 struct ethtool_tcpip4_spec *spec_mask)
6210 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6211 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6212 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6214 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6215 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6216 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6218 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6219 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6220 0 : cpu_to_be16(rule->tuples_mask.src_port);
6222 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6223 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6224 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6226 spec->tos = rule->tuples.ip_tos;
6227 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6228 0 : rule->tuples_mask.ip_tos;
6231 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6232 struct ethtool_usrip4_spec *spec,
6233 struct ethtool_usrip4_spec *spec_mask)
6235 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6236 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6237 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6239 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6240 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6241 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6243 spec->tos = rule->tuples.ip_tos;
6244 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6245 0 : rule->tuples_mask.ip_tos;
6247 spec->proto = rule->tuples.ip_proto;
6248 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6249 0 : rule->tuples_mask.ip_proto;
6251 spec->ip_ver = ETH_RX_NFC_IP4;
6254 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6255 struct ethtool_tcpip6_spec *spec,
6256 struct ethtool_tcpip6_spec *spec_mask)
6258 cpu_to_be32_array(spec->ip6src,
6259 rule->tuples.src_ip, IPV6_SIZE);
6260 cpu_to_be32_array(spec->ip6dst,
6261 rule->tuples.dst_ip, IPV6_SIZE);
6262 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6263 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6265 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6268 if (rule->unused_tuple & BIT(INNER_DST_IP))
6269 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6271 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6274 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6275 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6276 0 : cpu_to_be16(rule->tuples_mask.src_port);
6278 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6279 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6280 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6283 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6284 struct ethtool_usrip6_spec *spec,
6285 struct ethtool_usrip6_spec *spec_mask)
6287 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6288 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6289 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6290 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6292 cpu_to_be32_array(spec_mask->ip6src,
6293 rule->tuples_mask.src_ip, IPV6_SIZE);
6295 if (rule->unused_tuple & BIT(INNER_DST_IP))
6296 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6298 cpu_to_be32_array(spec_mask->ip6dst,
6299 rule->tuples_mask.dst_ip, IPV6_SIZE);
6301 spec->l4_proto = rule->tuples.ip_proto;
6302 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6303 0 : rule->tuples_mask.ip_proto;
6306 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6307 struct ethhdr *spec,
6308 struct ethhdr *spec_mask)
6310 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6311 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6313 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6314 eth_zero_addr(spec_mask->h_source);
6316 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6318 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6319 eth_zero_addr(spec_mask->h_dest);
6321 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6323 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6324 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6325 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6328 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6329 struct hclge_fd_rule *rule)
6331 if (fs->flow_type & FLOW_EXT) {
6332 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6333 fs->m_ext.vlan_tci =
6334 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6335 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6338 if (fs->flow_type & FLOW_MAC_EXT) {
6339 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6340 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6341 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6343 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6344 rule->tuples_mask.dst_mac);
6348 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6349 struct ethtool_rxnfc *cmd)
6351 struct hclge_vport *vport = hclge_get_vport(handle);
6352 struct hclge_fd_rule *rule = NULL;
6353 struct hclge_dev *hdev = vport->back;
6354 struct ethtool_rx_flow_spec *fs;
6355 struct hlist_node *node2;
6357 if (!hnae3_dev_fd_supported(hdev))
6360 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6362 spin_lock_bh(&hdev->fd_rule_lock);
6364 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6365 if (rule->location >= fs->location)
6369 if (!rule || fs->location != rule->location) {
6370 spin_unlock_bh(&hdev->fd_rule_lock);
6375 fs->flow_type = rule->flow_type;
6376 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6380 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6381 &fs->m_u.tcp_ip4_spec);
6384 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6385 &fs->m_u.usr_ip4_spec);
6390 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6391 &fs->m_u.tcp_ip6_spec);
6393 case IPV6_USER_FLOW:
6394 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6395 &fs->m_u.usr_ip6_spec);
6397 /* The flow type of the fd rule has been checked before it was added
6398 * to the rule list. As the other flow types have been handled, it
6399 * must be ETHER_FLOW for the default case.
6402 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6403 &fs->m_u.ether_spec);
6407 hclge_fd_get_ext_info(fs, rule);
6409 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6410 fs->ring_cookie = RX_CLS_FLOW_DISC;
6414 fs->ring_cookie = rule->queue_id;
6415 vf_id = rule->vf_id;
6416 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6417 fs->ring_cookie |= vf_id;
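/* e.g. vf_id = 2 and queue_id = 5 yield ring_cookie = (2ULL << 32) | 5,
 * assuming ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF is 32 as in ethtool.h.
 */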
6420 spin_unlock_bh(&hdev->fd_rule_lock);
6425 static int hclge_get_all_rules(struct hnae3_handle *handle,
6426 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6428 struct hclge_vport *vport = hclge_get_vport(handle);
6429 struct hclge_dev *hdev = vport->back;
6430 struct hclge_fd_rule *rule;
6431 struct hlist_node *node2;
6434 if (!hnae3_dev_fd_supported(hdev))
6437 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6439 spin_lock_bh(&hdev->fd_rule_lock);
6440 hlist_for_each_entry_safe(rule, node2,
6441 &hdev->fd_rule_list, rule_node) {
6442 if (cnt == cmd->rule_cnt) {
6443 spin_unlock_bh(&hdev->fd_rule_lock);
6447 rule_locs[cnt] = rule->location;
6451 spin_unlock_bh(&hdev->fd_rule_lock);
6453 cmd->rule_cnt = cnt;
6458 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6459 struct hclge_fd_rule_tuples *tuples)
6461 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6462 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6464 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6465 tuples->ip_proto = fkeys->basic.ip_proto;
6466 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6468 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6469 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6470 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6474 for (i = 0; i < IPV6_SIZE; i++) {
6475 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6476 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6481 /* traverse all rules, check whether an existing rule has the same tuples */
6482 static struct hclge_fd_rule *
6483 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6484 const struct hclge_fd_rule_tuples *tuples)
6486 struct hclge_fd_rule *rule = NULL;
6487 struct hlist_node *node;
6489 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6490 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6497 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6498 struct hclge_fd_rule *rule)
6500 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6501 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6502 BIT(INNER_SRC_PORT);
6505 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6506 if (tuples->ether_proto == ETH_P_IP) {
6507 if (tuples->ip_proto == IPPROTO_TCP)
6508 rule->flow_type = TCP_V4_FLOW;
6510 rule->flow_type = UDP_V4_FLOW;
6512 if (tuples->ip_proto == IPPROTO_TCP)
6513 rule->flow_type = TCP_V6_FLOW;
6515 rule->flow_type = UDP_V6_FLOW;
6517 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6518 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6521 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6522 u16 flow_id, struct flow_keys *fkeys)
6524 struct hclge_vport *vport = hclge_get_vport(handle);
6525 struct hclge_fd_rule_tuples new_tuples = {};
6526 struct hclge_dev *hdev = vport->back;
6527 struct hclge_fd_rule *rule;
6532 if (!hnae3_dev_fd_supported(hdev))
6535 /* when an fd rule added by the user already exists,
6536 * arfs should not work
6538 spin_lock_bh(&hdev->fd_rule_lock);
6539 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6540 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6541 spin_unlock_bh(&hdev->fd_rule_lock);
6545 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6547 /* check whether a flow director filter exists for this flow;
6548 * if not, create a new filter for it;
6549 * if a filter exists with a different queue id, modify the filter;
6550 * if a filter exists with the same queue id, do nothing
6552 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6554 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6555 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6556 spin_unlock_bh(&hdev->fd_rule_lock);
6560 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6562 spin_unlock_bh(&hdev->fd_rule_lock);
6566 set_bit(bit_id, hdev->fd_bmap);
6567 rule->location = bit_id;
6568 rule->arfs.flow_id = flow_id;
6569 rule->queue_id = queue_id;
6570 hclge_fd_build_arfs_rule(&new_tuples, rule);
6571 ret = hclge_fd_config_rule(hdev, rule);
6573 spin_unlock_bh(&hdev->fd_rule_lock);
6578 return rule->location;
6581 spin_unlock_bh(&hdev->fd_rule_lock);
6583 if (rule->queue_id == queue_id)
6584 return rule->location;
6586 tmp_queue_id = rule->queue_id;
6587 rule->queue_id = queue_id;
6588 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6590 rule->queue_id = tmp_queue_id;
6594 return rule->location;
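/* Modify path sketch: if the existing rule already steers to queue_id,
 * nothing is written; otherwise only the action table entry is rewritten
 * via hclge_config_action(), and queue_id is rolled back on failure.
 */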
static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	HLIST_HEAD(del_list);

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->arfs.flow_id, rule->location)) {
			hlist_del_init(&rule->rule_node);
			hlist_add_head(&rule->rule_node, &del_list);
			hdev->hclge_fd_rule_num--;
			clear_bit(rule->location, hdev->fd_bmap);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				     rule->location, NULL, false);
		kfree(rule);
	}
#endif
}

/* the caller must hold fd_rule_lock */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}
static void hclge_get_cls_key_basic(const struct flow_rule *flow,
				    struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(flow, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}
		rule->tuples.ether_proto = ethtype_key;
		rule->tuples_mask.ether_proto = ethtype_mask;
		rule->tuples.ip_proto = match.key->ip_proto;
		rule->tuples_mask.ip_proto = match.mask->ip_proto;
	} else {
		rule->unused_tuple |= BIT(INNER_IP_PROTO);
		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
	}
}
static void hclge_get_cls_key_mac(const struct flow_rule *flow,
				  struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(flow, &match);
		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
		ether_addr_copy(rule->tuples.src_mac, match.key->src);
		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
	} else {
		rule->unused_tuple |= BIT(INNER_DST_MAC);
		rule->unused_tuple |= BIT(INNER_SRC_MAC);
	}
}
static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(flow, &match);
		rule->tuples.vlan_tag1 = match.key->vlan_id |
				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
	} else {
		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}
}
static void hclge_get_cls_key_ip(const struct flow_rule *flow,
				 struct hclge_fd_rule *rule)
{
	u16 addr_type = 0;

	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(flow, &match);
		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(flow, &match);
		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(match.mask->src);
		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(match.mask->dst);
	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(flow, &match);
		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  match.mask->src.s6_addr32, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  match.mask->dst.s6_addr32, IPV6_SIZE);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_IP);
		rule->unused_tuple |= BIT(INNER_DST_IP);
	}
}
static void hclge_get_cls_key_port(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(flow, &match);

		rule->tuples.src_port = be16_to_cpu(match.key->src);
		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_PORT);
		rule->unused_tuple |= BIT(INNER_DST_PORT);
	}
}
static int hclge_parse_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower,
				  struct hclge_fd_rule *rule)
{
	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
	struct flow_dissector *dissector = flow->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
			dissector->used_keys);
		return -EOPNOTSUPP;
	}

	hclge_get_cls_key_basic(flow, rule);
	hclge_get_cls_key_mac(flow, rule);
	hclge_get_cls_key_vlan(flow, rule);
	hclge_get_cls_key_ip(flow, rule);
	hclge_get_cls_key_port(flow, rule);

	return 0;
}
static int hclge_check_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower, int tc)
{
	u32 prio = cls_flower->common.prio;

	if (tc < 0 || tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
		return -EINVAL;
	}

	if (prio == 0 ||
	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
		dev_err(&hdev->pdev->dev,
			"prio %u should be in range[1, %u]\n",
			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
		return -EINVAL;
	}

	if (test_bit(prio - 1, hdev->fd_bmap)) {
		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
		return -EINVAL;
	}
	return 0;
}
static int hclge_add_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower,
				int tc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		dev_err(&hdev->pdev->dev,
			"please remove all existing fd rules via ethtool first\n");
		return -EINVAL;
	}

	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check cls flower params, ret = %d\n", ret);
		return ret;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
	if (ret)
		goto err;

	rule->action = HCLGE_FD_ACTION_SELECT_TC;
	rule->cls_flower.tc = tc;
	rule->location = cls_flower->common.prio - 1;
	rule->vf_id = 0;
	rule->cls_flower.cookie = cls_flower->cookie;
	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;

	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(handle);
	ret = hclge_fd_config_rule(hdev, rule);
	spin_unlock_bh(&hdev->fd_rule_lock);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to add cls flower rule, ret = %d\n", ret);
		goto err;
	}

	return 0;
err:
	kfree(rule);
	return ret;
}
static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
						   unsigned long cookie)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->cls_flower.cookie == cookie)
			return rule;
	}

	return NULL;
}
static int hclge_del_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	spin_lock_bh(&hdev->fd_rule_lock);
	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
	if (!rule) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EINVAL;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
				   NULL, false);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to delete cls flower rule %u, ret = %d\n",
			rule->location, ret);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return ret;
	}

	ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to delete cls flower rule %u in list, ret = %d\n",
			rule->location, ret);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return ret;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
6921 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6923 struct hclge_vport *vport = hclge_get_vport(handle);
6924 struct hclge_dev *hdev = vport->back;
6926 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6927 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6930 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6932 struct hclge_vport *vport = hclge_get_vport(handle);
6933 struct hclge_dev *hdev = vport->back;
6935 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6938 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6940 struct hclge_vport *vport = hclge_get_vport(handle);
6941 struct hclge_dev *hdev = vport->back;
6943 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6946 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6948 struct hclge_vport *vport = hclge_get_vport(handle);
6949 struct hclge_dev *hdev = vport->back;
6951 return hdev->rst_stats.hw_reset_done_cnt;
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool clear;

	hdev->fd_en = enable;
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;

	if (!enable) {
		spin_lock_bh(&hdev->fd_rule_lock);
		hclge_del_all_fd_entries(handle, clear);
		spin_unlock_bh(&hdev->fd_rule_lock);
	} else {
		hclge_restore_fd_entries(handle);
	}
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);

	if (enable) {
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
	}

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
7003 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7004 u8 switch_param, u8 param_mask)
7006 struct hclge_mac_vlan_switch_cmd *req;
7007 struct hclge_desc desc;
7011 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7012 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7014 /* read current config parameter */
7015 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7017 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7018 req->func_id = cpu_to_le32(func_id);
7020 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7022 dev_err(&hdev->pdev->dev,
7023 "read mac vlan switch parameter fail, ret = %d\n", ret);
7027 /* modify and write new config parameter */
7028 hclge_cmd_reuse_desc(&desc, false);
7029 req->switch_param = (req->switch_param & param_mask) | switch_param;
7030 req->param_mask = param_mask;
7032 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7034 dev_err(&hdev->pdev->dev,
7035 "set mac vlan switch parameter fail, ret = %d\n", ret);
static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
				       int link_ret)
{
#define HCLGE_PHY_LINK_STATUS_NUM  200

	struct phy_device *phydev = hdev->hw.mac.phydev;
	int i = 0;
	int ret;

	do {
		ret = phy_read_status(phydev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"phy update link status fail, ret = %d\n", ret);
			return;
		}

		if (phydev->link == link_ret)
			break;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
}
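/* Timing note (added): with HCLGE_LINK_STATUS_MS = 10, the PHY wait above
 * polls up to 200 times, i.e. roughly 2 seconds, and the MAC wait below
 * polls up to 100 times, i.e. roughly 1 second, before giving up.
 */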
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
{
#define HCLGE_MAC_LINK_STATUS_NUM  100

	int link_status;
	int i = 0;
	int ret;

	do {
		ret = hclge_get_mac_link_status(hdev, &link_status);
		if (ret)
			return ret;
		if (link_status == link_ret)
			return 0;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);

	return -EBUSY;
}
static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
					  bool is_phy)
{
	int link_ret;

	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

	if (is_phy)
		hclge_phy_link_status_wait(hdev, link_ret);

	return hclge_mac_link_status_wait(hdev, link_ret);
}
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100

	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_serdes_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported serdes loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
	} else {
		req->mask = loop_mode_b;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}
	return ret;
}
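/* Timing note (added): the DONE bit is polled up to HCLGE_SERDES_RETRY_NUM
 * (100) times with HCLGE_SERDES_RETRY_MS (10 ms) sleeps in between, so
 * firmware has about one second to complete the serdes loopback request.
 */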
static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
	int ret;

	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
	if (ret)
		return ret;

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"serdes loopback config mac mode timeout\n");
	return ret;
}
7213 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7214 struct phy_device *phydev)
7218 if (!phydev->suspended) {
7219 ret = phy_suspend(phydev);
7224 ret = phy_resume(phydev);
7228 return phy_loopback(phydev, true);
7231 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7232 struct phy_device *phydev)
7236 ret = phy_loopback(phydev, false);
7240 return phy_suspend(phydev);
7243 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7245 struct phy_device *phydev = hdev->hw.mac.phydev;
7252 ret = hclge_enable_phy_loopback(hdev, phydev);
7254 ret = hclge_disable_phy_loopback(hdev, phydev);
7256 dev_err(&hdev->pdev->dev,
7257 "set phy loopback fail, ret = %d\n", ret);
7261 hclge_cfg_mac_mode(hdev, en);
7263 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7265 dev_err(&hdev->pdev->dev,
7266 "phy loopback config mac mode timeout\n");
7271 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
7272 int stream_id, bool enable)
7274 struct hclge_desc desc;
7275 struct hclge_cfg_com_tqp_queue_cmd *req =
7276 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7279 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7280 req->tqp_id = cpu_to_le16(tqp_id);
7281 req->stream_id = cpu_to_le16(stream_id);
7283 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7285 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7287 dev_err(&hdev->pdev->dev,
7288 "Tqp enable fail, status =%d.\n", ret);
7292 static int hclge_set_loopback(struct hnae3_handle *handle,
7293 enum hnae3_loop loop_mode, bool en)
7295 struct hclge_vport *vport = hclge_get_vport(handle);
7296 struct hnae3_knic_private_info *kinfo;
7297 struct hclge_dev *hdev = vport->back;
7300 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7301 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7302 * the same, the packets are looped back in the SSU. If SSU loopback
7303 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7305 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7306 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7308 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7309 HCLGE_SWITCH_ALW_LPBK_MASK);
7314 switch (loop_mode) {
7315 case HNAE3_LOOP_APP:
7316 ret = hclge_set_app_loopback(hdev, en);
7318 case HNAE3_LOOP_SERIAL_SERDES:
7319 case HNAE3_LOOP_PARALLEL_SERDES:
7320 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
7322 case HNAE3_LOOP_PHY:
7323 ret = hclge_set_phy_loopback(hdev, en);
7327 dev_err(&hdev->pdev->dev,
7328 "loop_mode %d is not supported\n", loop_mode);
7335 kinfo = &vport->nic.kinfo;
7336 for (i = 0; i < kinfo->num_tqps; i++) {
7337 ret = hclge_tqp_enable(hdev, i, 0, en);
7345 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7349 ret = hclge_set_app_loopback(hdev, false);
7353 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7357 return hclge_cfg_serdes_loopback(hdev, false,
7358 HNAE3_LOOP_PARALLEL_SERDES);
7361 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7363 struct hclge_vport *vport = hclge_get_vport(handle);
7364 struct hnae3_knic_private_info *kinfo;
7365 struct hnae3_queue *queue;
7366 struct hclge_tqp *tqp;
7369 kinfo = &vport->nic.kinfo;
7370 for (i = 0; i < kinfo->num_tqps; i++) {
7371 queue = handle->kinfo.tqp[i];
7372 tqp = container_of(queue, struct hclge_tqp, q);
7373 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}
static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev, 0);
	} else {
		/* Set the DOWN flag here to disable link updating */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclge_flush_link_update(hdev);
	}
}
7407 static int hclge_ae_start(struct hnae3_handle *handle)
7409 struct hclge_vport *vport = hclge_get_vport(handle);
7410 struct hclge_dev *hdev = vport->back;
7413 hclge_cfg_mac_mode(hdev, true);
7414 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7415 hdev->hw.mac.link = 0;
7417 /* reset tqp stats */
7418 hclge_reset_tqp_stats(handle);
7420 hclge_mac_start_phy(hdev);
7425 static void hclge_ae_stop(struct hnae3_handle *handle)
7427 struct hclge_vport *vport = hclge_get_vport(handle);
7428 struct hclge_dev *hdev = vport->back;
7431 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7432 spin_lock_bh(&hdev->fd_rule_lock);
7433 hclge_clear_arfs_rules(handle);
7434 spin_unlock_bh(&hdev->fd_rule_lock);
	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so we only need to stop the PHY here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		hclge_update_link_status(hdev);
		return;
	}
7446 for (i = 0; i < handle->kinfo.num_tqps; i++)
7447 hclge_reset_tqp(handle, i);
7449 hclge_config_mac_tnl_int(hdev, false);
7452 hclge_cfg_mac_mode(hdev, false);
7454 hclge_mac_stop_phy(hdev);
7456 /* reset tqp stats */
7457 hclge_reset_tqp_stats(handle);
7458 hclge_update_link_status(hdev);
int hclge_vport_start(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;

	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
		if (vport->vport_id) {
			hclge_restore_mac_table_common(vport);
			hclge_restore_vport_vlan_table(vport);
		} else {
			hclge_restore_hw_table(hdev);
		}
	}

	clear_bit(vport->vport_id, hdev->vport_config_block);

	return 0;
}
7482 void hclge_vport_stop(struct hclge_vport *vport)
7484 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7487 static int hclge_client_start(struct hnae3_handle *handle)
7489 struct hclge_vport *vport = hclge_get_vport(handle);
7491 return hclge_vport_start(vport);
7494 static void hclge_client_stop(struct hnae3_handle *handle)
7496 struct hclge_vport *vport = hclge_get_vport(handle);
7498 hclge_vport_stop(vport);
7501 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7502 u16 cmdq_resp, u8 resp_code,
7503 enum hclge_mac_vlan_tbl_opcode op)
7505 struct hclge_dev *hdev = vport->back;
7508 dev_err(&hdev->pdev->dev,
7509 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7514 if (op == HCLGE_MAC_VLAN_ADD) {
7515 if (!resp_code || resp_code == 1)
7517 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7518 resp_code == HCLGE_ADD_MC_OVERFLOW)
7521 dev_err(&hdev->pdev->dev,
7522 "add mac addr failed for undefined, code=%u.\n",
7525 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7528 } else if (resp_code == 1) {
7529 dev_dbg(&hdev->pdev->dev,
7530 "remove mac addr failed for miss.\n");
7534 dev_err(&hdev->pdev->dev,
7535 "remove mac addr failed for undefined, code=%u.\n",
7538 } else if (op == HCLGE_MAC_VLAN_LKUP) {
7541 } else if (resp_code == 1) {
7542 dev_dbg(&hdev->pdev->dev,
7543 "lookup mac addr failed for miss.\n");
7547 dev_err(&hdev->pdev->dev,
7548 "lookup mac addr failed for undefined, code=%u.\n",
7553 dev_err(&hdev->pdev->dev,
7554 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}
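/* Worked example (added): the function id bitmap spans two descriptors,
 * 192 bits in desc[1] and the rest in desc[2]. For vfid 5 this sets bit 5
 * of desc[1].data[0]; for vfid 200 it sets bit 8 (200 % 32) of
 * desc[2].data[0], since (200 - 192) / 32 = 0.
 */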
7588 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7590 #define HCLGE_DESC_NUMBER 3
7591 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7594 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7595 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7596 if (desc[i].data[j])
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
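/* Worked example (added): for the address 00:11:22:33:44:55, high_val
 * packs mac bytes 0..3 least-significant-byte-first, giving 0x33221100,
 * and low_val packs bytes 4..5 the same way, giving 0x5544, matching the
 * hardware's 32-bit-high / 16-bit-low entry layout.
 */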
7620 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7621 struct hclge_mac_vlan_tbl_entry_cmd *req)
7623 struct hclge_dev *hdev = vport->back;
7624 struct hclge_desc desc;
7629 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7631 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7633 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7635 dev_err(&hdev->pdev->dev,
7636 "del mac addr failed for cmd_send, ret =%d.\n",
7640 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7641 retval = le16_to_cpu(desc.retval);
7643 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7644 HCLGE_MAC_VLAN_REMOVE);
7647 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7648 struct hclge_mac_vlan_tbl_entry_cmd *req,
7649 struct hclge_desc *desc,
7652 struct hclge_dev *hdev = vport->back;
7657 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7659 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7660 memcpy(desc[0].data,
7662 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7663 hclge_cmd_setup_basic_desc(&desc[1],
7664 HCLGE_OPC_MAC_VLAN_ADD,
7666 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7667 hclge_cmd_setup_basic_desc(&desc[2],
7668 HCLGE_OPC_MAC_VLAN_ADD,
7670 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7672 memcpy(desc[0].data,
7674 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7675 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7678 dev_err(&hdev->pdev->dev,
7679 "lookup mac addr failed for cmd_send, ret =%d.\n",
7683 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7684 retval = le16_to_cpu(desc[0].retval);
7686 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7687 HCLGE_MAC_VLAN_LKUP);
7690 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7691 struct hclge_mac_vlan_tbl_entry_cmd *req,
7692 struct hclge_desc *mc_desc)
7694 struct hclge_dev *hdev = vport->back;
7701 struct hclge_desc desc;
7703 hclge_cmd_setup_basic_desc(&desc,
7704 HCLGE_OPC_MAC_VLAN_ADD,
7706 memcpy(desc.data, req,
7707 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7708 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7709 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7710 retval = le16_to_cpu(desc.retval);
7712 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7714 HCLGE_MAC_VLAN_ADD);
7716 hclge_cmd_reuse_desc(&mc_desc[0], false);
7717 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7718 hclge_cmd_reuse_desc(&mc_desc[1], false);
7719 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7720 hclge_cmd_reuse_desc(&mc_desc[2], false);
7721 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7722 memcpy(mc_desc[0].data, req,
7723 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7724 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7725 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7726 retval = le16_to_cpu(mc_desc[0].retval);
7728 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7730 HCLGE_MAC_VLAN_ADD);
7734 dev_err(&hdev->pdev->dev,
7735 "add mac addr failed for cmd_send, ret =%d.\n",
7743 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7744 u16 *allocated_size)
7746 struct hclge_umv_spc_alc_cmd *req;
7747 struct hclge_desc desc;
7750 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7751 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7753 req->space_size = cpu_to_le32(space_size);
7755 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7757 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7762 *allocated_size = le32_to_cpu(desc.data[1]);
static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "failed to alloc umv space, want %u, get %u\n",
			 hdev->wanted_umv_size, allocated_size);

	hdev->max_umv_size = allocated_size;
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);

	return 0;
}
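/* Worked example (added): if firmware grants 3070 unicast entries for one
 * PF plus 7 VFs, each of the 8 functions gets a private quota of
 * 3070 / 8 = 383 entries, and the shared pool is sized at one quota plus
 * the remainder, 383 + (3070 % 8) = 389 entries.
 */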
7789 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7791 struct hclge_vport *vport;
7794 for (i = 0; i < hdev->num_alloc_vport; i++) {
7795 vport = &hdev->vport[i];
7796 vport->used_umv_num = 0;
7799 mutex_lock(&hdev->vport_lock);
7800 hdev->share_umv_size = hdev->priv_umv_size +
7801 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7802 mutex_unlock(&hdev->vport_lock);
7805 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7807 struct hclge_dev *hdev = vport->back;
7811 mutex_lock(&hdev->vport_lock);
7813 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7814 hdev->share_umv_size == 0);
7817 mutex_unlock(&hdev->vport_lock);
static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
}
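/* Accounting note (added): a vport consumes its private quota first and
 * only then draws from the shared pool, so the pool is decremented only
 * once used_umv_num has reached priv_umv_size; freeing reverses this,
 * returning an entry to the pool while the vport is above its quota.
 */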
7840 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7843 struct hclge_mac_node *mac_node, *tmp;
7845 list_for_each_entry_safe(mac_node, tmp, list, node)
7846 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
				  enum HCLGE_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGE_MAC_TO_ADD:
		if (mac_node->state == HCLGE_MAC_TO_DEL)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGE_MAC_TO_DEL:
		if (mac_node->state == HCLGE_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGE_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * ACTIVE.
	 */
	case HCLGE_MAC_ACTIVE:
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	}
}
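/* Transition summary (added): with the requested state on top and the
 * node's current state on the left, the switch above implements:
 *
 *	current \ input   TO_ADD    TO_DEL        ACTIVE
 *	TO_ADD            -         node freed    ACTIVE
 *	TO_DEL            ACTIVE    -             -
 *	ACTIVE            -         TO_DEL        -
 *
 * where "-" means the node state is left unchanged.
 */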
7881 int hclge_update_mac_list(struct hclge_vport *vport,
7882 enum HCLGE_MAC_NODE_STATE state,
7883 enum HCLGE_MAC_ADDR_TYPE mac_type,
7884 const unsigned char *addr)
7886 struct hclge_dev *hdev = vport->back;
7887 struct hclge_mac_node *mac_node;
7888 struct list_head *list;
7890 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7891 &vport->uc_mac_list : &vport->mc_mac_list;
7893 spin_lock_bh(&vport->mac_list_lock);
	/* if the mac addr is already in the mac list, there is no need to add
	 * a new one; just check its state and convert it to a new state,
	 * remove it, or do nothing, as appropriate.
	 */
7899 mac_node = hclge_find_mac_node(list, addr);
7901 hclge_update_mac_node(mac_node, state);
7902 spin_unlock_bh(&vport->mac_list_lock);
7903 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
	/* if this address was never added, there is nothing to delete */
7908 if (state == HCLGE_MAC_TO_DEL) {
7909 spin_unlock_bh(&vport->mac_list_lock);
7910 dev_err(&hdev->pdev->dev,
7911 "failed to delete address %pM from mac list\n",
7916 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7918 spin_unlock_bh(&vport->mac_list_lock);
7922 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7924 mac_node->state = state;
7925 ether_addr_copy(mac_node->mac_addr, addr);
7926 list_add_tail(&mac_node->node, list);
7928 spin_unlock_bh(&vport->mac_list_lock);
7933 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7934 const unsigned char *addr)
7936 struct hclge_vport *vport = hclge_get_vport(handle);
7938 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7942 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7943 const unsigned char *addr)
7945 struct hclge_dev *hdev = vport->back;
7946 struct hclge_mac_vlan_tbl_entry_cmd req;
7947 struct hclge_desc desc;
7948 u16 egress_port = 0;
7951 /* mac addr check */
7952 if (is_zero_ether_addr(addr) ||
7953 is_broadcast_ether_addr(addr) ||
7954 is_multicast_ether_addr(addr)) {
7955 dev_err(&hdev->pdev->dev,
7956 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7957 addr, is_zero_ether_addr(addr),
7958 is_broadcast_ether_addr(addr),
7959 is_multicast_ether_addr(addr));
7963 memset(&req, 0, sizeof(req));
7965 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7966 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7968 req.egress_port = cpu_to_le16(egress_port);
7970 hclge_prepare_mac_addr(&req, addr, false);
	/* Look up the mac address in the mac_vlan table, and add it if the
	 * entry does not exist. Duplicate unicast entries are not allowed in
	 * the mac_vlan table.
	 */
7976 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7977 if (ret == -ENOENT) {
7978 mutex_lock(&hdev->vport_lock);
7979 if (!hclge_is_umv_space_full(vport, false)) {
7980 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7982 hclge_update_umv_space(vport, false);
7983 mutex_unlock(&hdev->vport_lock);
7986 mutex_unlock(&hdev->vport_lock);
7988 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7989 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7990 hdev->priv_umv_size);
7995 /* check if we just hit the duplicate */
7997 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7998 vport->vport_id, addr);
8002 dev_err(&hdev->pdev->dev,
8003 "PF failed to add unicast entry(%pM) in the MAC table\n",
8009 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8010 const unsigned char *addr)
8012 struct hclge_vport *vport = hclge_get_vport(handle);
8014 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8018 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8019 const unsigned char *addr)
8021 struct hclge_dev *hdev = vport->back;
8022 struct hclge_mac_vlan_tbl_entry_cmd req;
8025 /* mac addr check */
8026 if (is_zero_ether_addr(addr) ||
8027 is_broadcast_ether_addr(addr) ||
8028 is_multicast_ether_addr(addr)) {
8029 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8034 memset(&req, 0, sizeof(req));
8035 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8036 hclge_prepare_mac_addr(&req, addr, false);
8037 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8039 mutex_lock(&hdev->vport_lock);
8040 hclge_update_umv_space(vport, true);
8041 mutex_unlock(&hdev->vport_lock);
8042 } else if (ret == -ENOENT) {
8049 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8050 const unsigned char *addr)
8052 struct hclge_vport *vport = hclge_get_vport(handle);
8054 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8058 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8059 const unsigned char *addr)
8061 struct hclge_dev *hdev = vport->back;
8062 struct hclge_mac_vlan_tbl_entry_cmd req;
8063 struct hclge_desc desc[3];
8066 /* mac addr check */
8067 if (!is_multicast_ether_addr(addr)) {
8068 dev_err(&hdev->pdev->dev,
8069 "Add mc mac err! invalid mac:%pM.\n",
8073 memset(&req, 0, sizeof(req));
8074 hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;

	status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	/* if the table has already overflowed, don't print each time */
	if (status == -ENOSPC &&
	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}
8095 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8096 const unsigned char *addr)
8098 struct hclge_vport *vport = hclge_get_vport(handle);
8100 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8104 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8105 const unsigned char *addr)
8107 struct hclge_dev *hdev = vport->back;
8108 struct hclge_mac_vlan_tbl_entry_cmd req;
8109 enum hclge_cmd_status status;
8110 struct hclge_desc desc[3];
8112 /* mac addr check */
8113 if (!is_multicast_ether_addr(addr)) {
8114 dev_dbg(&hdev->pdev->dev,
8115 "Remove mc mac err! invalid mac:%pM.\n",
8120 memset(&req, 0, sizeof(req));
8121 hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID from it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (status)
			return status;

		if (hclge_is_all_function_id_zero(desc))
			/* All vfid bits are zero, so delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all vfid bits are zero, just update the bitmap */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else if (status == -ENOENT) {
		status = 0;
	}

	return status;
}
8143 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8144 struct list_head *list,
8145 int (*sync)(struct hclge_vport *,
8146 const unsigned char *))
8148 struct hclge_mac_node *mac_node, *tmp;
8151 list_for_each_entry_safe(mac_node, tmp, list, node) {
8152 ret = sync(vport, mac_node->mac_addr);
8154 mac_node->state = HCLGE_MAC_ACTIVE;
8156 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8163 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8164 struct list_head *list,
8165 int (*unsync)(struct hclge_vport *,
8166 const unsigned char *))
8168 struct hclge_mac_node *mac_node, *tmp;
8171 list_for_each_entry_safe(mac_node, tmp, list, node) {
8172 ret = unsync(vport, mac_node->mac_addr);
8173 if (!ret || ret == -ENOENT) {
8174 list_del(&mac_node->node);
8177 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8184 static bool hclge_sync_from_add_list(struct list_head *add_list,
8185 struct list_head *mac_list)
8187 struct hclge_mac_node *mac_node, *tmp, *new_node;
8188 bool all_added = true;
8190 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			all_added = false;

		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * during the window in which the address was being added to
		 * the mac table. If the mac_node state is ACTIVE, change it
		 * to TO_DEL so it is removed on the next pass; otherwise it
		 * must be TO_ADD, meaning the address never made it into the
		 * mac table, so just remove the mac node.
		 */
8202 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8204 hclge_update_mac_node(new_node, mac_node->state);
8205 list_del(&mac_node->node);
8207 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8208 mac_node->state = HCLGE_MAC_TO_DEL;
8209 list_del(&mac_node->node);
8210 list_add_tail(&mac_node->node, mac_list);
8212 list_del(&mac_node->node);
8220 static void hclge_sync_from_del_list(struct list_head *del_list,
8221 struct list_head *mac_list)
8223 struct hclge_mac_node *mac_node, *tmp, *new_node;
	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr is already in the mac list, it means
			 * a new TO_ADD request was received while this address
			 * was being configured; its state is TO_ADD and the
			 * address is still present in hardware (the delete
			 * failed), so just change the mac node state to
			 * ACTIVE.
			 */
			new_node->state = HCLGE_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}
8245 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8246 enum HCLGE_MAC_ADDR_TYPE mac_type,
8249 if (mac_type == HCLGE_MAC_ADDR_UC) {
8251 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8253 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8256 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8258 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8262 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8263 enum HCLGE_MAC_ADDR_TYPE mac_type)
8265 struct hclge_mac_node *mac_node, *tmp, *new_node;
8266 struct list_head tmp_add_list, tmp_del_list;
8267 struct list_head *list;
8270 INIT_LIST_HEAD(&tmp_add_list);
8271 INIT_LIST_HEAD(&tmp_del_list);
8273 /* move the mac addr to the tmp_add_list and tmp_del_list, then
8274 * we can add/delete these mac addr outside the spin lock
8276 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8277 &vport->uc_mac_list : &vport->mc_mac_list;
8279 spin_lock_bh(&vport->mac_list_lock);
8281 list_for_each_entry_safe(mac_node, tmp, list, node) {
8282 switch (mac_node->state) {
8283 case HCLGE_MAC_TO_DEL:
8284 list_del(&mac_node->node);
8285 list_add_tail(&mac_node->node, &tmp_del_list);
8287 case HCLGE_MAC_TO_ADD:
8288 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8291 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8292 new_node->state = mac_node->state;
8293 list_add_tail(&new_node->node, &tmp_add_list);
8301 spin_unlock_bh(&vport->mac_list_lock);
8303 /* delete first, in order to get max mac table space for adding */
8304 if (mac_type == HCLGE_MAC_ADDR_UC) {
8305 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8306 hclge_rm_uc_addr_common);
8307 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8308 hclge_add_uc_addr_common);
8310 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8311 hclge_rm_mc_addr_common);
8312 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8313 hclge_add_mc_addr_common);
	/* if adding or deleting some mac addresses failed, move them back to
	 * the mac_list and retry on the next pass.
	 */
8319 spin_lock_bh(&vport->mac_list_lock);
8321 hclge_sync_from_del_list(&tmp_del_list, list);
8322 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8324 spin_unlock_bh(&vport->mac_list_lock);
	hclge_update_overflow_flags(vport, mac_type, all_added);
}
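/* Design note (added): the sync above is two-phase on purpose. The
 * mac_list_lock is a spinlock, while programming the mac_vlan table goes
 * through firmware command-queue transactions, so entries are first moved
 * onto the tmp lists under the lock and the hardware is touched only after
 * it is dropped; failures are then merged back under the lock.
 */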
8329 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8331 struct hclge_dev *hdev = vport->back;
8333 if (test_bit(vport->vport_id, hdev->vport_config_block))
8336 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8342 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8346 for (i = 0; i < hdev->num_alloc_vport; i++) {
8347 struct hclge_vport *vport = &hdev->vport[i];
8349 if (!hclge_need_sync_mac_table(vport))
8352 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8353 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8357 static void hclge_build_del_list(struct list_head *list,
8359 struct list_head *tmp_del_list)
8361 struct hclge_mac_node *mac_cfg, *tmp;
8363 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8364 switch (mac_cfg->state) {
8365 case HCLGE_MAC_TO_DEL:
8366 case HCLGE_MAC_ACTIVE:
8367 list_del(&mac_cfg->node);
8368 list_add_tail(&mac_cfg->node, tmp_del_list);
8370 case HCLGE_MAC_TO_ADD:
8372 list_del(&mac_cfg->node);
8380 static void hclge_unsync_del_list(struct hclge_vport *vport,
8381 int (*unsync)(struct hclge_vport *vport,
8382 const unsigned char *addr),
8384 struct list_head *tmp_del_list)
8386 struct hclge_mac_node *mac_cfg, *tmp;
8389 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8390 ret = unsync(vport, mac_cfg->mac_addr);
8391 if (!ret || ret == -ENOENT) {
			/* clear all mac addrs from hardware, but keep these
			 * mac addrs in the mac list so they can be restored
			 * after the vf reset finishes.
			 */
			if (!is_del_list &&
			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
				mac_cfg->state = HCLGE_MAC_TO_ADD;
			} else {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
		} else if (is_del_list) {
			mac_cfg->state = HCLGE_MAC_TO_DEL;
		}
	}
}
8409 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8410 enum HCLGE_MAC_ADDR_TYPE mac_type)
8412 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8413 struct hclge_dev *hdev = vport->back;
8414 struct list_head tmp_del_list, *list;
8416 if (mac_type == HCLGE_MAC_ADDR_UC) {
8417 list = &vport->uc_mac_list;
8418 unsync = hclge_rm_uc_addr_common;
8420 list = &vport->mc_mac_list;
8421 unsync = hclge_rm_mc_addr_common;
8424 INIT_LIST_HEAD(&tmp_del_list);
8427 set_bit(vport->vport_id, hdev->vport_config_block);
8429 spin_lock_bh(&vport->mac_list_lock);
8431 hclge_build_del_list(list, is_del_list, &tmp_del_list);
8433 spin_unlock_bh(&vport->mac_list_lock);
8435 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8437 spin_lock_bh(&vport->mac_list_lock);
8439 hclge_sync_from_del_list(&tmp_del_list, list);
8441 spin_unlock_bh(&vport->mac_list_lock);
/* remove all mac addresses when uninitializing */
8445 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8446 enum HCLGE_MAC_ADDR_TYPE mac_type)
8448 struct hclge_mac_node *mac_node, *tmp;
8449 struct hclge_dev *hdev = vport->back;
8450 struct list_head tmp_del_list, *list;
8452 INIT_LIST_HEAD(&tmp_del_list);
8454 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8455 &vport->uc_mac_list : &vport->mc_mac_list;
8457 spin_lock_bh(&vport->mac_list_lock);
8459 list_for_each_entry_safe(mac_node, tmp, list, node) {
8460 switch (mac_node->state) {
8461 case HCLGE_MAC_TO_DEL:
8462 case HCLGE_MAC_ACTIVE:
8463 list_del(&mac_node->node);
8464 list_add_tail(&mac_node->node, &tmp_del_list);
8466 case HCLGE_MAC_TO_ADD:
8467 list_del(&mac_node->node);
8473 spin_unlock_bh(&vport->mac_list_lock);
8475 if (mac_type == HCLGE_MAC_ADDR_UC)
8476 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8477 hclge_rm_uc_addr_common);
8479 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8480 hclge_rm_mc_addr_common);
	if (!list_empty(&tmp_del_list))
		dev_warn(&hdev->pdev->dev,
			 "uninit of %s mac list for vport %u did not complete\n",
			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
			 vport->vport_id);

	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}
8494 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8496 struct hclge_vport *vport;
8499 for (i = 0; i < hdev->num_alloc_vport; i++) {
8500 vport = &hdev->vport[i];
8501 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8502 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8506 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8507 u16 cmdq_resp, u8 resp_code)
8509 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
8510 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
8511 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
8512 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
8517 dev_err(&hdev->pdev->dev,
8518 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8523 switch (resp_code) {
8524 case HCLGE_ETHERTYPE_SUCCESS_ADD:
8525 case HCLGE_ETHERTYPE_ALREADY_ADD:
8528 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8529 dev_err(&hdev->pdev->dev,
8530 "add mac ethertype failed for manager table overflow.\n");
8531 return_status = -EIO;
8533 case HCLGE_ETHERTYPE_KEY_CONFLICT:
8534 dev_err(&hdev->pdev->dev,
8535 "add mac ethertype failed for key conflict.\n");
8536 return_status = -EIO;
8539 dev_err(&hdev->pdev->dev,
8540 "add mac ethertype failed for undefined, code=%u.\n",
8542 return_status = -EIO;
8545 return return_status;
8548 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8551 struct hclge_mac_vlan_tbl_entry_cmd req;
8552 struct hclge_dev *hdev = vport->back;
8553 struct hclge_desc desc;
8554 u16 egress_port = 0;
8557 if (is_zero_ether_addr(mac_addr))
8560 memset(&req, 0, sizeof(req));
8561 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8562 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8563 req.egress_port = cpu_to_le16(egress_port);
8564 hclge_prepare_mac_addr(&req, mac_addr, false);
8566 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8569 vf_idx += HCLGE_VF_VPORT_START_NUM;
8570 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8572 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8578 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8581 struct hclge_vport *vport = hclge_get_vport(handle);
8582 struct hclge_dev *hdev = vport->back;
8584 vport = hclge_get_vf_vport(hdev, vf);
8588 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8589 dev_info(&hdev->pdev->dev,
8590 "Specified MAC(=%pM) is same as before, no change committed!\n",
8595 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8596 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8601 ether_addr_copy(vport->vf_info.mac, mac_addr);
8603 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8604 dev_info(&hdev->pdev->dev,
8605 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8607 return hclge_inform_reset_assert_to_vf(vport);
8610 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8615 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8616 const struct hclge_mac_mgr_tbl_entry_cmd *req)
8618 struct hclge_desc desc;
8623 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8624 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8626 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8628 dev_err(&hdev->pdev->dev,
8629 "add mac ethertype failed for cmd_send, ret =%d.\n",
8634 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8635 retval = le16_to_cpu(desc.retval);
8637 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8640 static int init_mgr_tbl(struct hclge_dev *hdev)
8645 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8646 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8648 dev_err(&hdev->pdev->dev,
8649 "add mac ethertype failed, ret =%d.\n",
8658 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8660 struct hclge_vport *vport = hclge_get_vport(handle);
8661 struct hclge_dev *hdev = vport->back;
8663 ether_addr_copy(p, hdev->hw.mac.mac_addr);
8666 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8667 const u8 *old_addr, const u8 *new_addr)
8669 struct list_head *list = &vport->uc_mac_list;
8670 struct hclge_mac_node *old_node, *new_node;
8672 new_node = hclge_find_mac_node(list, new_addr);
8674 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8678 new_node->state = HCLGE_MAC_TO_ADD;
8679 ether_addr_copy(new_node->mac_addr, new_addr);
8680 list_add(&new_node->node, list);
8682 if (new_node->state == HCLGE_MAC_TO_DEL)
8683 new_node->state = HCLGE_MAC_ACTIVE;
		/* make sure the new addr is at the list head: after a
		 * global/IMP reset, which clears the hardware mac table, the
		 * dev addr might otherwise not be re-added into the mac
		 * table because of the umv space limitation.
		 */
		list_move(&new_node->node, list);
	}
8693 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8694 old_node = hclge_find_mac_node(list, old_addr);
8696 if (old_node->state == HCLGE_MAC_TO_ADD) {
8697 list_del(&old_node->node);
8700 old_node->state = HCLGE_MAC_TO_DEL;
8705 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8710 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8713 const unsigned char *new_addr = (const unsigned char *)p;
8714 struct hclge_vport *vport = hclge_get_vport(handle);
8715 struct hclge_dev *hdev = vport->back;
8716 unsigned char *old_addr = NULL;
8719 /* mac addr check */
8720 if (is_zero_ether_addr(new_addr) ||
8721 is_broadcast_ether_addr(new_addr) ||
8722 is_multicast_ether_addr(new_addr)) {
8723 dev_err(&hdev->pdev->dev,
8724 "change uc mac err! invalid mac: %pM.\n",
8729 ret = hclge_pause_addr_cfg(hdev, new_addr);
8731 dev_err(&hdev->pdev->dev,
8732 "failed to configure mac pause address, ret = %d\n",
8738 old_addr = hdev->hw.mac.mac_addr;
8740 spin_lock_bh(&vport->mac_list_lock);
8741 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8743 dev_err(&hdev->pdev->dev,
8744 "failed to change the mac addr:%pM, ret = %d\n",
8746 spin_unlock_bh(&vport->mac_list_lock);
8749 hclge_pause_addr_cfg(hdev, old_addr);
	/* the dev addr must be updated while holding the mac list lock,
	 * to prevent it from being removed by the set_rx_mode path.
	 */
8756 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8757 spin_unlock_bh(&vport->mac_list_lock);
8759 hclge_task_schedule(hdev, 0);
8764 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8767 struct hclge_vport *vport = hclge_get_vport(handle);
8768 struct hclge_dev *hdev = vport->back;
8770 if (!hdev->hw.mac.phydev)
8773 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8776 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8777 u8 fe_type, bool filter_en, u8 vf_id)
8779 struct hclge_vlan_filter_ctrl_cmd *req;
8780 struct hclge_desc desc;
8783 /* read current vlan filter parameter */
8784 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8785 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8786 req->vlan_type = vlan_type;
8789 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8791 dev_err(&hdev->pdev->dev,
8792 "failed to get vlan filter config, ret = %d.\n", ret);
8796 /* modify and write new config parameter */
8797 hclge_cmd_reuse_desc(&desc, false);
8798 req->vlan_fe = filter_en ?
8799 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8801 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8803 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8809 #define HCLGE_FILTER_TYPE_VF 0
8810 #define HCLGE_FILTER_TYPE_PORT 1
8811 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8812 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8813 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8814 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8815 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8816 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8817 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8818 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8819 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8821 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8823 struct hclge_vport *vport = hclge_get_vport(handle);
8824 struct hclge_dev *hdev = vport->back;
8826 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8827 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8828 HCLGE_FILTER_FE_EGRESS, enable, 0);
8829 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8830 HCLGE_FILTER_FE_INGRESS, enable, 0);
8832 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8833 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8837 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8839 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
					bool is_kill, u16 vlan,
					struct hclge_desc *desc)
{
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	u16 vf_byte_val;
	u16 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
					  bool is_kill, struct hclge_desc *desc)
{
	struct hclge_vlan_filter_vf_cfg_cmd *req;

	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req->resp_code || req->resp_code == 1)
			return 0;

		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			set_bit(vfid, hdev->vf_vlan_full);
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%u.\n",
			req->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req->resp_code)
			return 0;

		/* the vf vlan filter is disabled once the vf vlan table is
		 * full, so new vlan ids are never added into it. Just return
		 * 0 without a warning here, to avoid massive, verbose logs
		 * at unload time.
		 */
		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%u.\n",
			req->resp_code);
	}

	return -EIO;
}
8927 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8928 bool is_kill, u16 vlan,
8931 struct hclge_vport *vport = &hdev->vport[vfid];
8932 struct hclge_desc desc[2];
8935 /* if the vf vlan table is full, firmware will close the vf vlan
8936 * filter, so it is neither possible nor necessary to add a new vlan
8937 * id to it. If spoof check is enabled and the table is full, no new
8938 * vlan should be added, because tx packets with it would be dropped.
8940 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8941 if (vport->vf_info.spoofchk && vlan) {
8942 dev_err(&hdev->pdev->dev,
8943 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8949 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
8953 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
8956 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8957 u16 vlan_id, bool is_kill)
8959 struct hclge_vlan_filter_pf_cfg_cmd *req;
8960 struct hclge_desc desc;
8961 u8 vlan_offset_byte_val;
8962 u8 vlan_offset_byte;
8966 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8968 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8969 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8970 HCLGE_VLAN_BYTE_SIZE;
8971 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
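/* Worked example of the offset math above, assuming
 * HCLGE_VLAN_ID_OFFSET_STEP is 160 and HCLGE_VLAN_BYTE_SIZE is 8:
 *
 *   vlan_id = 1000: vlan_offset_160 = 1000 / 160 = 6 (vlans 960..1119),
 *                   vlan_offset_byte = (1000 % 160) / 8 = 5,
 *                   vlan_offset_byte_val = 1 << (1000 % 8) = 0x01
 */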
8973 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8974 req->vlan_offset = vlan_offset_160;
8975 req->vlan_cfg = is_kill;
8976 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8978 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8980 dev_err(&hdev->pdev->dev,
8981 "port vlan command, send fail, ret =%d.\n", ret);
8985 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8986 u16 vport_id, u16 vlan_id,
8989 u16 vport_idx, vport_num = 0;
8992 if (is_kill && !vlan_id)
8995 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8998 dev_err(&hdev->pdev->dev,
8999 "Set %u vport vlan filter config fail, ret =%d.\n",
9004 /* vlan 0 may be added twice when 8021q module is enabled */
9005 if (!is_kill && !vlan_id &&
9006 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9009 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9010 dev_err(&hdev->pdev->dev,
9011 "Add port vlan failed, vport %u is already in vlan %u\n",
9017 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9018 dev_err(&hdev->pdev->dev,
9019 "Delete port vlan failed, vport %u is not in vlan %u\n",
9024 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9027 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9028 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
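/* The hdev->vlan_table bitmaps act as a per-vlan reference count over
 * vports: the shared port filter entry is written only when the first
 * vport joins a vlan (!is_kill && vport_num == 1) and cleared only when
 * the last vport leaves it (is_kill && vport_num == 0).
 */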
9034 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9036 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9037 struct hclge_vport_vtag_tx_cfg_cmd *req;
9038 struct hclge_dev *hdev = vport->back;
9039 struct hclge_desc desc;
9043 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9045 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9046 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9047 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9048 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9049 vcfg->accept_tag1 ? 1 : 0);
9050 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9051 vcfg->accept_untag1 ? 1 : 0);
9052 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9053 vcfg->accept_tag2 ? 1 : 0);
9054 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9055 vcfg->accept_untag2 ? 1 : 0);
9056 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9057 vcfg->insert_tag1_en ? 1 : 0);
9058 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9059 vcfg->insert_tag2_en ? 1 : 0);
9060 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9061 vcfg->tag_shift_mode_en ? 1 : 0);
9062 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9064 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9065 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9066 HCLGE_VF_NUM_PER_BYTE;
9067 req->vf_bitmap[bmap_index] =
9068 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
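/* Illustrative example of the per-vport addressing above, assuming
 * HCLGE_VF_NUM_PER_CMD is 64 and HCLGE_VF_NUM_PER_BYTE is 8:
 *
 *   vport_id = 70: vf_offset = 70 / 64 = 1,
 *                  bmap_index = (70 % 64) / 8 = 0,
 *                  vf_bitmap[0] = 1 << (70 % 8) = 0x40
 */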
9070 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9072 dev_err(&hdev->pdev->dev,
9073 "Send port txvlan cfg command fail, ret =%d\n",
9079 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9081 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9082 struct hclge_vport_vtag_rx_cfg_cmd *req;
9083 struct hclge_dev *hdev = vport->back;
9084 struct hclge_desc desc;
9088 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9090 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9091 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9092 vcfg->strip_tag1_en ? 1 : 0);
9093 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9094 vcfg->strip_tag2_en ? 1 : 0);
9095 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9096 vcfg->vlan1_vlan_prionly ? 1 : 0);
9097 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9098 vcfg->vlan2_vlan_prionly ? 1 : 0);
9099 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9100 vcfg->strip_tag1_discard_en ? 1 : 0);
9101 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9102 vcfg->strip_tag2_discard_en ? 1 : 0);
9104 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9105 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9106 HCLGE_VF_NUM_PER_BYTE;
9107 req->vf_bitmap[bmap_index] =
9108 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9110 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9112 dev_err(&hdev->pdev->dev,
9113 "Send port rxvlan cfg command fail, ret =%d\n",
9119 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9120 u16 port_base_vlan_state,
9125 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9126 vport->txvlan_cfg.accept_tag1 = true;
9127 vport->txvlan_cfg.insert_tag1_en = false;
9128 vport->txvlan_cfg.default_tag1 = 0;
9130 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9132 vport->txvlan_cfg.accept_tag1 =
9133 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9134 vport->txvlan_cfg.insert_tag1_en = true;
9135 vport->txvlan_cfg.default_tag1 = vlan_tag;
9138 vport->txvlan_cfg.accept_untag1 = true;
9140 /* accept_tag2 and accept_untag2 are not supported on
9141 * pdev revision 0x20; newer revisions support them, and
9142 * these two fields cannot be configured by the user.
9144 vport->txvlan_cfg.accept_tag2 = true;
9145 vport->txvlan_cfg.accept_untag2 = true;
9146 vport->txvlan_cfg.insert_tag2_en = false;
9147 vport->txvlan_cfg.default_tag2 = 0;
9148 vport->txvlan_cfg.tag_shift_mode_en = true;
9150 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9151 vport->rxvlan_cfg.strip_tag1_en = false;
9152 vport->rxvlan_cfg.strip_tag2_en =
9153 vport->rxvlan_cfg.rx_vlan_offload_en;
9154 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9156 vport->rxvlan_cfg.strip_tag1_en =
9157 vport->rxvlan_cfg.rx_vlan_offload_en;
9158 vport->rxvlan_cfg.strip_tag2_en = true;
9159 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9162 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9163 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9164 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9166 ret = hclge_set_vlan_tx_offload_cfg(vport);
9170 return hclge_set_vlan_rx_offload_cfg(vport);
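/* Summary of the rx strip configuration chosen above:
 *
 *   port based vlan     strip_tag1           strip_tag2 / discard
 *   DISABLE             false                rx_vlan_offload_en / false
 *   other               rx_vlan_offload_en   true / true
 */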
9173 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9175 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9176 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9177 struct hclge_desc desc;
9180 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9181 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9182 rx_req->ot_fst_vlan_type =
9183 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9184 rx_req->ot_sec_vlan_type =
9185 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9186 rx_req->in_fst_vlan_type =
9187 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9188 rx_req->in_sec_vlan_type =
9189 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9191 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9193 dev_err(&hdev->pdev->dev,
9194 "Send rxvlan protocol type command fail, ret =%d\n",
9199 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9201 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9202 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9203 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9205 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9207 dev_err(&hdev->pdev->dev,
9208 "Send txvlan protocol type command fail, ret =%d\n",
9214 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9216 #define HCLGE_DEF_VLAN_TYPE 0x8100
9218 struct hnae3_handle *handle = &hdev->vport[0].nic;
9219 struct hclge_vport *vport;
9223 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9224 /* for revision 0x21, vf vlan filter is per function */
9225 for (i = 0; i < hdev->num_alloc_vport; i++) {
9226 vport = &hdev->vport[i];
9227 ret = hclge_set_vlan_filter_ctrl(hdev,
9228 HCLGE_FILTER_TYPE_VF,
9229 HCLGE_FILTER_FE_EGRESS,
9236 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9237 HCLGE_FILTER_FE_INGRESS, true,
9242 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9243 HCLGE_FILTER_FE_EGRESS_V1_B,
9249 handle->netdev_flags |= HNAE3_VLAN_FLTR;
9251 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9252 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9253 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9254 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9255 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9256 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9258 ret = hclge_set_vlan_protocol_type(hdev);
9262 for (i = 0; i < hdev->num_alloc_vport; i++) {
9265 vport = &hdev->vport[i];
9266 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9268 ret = hclge_vlan_offload_cfg(vport,
9269 vport->port_base_vlan_cfg.state,
9275 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9278 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9281 struct hclge_vport_vlan_cfg *vlan;
9283 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9287 vlan->hd_tbl_status = writen_to_tbl;
9288 vlan->vlan_id = vlan_id;
9290 list_add_tail(&vlan->node, &vport->vlan_list);
9293 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9295 struct hclge_vport_vlan_cfg *vlan, *tmp;
9296 struct hclge_dev *hdev = vport->back;
9299 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9300 if (!vlan->hd_tbl_status) {
9301 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9303 vlan->vlan_id, false);
9305 dev_err(&hdev->pdev->dev,
9306 "restore vport vlan list failed, ret=%d\n",
9311 vlan->hd_tbl_status = true;
9317 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9320 struct hclge_vport_vlan_cfg *vlan, *tmp;
9321 struct hclge_dev *hdev = vport->back;
9323 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9324 if (vlan->vlan_id == vlan_id) {
9325 if (is_write_tbl && vlan->hd_tbl_status)
9326 hclge_set_vlan_filter_hw(hdev,
9332 list_del(&vlan->node);
9339 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9341 struct hclge_vport_vlan_cfg *vlan, *tmp;
9342 struct hclge_dev *hdev = vport->back;
9344 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9345 if (vlan->hd_tbl_status)
9346 hclge_set_vlan_filter_hw(hdev,
9352 vlan->hd_tbl_status = false;
9354 list_del(&vlan->node);
9358 clear_bit(vport->vport_id, hdev->vf_vlan_full);
9361 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9363 struct hclge_vport_vlan_cfg *vlan, *tmp;
9364 struct hclge_vport *vport;
9367 for (i = 0; i < hdev->num_alloc_vport; i++) {
9368 vport = &hdev->vport[i];
9369 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9370 list_del(&vlan->node);
9376 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9378 struct hclge_vport_vlan_cfg *vlan, *tmp;
9379 struct hclge_dev *hdev = vport->back;
9385 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9386 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9387 state = vport->port_base_vlan_cfg.state;
9389 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9390 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9391 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9392 vport->vport_id, vlan_id,
9397 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9398 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9400 vlan->vlan_id, false);
9403 vlan->hd_tbl_status = true;
9407 /* For global reset and imp reset, hardware will clear the mac table,
9408 * so we change the mac address state from ACTIVE to TO_ADD; they can
9409 * then be restored in the service task after the reset completes.
9410 * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need
9411 * to be restored after reset, so just remove those nodes from mac_list.
9413 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9415 struct hclge_mac_node *mac_node, *tmp;
9417 list_for_each_entry_safe(mac_node, tmp, list, node) {
9418 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9419 mac_node->state = HCLGE_MAC_TO_ADD;
9420 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9421 list_del(&mac_node->node);
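/* Compact restatement of the transitions implemented above:
 *
 *   HCLGE_MAC_ACTIVE            -> HCLGE_MAC_TO_ADD (re-added after reset)
 *   HCLGE_MAC_TO_DEL / DEL_FAIL -> removed from the list (not restored)
 */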
9427 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9429 spin_lock_bh(&vport->mac_list_lock);
9431 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9432 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9433 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9435 spin_unlock_bh(&vport->mac_list_lock);
9438 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9440 struct hclge_vport *vport = &hdev->vport[0];
9441 struct hnae3_handle *handle = &vport->nic;
9443 hclge_restore_mac_table_common(vport);
9444 hclge_restore_vport_vlan_table(vport);
9445 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9447 hclge_restore_fd_entries(handle);
9450 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9452 struct hclge_vport *vport = hclge_get_vport(handle);
9454 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9455 vport->rxvlan_cfg.strip_tag1_en = false;
9456 vport->rxvlan_cfg.strip_tag2_en = enable;
9457 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9459 vport->rxvlan_cfg.strip_tag1_en = enable;
9460 vport->rxvlan_cfg.strip_tag2_en = true;
9461 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9464 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9465 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9466 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9467 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9469 return hclge_set_vlan_rx_offload_cfg(vport);
9472 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9473 u16 port_base_vlan_state,
9474 struct hclge_vlan_info *new_info,
9475 struct hclge_vlan_info *old_info)
9477 struct hclge_dev *hdev = vport->back;
9480 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9481 hclge_rm_vport_all_vlan_table(vport, false);
9482 return hclge_set_vlan_filter_hw(hdev,
9483 htons(new_info->vlan_proto),
9489 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9490 vport->vport_id, old_info->vlan_tag,
9495 return hclge_add_vport_all_vlan_table(vport);
9498 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9499 struct hclge_vlan_info *vlan_info)
9501 struct hnae3_handle *nic = &vport->nic;
9502 struct hclge_vlan_info *old_vlan_info;
9503 struct hclge_dev *hdev = vport->back;
9506 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9508 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9512 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9513 /* add new VLAN tag */
9514 ret = hclge_set_vlan_filter_hw(hdev,
9515 htons(vlan_info->vlan_proto),
9517 vlan_info->vlan_tag,
9522 /* remove old VLAN tag */
9523 ret = hclge_set_vlan_filter_hw(hdev,
9524 htons(old_vlan_info->vlan_proto),
9526 old_vlan_info->vlan_tag,
9534 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9539 /* update state only when disabling/enabling port based VLAN */
9540 vport->port_base_vlan_cfg.state = state;
9541 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9542 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9544 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9547 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9548 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9549 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9554 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9555 enum hnae3_port_base_vlan_state state,
9558 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9560 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9562 return HNAE3_PORT_BASE_VLAN_ENABLE;
9565 return HNAE3_PORT_BASE_VLAN_DISABLE;
9566 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9567 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9569 return HNAE3_PORT_BASE_VLAN_MODIFY;
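/* Decision table for the state returned above (the elided checks test
 * for vlan id 0, as in the mainline driver):
 *
 *   current state   requested vlan       result
 *   DISABLE         0                    NOCHANGE
 *   DISABLE         non-zero             ENABLE
 *   ENABLE          0                    DISABLE
 *   ENABLE          same as configured   NOCHANGE
 *   ENABLE          different            MODIFY
 */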
9573 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9574 u16 vlan, u8 qos, __be16 proto)
9576 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9577 struct hclge_vport *vport = hclge_get_vport(handle);
9578 struct hclge_dev *hdev = vport->back;
9579 struct hclge_vlan_info vlan_info;
9583 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9586 vport = hclge_get_vf_vport(hdev, vfid);
9590 /* qos is a 3-bit value, so it cannot be bigger than 7 */
9591 if (vlan > VLAN_N_VID - 1 || qos > 7)
9593 if (proto != htons(ETH_P_8021Q))
9594 return -EPROTONOSUPPORT;
9596 state = hclge_get_port_base_vlan_state(vport,
9597 vport->port_base_vlan_cfg.state,
9599 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9602 vlan_info.vlan_tag = vlan;
9603 vlan_info.qos = qos;
9604 vlan_info.vlan_proto = ntohs(proto);
9606 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9608 dev_err(&hdev->pdev->dev,
9609 "failed to update port base vlan for vf %d, ret = %d\n",
9614 /* for DEVICE_VERSION_V3, the vf doesn't need to be told the port based vlan state */
9617 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9618 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9619 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9620 vport->vport_id, state,
9627 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9629 struct hclge_vlan_info *vlan_info;
9630 struct hclge_vport *vport;
9634 /* clear the port based vlan for all vfs */
9635 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9636 vport = &hdev->vport[vf];
9637 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9639 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9641 vlan_info->vlan_tag, true);
9643 dev_err(&hdev->pdev->dev,
9644 "failed to clear vf vlan for vf%d, ret = %d\n",
9645 vf - HCLGE_VF_VPORT_START_NUM, ret);
9649 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9650 u16 vlan_id, bool is_kill)
9652 struct hclge_vport *vport = hclge_get_vport(handle);
9653 struct hclge_dev *hdev = vport->back;
9654 bool writen_to_tbl = false;
9657 /* When the device is resetting or reset has failed, firmware is unable
9658 * to handle the mailbox. Just record the vlan id, and remove it after
9659 * the reset finishes.
9661 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9662 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9663 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9667 /* when port based vlan is enabled, we use the port based vlan as the
9668 * vlan filter entry. In this case, we don't update the vlan filter
9669 * table when the user adds a new vlan or removes an existing one; we
9670 * just update the vport vlan list. The vlan ids in the list will be
9671 * written into the vlan filter table once port based vlan is disabled.
9673 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9674 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9676 writen_to_tbl = true;
9681 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9683 hclge_add_vport_vlan_table(vport, vlan_id,
9685 } else if (is_kill) {
9686 /* when removing the hw vlan filter failed, record the vlan id
9687 * and try to remove it from hw later, to stay consistent
9688 * with the stack.
9690 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9695 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9697 #define HCLGE_MAX_SYNC_COUNT 60
9699 int i, ret, sync_cnt = 0;
9702 /* start from vport 1, since the PF is always alive */
9703 for (i = 0; i < hdev->num_alloc_vport; i++) {
9704 struct hclge_vport *vport = &hdev->vport[i];
9706 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9708 while (vlan_id != VLAN_N_VID) {
9709 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9710 vport->vport_id, vlan_id,
9712 if (ret && ret != -EINVAL)
9715 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9716 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9719 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9722 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9728 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9730 struct hclge_config_max_frm_size_cmd *req;
9731 struct hclge_desc desc;
9733 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9735 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9736 req->max_frm_size = cpu_to_le16(new_mps);
9737 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9739 return hclge_cmd_send(&hdev->hw, &desc, 1);
9742 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9744 struct hclge_vport *vport = hclge_get_vport(handle);
9746 return hclge_set_vport_mtu(vport, new_mtu);
9749 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9751 struct hclge_dev *hdev = vport->back;
9752 int i, max_frm_size, ret;
9754 /* HW supports 2 layers of vlan */
9755 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9756 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9757 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
9760 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
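/* Worked example of the frame size math above: for new_mtu = 1500,
 *
 *   max_frm_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4)
 *                  + 2 * VLAN_HLEN (4) = 1526 bytes,
 *
 * which is then raised to HCLGE_MAC_DEFAULT_FRAME if that is larger.
 */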
9761 mutex_lock(&hdev->vport_lock);
9762 /* VF's mps must fit within hdev->mps */
9763 if (vport->vport_id && max_frm_size > hdev->mps) {
9764 mutex_unlock(&hdev->vport_lock);
9766 } else if (vport->vport_id) {
9767 vport->mps = max_frm_size;
9768 mutex_unlock(&hdev->vport_lock);
9772 /* PF's mps must be greater than VF's mps */
9773 for (i = 1; i < hdev->num_alloc_vport; i++)
9774 if (max_frm_size < hdev->vport[i].mps) {
9775 mutex_unlock(&hdev->vport_lock);
9779 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9781 ret = hclge_set_mac_mtu(hdev, max_frm_size);
9783 dev_err(&hdev->pdev->dev,
9784 "Change mtu fail, ret =%d\n", ret);
9788 hdev->mps = max_frm_size;
9789 vport->mps = max_frm_size;
9791 ret = hclge_buffer_alloc(hdev);
9793 dev_err(&hdev->pdev->dev,
9794 "Allocate buffer fail, ret =%d\n", ret);
9797 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9798 mutex_unlock(&hdev->vport_lock);
9802 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9805 struct hclge_reset_tqp_queue_cmd *req;
9806 struct hclge_desc desc;
9809 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9811 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9812 req->tqp_id = cpu_to_le16(queue_id);
9814 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9816 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9818 dev_err(&hdev->pdev->dev,
9819 "Send tqp reset cmd error, status =%d\n", ret);
9826 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9828 struct hclge_reset_tqp_queue_cmd *req;
9829 struct hclge_desc desc;
9832 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9834 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9835 req->tqp_id = cpu_to_le16(queue_id);
9837 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9839 dev_err(&hdev->pdev->dev,
9840 "Get reset status error, status =%d\n", ret);
9844 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9847 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9849 struct hnae3_queue *queue;
9850 struct hclge_tqp *tqp;
9852 queue = handle->kinfo.tqp[queue_id];
9853 tqp = container_of(queue, struct hclge_tqp, q);
9858 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9860 struct hclge_vport *vport = hclge_get_vport(handle);
9861 struct hclge_dev *hdev = vport->back;
9862 int reset_try_times = 0;
9867 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9869 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9871 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9875 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9877 dev_err(&hdev->pdev->dev,
9878 "Send reset tqp cmd fail, ret = %d\n", ret);
9882 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9883 reset_status = hclge_get_reset_status(hdev, queue_gid);
9887 /* Wait for tqp hw reset */
9888 usleep_range(1000, 1200);
9891 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9892 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9896 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9898 dev_err(&hdev->pdev->dev,
9899 "Deassert the soft reset fail, ret = %d\n", ret);
9904 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9906 struct hnae3_handle *handle = &vport->nic;
9907 struct hclge_dev *hdev = vport->back;
9908 int reset_try_times = 0;
9913 if (queue_id >= handle->kinfo.num_tqps) {
9914 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
9919 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9921 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9923 dev_warn(&hdev->pdev->dev,
9924 "Send reset tqp cmd fail, ret = %d\n", ret);
9928 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9929 reset_status = hclge_get_reset_status(hdev, queue_gid);
9933 /* Wait for tqp hw reset */
9934 usleep_range(1000, 1200);
9937 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9938 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9942 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9944 dev_warn(&hdev->pdev->dev,
9945 "Deassert the soft reset fail, ret = %d\n", ret);
9948 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9950 struct hclge_vport *vport = hclge_get_vport(handle);
9951 struct hclge_dev *hdev = vport->back;
9953 return hdev->fw_version;
9956 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9958 struct phy_device *phydev = hdev->hw.mac.phydev;
9963 phy_set_asym_pause(phydev, rx_en, tx_en);
9966 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9970 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9973 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9975 dev_err(&hdev->pdev->dev,
9976 "configure pauseparam error, ret = %d.\n", ret);
9981 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9983 struct phy_device *phydev = hdev->hw.mac.phydev;
9984 u16 remote_advertising = 0;
9985 u16 local_advertising;
9986 u32 rx_pause, tx_pause;
9989 if (!phydev->link || !phydev->autoneg)
9992 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9995 remote_advertising = LPA_PAUSE_CAP;
9997 if (phydev->asym_pause)
9998 remote_advertising |= LPA_PAUSE_ASYM;
10000 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10001 remote_advertising);
10002 tx_pause = flowctl & FLOW_CTRL_TX;
10003 rx_pause = flowctl & FLOW_CTRL_RX;
10005 if (phydev->duplex == HCLGE_MAC_HALF) {
10010 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
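/* Illustrative resolution by mii_resolve_flowctrl_fdx() (standard IEEE
 * 802.3 pause negotiation): when both advertisement words carry the
 * symmetric pause bit, e.g. both contain LPA_PAUSE_CAP, the helper
 * returns FLOW_CTRL_TX | FLOW_CTRL_RX, enabling pause in both directions.
 */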
10013 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10014 u32 *rx_en, u32 *tx_en)
10016 struct hclge_vport *vport = hclge_get_vport(handle);
10017 struct hclge_dev *hdev = vport->back;
10018 struct phy_device *phydev = hdev->hw.mac.phydev;
10020 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
10022 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10028 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10031 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10034 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10043 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10044 u32 rx_en, u32 tx_en)
10046 if (rx_en && tx_en)
10047 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10048 else if (rx_en && !tx_en)
10049 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10050 else if (!rx_en && tx_en)
10051 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10053 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10055 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10058 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10059 u32 rx_en, u32 tx_en)
10061 struct hclge_vport *vport = hclge_get_vport(handle);
10062 struct hclge_dev *hdev = vport->back;
10063 struct phy_device *phydev = hdev->hw.mac.phydev;
10067 fc_autoneg = hclge_get_autoneg(handle);
10068 if (auto_neg != fc_autoneg) {
10069 dev_info(&hdev->pdev->dev,
10070 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10071 return -EOPNOTSUPP;
10075 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10076 dev_info(&hdev->pdev->dev,
10077 "Priority flow control enabled. Cannot set link flow control.\n");
10078 return -EOPNOTSUPP;
10081 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10083 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10086 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10089 return phy_start_aneg(phydev);
10091 return -EOPNOTSUPP;
10094 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10095 u8 *auto_neg, u32 *speed, u8 *duplex)
10097 struct hclge_vport *vport = hclge_get_vport(handle);
10098 struct hclge_dev *hdev = vport->back;
10101 *speed = hdev->hw.mac.speed;
10103 *duplex = hdev->hw.mac.duplex;
10105 *auto_neg = hdev->hw.mac.autoneg;
10108 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10111 struct hclge_vport *vport = hclge_get_vport(handle);
10112 struct hclge_dev *hdev = vport->back;
10114 /* When the nic is down, the service task is not running and does not
10115 * update the port information every second. Query the port information
10116 * before returning the media type, to ensure it is correct.
10118 hclge_update_port_info(hdev);
10121 *media_type = hdev->hw.mac.media_type;
10124 *module_type = hdev->hw.mac.module_type;
10127 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10128 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10130 struct hclge_vport *vport = hclge_get_vport(handle);
10131 struct hclge_dev *hdev = vport->back;
10132 struct phy_device *phydev = hdev->hw.mac.phydev;
10133 int mdix_ctrl, mdix, is_resolved;
10134 unsigned int retval;
10137 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10138 *tp_mdix = ETH_TP_MDI_INVALID;
10142 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10144 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10145 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10146 HCLGE_PHY_MDIX_CTRL_S);
10148 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10149 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10150 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10152 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10154 switch (mdix_ctrl) {
10156 *tp_mdix_ctrl = ETH_TP_MDI;
10159 *tp_mdix_ctrl = ETH_TP_MDI_X;
10162 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10165 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10170 *tp_mdix = ETH_TP_MDI_INVALID;
10172 *tp_mdix = ETH_TP_MDI_X;
10174 *tp_mdix = ETH_TP_MDI;
10177 static void hclge_info_show(struct hclge_dev *hdev)
10179 struct device *dev = &hdev->pdev->dev;
10181 dev_info(dev, "PF info begin:\n");
10183 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10184 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10185 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10186 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10187 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
10188 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10189 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10190 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10191 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10192 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10193 dev_info(dev, "This is %s PF\n",
10194 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10195 dev_info(dev, "DCB %s\n",
10196 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10197 dev_info(dev, "MQPRIO %s\n",
10198 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10200 dev_info(dev, "PF info end.\n");
10203 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10204 struct hclge_vport *vport)
10206 struct hnae3_client *client = vport->nic.client;
10207 struct hclge_dev *hdev = ae_dev->priv;
10208 int rst_cnt = hdev->rst_stats.reset_cnt;
10211 ret = client->ops->init_instance(&vport->nic);
10215 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10216 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10217 rst_cnt != hdev->rst_stats.reset_cnt) {
10222 /* Enable nic hw error interrupts */
10223 ret = hclge_config_nic_hw_error(hdev, true);
10225 dev_err(&ae_dev->pdev->dev,
10226 "fail(%d) to enable hw error interrupts\n", ret);
10230 hnae3_set_client_init_flag(client, ae_dev, 1);
10232 if (netif_msg_drv(&hdev->vport->nic))
10233 hclge_info_show(hdev);
10238 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10239 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10240 msleep(HCLGE_WAIT_RESET_DONE);
10242 client->ops->uninit_instance(&vport->nic, 0);
10247 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10248 struct hclge_vport *vport)
10250 struct hclge_dev *hdev = ae_dev->priv;
10251 struct hnae3_client *client;
10255 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10259 client = hdev->roce_client;
10260 ret = hclge_init_roce_base_info(vport);
10264 rst_cnt = hdev->rst_stats.reset_cnt;
10265 ret = client->ops->init_instance(&vport->roce);
10269 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10270 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10271 rst_cnt != hdev->rst_stats.reset_cnt) {
10273 goto init_roce_err;
10276 /* Enable roce ras interrupts */
10277 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10279 dev_err(&ae_dev->pdev->dev,
10280 "fail(%d) to enable roce ras interrupts\n", ret);
10281 goto init_roce_err;
10284 hnae3_set_client_init_flag(client, ae_dev, 1);
10289 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10290 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10291 msleep(HCLGE_WAIT_RESET_DONE);
10293 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10298 static int hclge_init_client_instance(struct hnae3_client *client,
10299 struct hnae3_ae_dev *ae_dev)
10301 struct hclge_dev *hdev = ae_dev->priv;
10302 struct hclge_vport *vport;
10305 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10306 vport = &hdev->vport[i];
10308 switch (client->type) {
10309 case HNAE3_CLIENT_KNIC:
10310 hdev->nic_client = client;
10311 vport->nic.client = client;
10312 ret = hclge_init_nic_client_instance(ae_dev, vport);
10316 ret = hclge_init_roce_client_instance(ae_dev, vport);
10321 case HNAE3_CLIENT_ROCE:
10322 if (hnae3_dev_roce_supported(hdev)) {
10323 hdev->roce_client = client;
10324 vport->roce.client = client;
10327 ret = hclge_init_roce_client_instance(ae_dev, vport);
10340 hdev->nic_client = NULL;
10341 vport->nic.client = NULL;
10344 hdev->roce_client = NULL;
10345 vport->roce.client = NULL;
10349 static void hclge_uninit_client_instance(struct hnae3_client *client,
10350 struct hnae3_ae_dev *ae_dev)
10352 struct hclge_dev *hdev = ae_dev->priv;
10353 struct hclge_vport *vport;
10356 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10357 vport = &hdev->vport[i];
10358 if (hdev->roce_client) {
10359 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10360 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10361 msleep(HCLGE_WAIT_RESET_DONE);
10363 hdev->roce_client->ops->uninit_instance(&vport->roce,
10365 hdev->roce_client = NULL;
10366 vport->roce.client = NULL;
10368 if (client->type == HNAE3_CLIENT_ROCE)
10370 if (hdev->nic_client && client->ops->uninit_instance) {
10371 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10372 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10373 msleep(HCLGE_WAIT_RESET_DONE);
10375 client->ops->uninit_instance(&vport->nic, 0);
10376 hdev->nic_client = NULL;
10377 vport->nic.client = NULL;
10382 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10384 #define HCLGE_MEM_BAR 4
10386 struct pci_dev *pdev = hdev->pdev;
10387 struct hclge_hw *hw = &hdev->hw;
10389 /* if the device does not have device memory, return directly */
10390 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10393 hw->mem_base = devm_ioremap_wc(&pdev->dev,
10394 pci_resource_start(pdev, HCLGE_MEM_BAR),
10395 pci_resource_len(pdev, HCLGE_MEM_BAR));
10396 if (!hw->mem_base) {
10397 dev_err(&pdev->dev, "failed to map device memory\n");
10404 static int hclge_pci_init(struct hclge_dev *hdev)
10406 struct pci_dev *pdev = hdev->pdev;
10407 struct hclge_hw *hw;
10410 ret = pci_enable_device(pdev);
10412 dev_err(&pdev->dev, "failed to enable PCI device\n");
10416 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10418 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10420 dev_err(&pdev->dev,
10421 "can't set consistent PCI DMA");
10422 goto err_disable_device;
10424 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10427 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10429 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10430 goto err_disable_device;
10433 pci_set_master(pdev);
10435 hw->io_base = pcim_iomap(pdev, 2, 0);
10436 if (!hw->io_base) {
10437 dev_err(&pdev->dev, "Can't map configuration register space\n");
10439 goto err_clr_master;
10442 ret = hclge_dev_mem_map(hdev);
10444 goto err_unmap_io_base;
10446 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10451 pcim_iounmap(pdev, hdev->hw.io_base);
10453 pci_clear_master(pdev);
10454 pci_release_regions(pdev);
10455 err_disable_device:
10456 pci_disable_device(pdev);
10461 static void hclge_pci_uninit(struct hclge_dev *hdev)
10463 struct pci_dev *pdev = hdev->pdev;
10465 if (hdev->hw.mem_base)
10466 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10468 pcim_iounmap(pdev, hdev->hw.io_base);
10469 pci_free_irq_vectors(pdev);
10470 pci_clear_master(pdev);
10471 pci_release_mem_regions(pdev);
10472 pci_disable_device(pdev);
10475 static void hclge_state_init(struct hclge_dev *hdev)
10477 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10478 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10479 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10480 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10481 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10482 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10483 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10486 static void hclge_state_uninit(struct hclge_dev *hdev)
10488 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10489 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10491 if (hdev->reset_timer.function)
10492 del_timer_sync(&hdev->reset_timer);
10493 if (hdev->service_task.work.func)
10494 cancel_delayed_work_sync(&hdev->service_task);
10497 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10499 #define HCLGE_FLR_RETRY_WAIT_MS 500
10500 #define HCLGE_FLR_RETRY_CNT 5
10502 struct hclge_dev *hdev = ae_dev->priv;
10507 down(&hdev->reset_sem);
10508 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10509 hdev->reset_type = HNAE3_FLR_RESET;
10510 ret = hclge_reset_prepare(hdev);
10511 if (ret || hdev->reset_pending) {
10512 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10514 if (hdev->reset_pending ||
10515 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10516 dev_err(&hdev->pdev->dev,
10517 "reset_pending:0x%lx, retry_cnt:%d\n",
10518 hdev->reset_pending, retry_cnt);
10519 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10520 up(&hdev->reset_sem);
10521 msleep(HCLGE_FLR_RETRY_WAIT_MS);
10526 /* disable the misc vector before FLR is done */
10527 hclge_enable_vector(&hdev->misc_vector, false);
10528 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10529 hdev->rst_stats.flr_rst_cnt++;
10532 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10534 struct hclge_dev *hdev = ae_dev->priv;
10537 hclge_enable_vector(&hdev->misc_vector, true);
10539 ret = hclge_reset_rebuild(hdev);
10541 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10543 hdev->reset_type = HNAE3_NONE_RESET;
10544 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10545 up(&hdev->reset_sem);
10548 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10552 for (i = 0; i < hdev->num_alloc_vport; i++) {
10553 struct hclge_vport *vport = &hdev->vport[i];
10556 /* Send cmd to clear VF's FUNC_RST_ING */
10557 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10559 dev_warn(&hdev->pdev->dev,
10560 "clear vf(%u) rst failed %d!\n",
10561 vport->vport_id, ret);
10565 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10567 struct pci_dev *pdev = ae_dev->pdev;
10568 struct hclge_dev *hdev;
10571 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10576 hdev->ae_dev = ae_dev;
10577 hdev->reset_type = HNAE3_NONE_RESET;
10578 hdev->reset_level = HNAE3_FUNC_RESET;
10579 ae_dev->priv = hdev;
10581 /* HW supports 2 layers of vlan */
10582 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10584 mutex_init(&hdev->vport_lock);
10585 spin_lock_init(&hdev->fd_rule_lock);
10586 sema_init(&hdev->reset_sem, 1);
10588 ret = hclge_pci_init(hdev);
10592 /* Firmware command queue initialize */
10593 ret = hclge_cmd_queue_init(hdev);
10595 goto err_pci_uninit;
10597 /* Firmware command initialize */
10598 ret = hclge_cmd_init(hdev);
10600 goto err_cmd_uninit;
10602 ret = hclge_get_cap(hdev);
10604 goto err_cmd_uninit;
10606 ret = hclge_query_dev_specs(hdev);
10608 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10610 goto err_cmd_uninit;
10613 ret = hclge_configure(hdev);
10615 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10616 goto err_cmd_uninit;
10619 ret = hclge_init_msi(hdev);
10621 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10622 goto err_cmd_uninit;
10625 ret = hclge_misc_irq_init(hdev);
10627 goto err_msi_uninit;
10629 ret = hclge_alloc_tqps(hdev);
10631 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10632 goto err_msi_irq_uninit;
10635 ret = hclge_alloc_vport(hdev);
10637 goto err_msi_irq_uninit;
10639 ret = hclge_map_tqp(hdev);
10641 goto err_msi_irq_uninit;
10643 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10644 ret = hclge_mac_mdio_config(hdev);
10646 goto err_msi_irq_uninit;
10649 ret = hclge_init_umv_space(hdev);
10651 goto err_mdiobus_unreg;
10653 ret = hclge_mac_init(hdev);
10655 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10656 goto err_mdiobus_unreg;
10659 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10661 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10662 goto err_mdiobus_unreg;
10665 ret = hclge_config_gro(hdev, true);
10667 goto err_mdiobus_unreg;
10669 ret = hclge_init_vlan_config(hdev);
10671 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10672 goto err_mdiobus_unreg;
10675 ret = hclge_tm_schd_init(hdev);
10677 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10678 goto err_mdiobus_unreg;
10681 ret = hclge_rss_init_cfg(hdev);
10683 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
10684 goto err_mdiobus_unreg;
10687 ret = hclge_rss_init_hw(hdev);
10689 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10690 goto err_mdiobus_unreg;
10693 ret = init_mgr_tbl(hdev);
10695 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10696 goto err_mdiobus_unreg;
10699 ret = hclge_init_fd_config(hdev);
10701 dev_err(&pdev->dev,
10702 "fd table init fail, ret=%d\n", ret);
10703 goto err_mdiobus_unreg;
10706 INIT_KFIFO(hdev->mac_tnl_log);
10708 hclge_dcb_ops_set(hdev);
10710 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10711 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10713 /* Set up affinity after the service timer setup, because add_timer_on
10714 * is called in the affinity notify callback.
10716 hclge_misc_affinity_setup(hdev);
10718 hclge_clear_all_event_cause(hdev);
10719 hclge_clear_resetting_state(hdev);
10721 /* Log and clear the hw errors that have already occurred */
10722 hclge_handle_all_hns_hw_errors(ae_dev);
10724 /* request a delayed reset for the error recovery, because an immediate
10725 * global reset on a PF would affect pending initialization of other PFs
10727 if (ae_dev->hw_err_reset_req) {
10728 enum hnae3_reset_type reset_level;
10730 reset_level = hclge_get_reset_level(ae_dev,
10731 &ae_dev->hw_err_reset_req);
10732 hclge_set_def_reset_request(ae_dev, reset_level);
10733 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10736 /* Enable MISC vector(vector0) */
10737 hclge_enable_vector(&hdev->misc_vector, true);
10739 hclge_state_init(hdev);
10740 hdev->last_reset_time = jiffies;
10742 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10743 HCLGE_DRIVER_NAME);
10745 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10750 if (hdev->hw.mac.phydev)
10751 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10752 err_msi_irq_uninit:
10753 hclge_misc_irq_uninit(hdev);
10755 pci_free_irq_vectors(pdev);
10757 hclge_cmd_uninit(hdev);
10759 pcim_iounmap(pdev, hdev->hw.io_base);
10760 pci_clear_master(pdev);
10761 pci_release_regions(pdev);
10762 pci_disable_device(pdev);
10764 mutex_destroy(&hdev->vport_lock);
10768 static void hclge_stats_clear(struct hclge_dev *hdev)
10770 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10773 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10775 return hclge_config_switch_param(hdev, vf, enable,
10776 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10779 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10781 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10782 HCLGE_FILTER_FE_NIC_INGRESS_B,
10786 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10790 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10792 dev_err(&hdev->pdev->dev,
10793 "Set vf %d mac spoof check %s failed, ret=%d\n",
10794 vf, enable ? "on" : "off", ret);
10798 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10800 dev_err(&hdev->pdev->dev,
10801 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10802 vf, enable ? "on" : "off", ret);
10807 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10810 struct hclge_vport *vport = hclge_get_vport(handle);
10811 struct hclge_dev *hdev = vport->back;
10812 u32 new_spoofchk = enable ? 1 : 0;
10815 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10816 return -EOPNOTSUPP;
10818 vport = hclge_get_vf_vport(hdev, vf);
10822 if (vport->vf_info.spoofchk == new_spoofchk)
10825 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10826 dev_warn(&hdev->pdev->dev,
10827 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10829 else if (enable && hclge_is_umv_space_full(vport, true))
10830 dev_warn(&hdev->pdev->dev,
10831 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10834 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10838 vport->vf_info.spoofchk = new_spoofchk;
10842 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10844 struct hclge_vport *vport = hdev->vport;
10848 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10851 /* resume the vf spoof check state after reset */
10852 for (i = 0; i < hdev->num_alloc_vport; i++) {
10853 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10854 vport->vf_info.spoofchk);
10864 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10866 struct hclge_vport *vport = hclge_get_vport(handle);
10867 struct hclge_dev *hdev = vport->back;
10868 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10869 u32 new_trusted = enable ? 1 : 0;
10873 vport = hclge_get_vf_vport(hdev, vf);
10877 if (vport->vf_info.trusted == new_trusted)
10880 /* Disable promisc mode for VF if it is not trusted any more. */
10881 if (!enable && vport->vf_info.promisc_enable) {
10882 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10883 ret = hclge_set_vport_promisc_mode(vport, false, false,
10887 vport->vf_info.promisc_enable = 0;
10888 hclge_inform_vf_promisc_info(vport);
10891 vport->vf_info.trusted = new_trusted;
10896 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10901 /* reset vf rate to default value */
10902 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10903 struct hclge_vport *vport = &hdev->vport[vf];
10905 vport->vf_info.max_tx_rate = 0;
10906 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10908 dev_err(&hdev->pdev->dev,
10909 "vf%d failed to reset to default, ret=%d\n",
10910 vf - HCLGE_VF_VPORT_START_NUM, ret);
10914 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
10915 int min_tx_rate, int max_tx_rate)
10917 if (min_tx_rate != 0 ||
10918 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10919 dev_err(&hdev->pdev->dev,
10920 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10921 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10928 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10929 int min_tx_rate, int max_tx_rate, bool force)
10931 struct hclge_vport *vport = hclge_get_vport(handle);
10932 struct hclge_dev *hdev = vport->back;
10935 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
10939 vport = hclge_get_vf_vport(hdev, vf);
10943 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10946 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10950 vport->vf_info.max_tx_rate = max_tx_rate;
10955 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10957 struct hnae3_handle *handle = &hdev->vport->nic;
10958 struct hclge_vport *vport;
10962 /* resume the vf max_tx_rate after reset */
10963 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10964 vport = hclge_get_vf_vport(hdev, vf);
10968 /* zero means max rate; after reset, firmware has already set it to
10969 * the max rate, so just continue.
10971 if (!vport->vf_info.max_tx_rate)
10974 ret = hclge_set_vf_rate(handle, vf, 0,
10975 vport->vf_info.max_tx_rate, true);
10977 dev_err(&hdev->pdev->dev,
10978 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10979 vf, vport->vf_info.max_tx_rate, ret);
10987 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10989 struct hclge_vport *vport = hdev->vport;
10992 for (i = 0; i < hdev->num_alloc_vport; i++) {
10993 hclge_vport_stop(vport);
10998 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11000 struct hclge_dev *hdev = ae_dev->priv;
11001 struct pci_dev *pdev = ae_dev->pdev;
11004 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11006 hclge_stats_clear(hdev);
11007 /* NOTE: pf reset doesn't need to clear or restore the pf and vf table
11008 * entries, so the tables in memory should not be cleaned here.
11010 if (hdev->reset_type == HNAE3_IMP_RESET ||
11011 hdev->reset_type == HNAE3_GLOBAL_RESET) {
11012 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11013 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11014 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11015 hclge_reset_umv_space(hdev);
11018 ret = hclge_cmd_init(hdev);
11020 dev_err(&pdev->dev, "Cmd queue init failed\n");
11024 ret = hclge_map_tqp(hdev);
11026 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11030 ret = hclge_mac_init(hdev);
11032 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11036 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11038 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11042 ret = hclge_config_gro(hdev, true);
11046 ret = hclge_init_vlan_config(hdev);
11048 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11052 ret = hclge_tm_init_hw(hdev, true);
11054 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11058 ret = hclge_rss_init_hw(hdev);
11060 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11064 ret = init_mgr_tbl(hdev);
11066 dev_err(&pdev->dev,
11067 "failed to reinit manager table, ret = %d\n", ret);
11071 ret = hclge_init_fd_config(hdev);
11073 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11077 /* Log and clear the hw errors that have already occurred */
11078 hclge_handle_all_hns_hw_errors(ae_dev);
11080 /* Re-enable the hw error interrupts because
11081 * the interrupts get disabled on global reset.
11083 ret = hclge_config_nic_hw_error(hdev, true);
11085 dev_err(&pdev->dev,
11086 "fail(%d) to re-enable NIC hw error interrupts\n",
11091 if (hdev->roce_client) {
11092 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11094 dev_err(&pdev->dev,
11095 "fail(%d) to re-enable roce ras interrupts\n",
11101 hclge_reset_vport_state(hdev);
11102 ret = hclge_reset_vport_spoofchk(hdev);
11106 ret = hclge_resume_vf_rate(hdev);
11110 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11111 HCLGE_DRIVER_NAME);
11116 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11118 struct hclge_dev *hdev = ae_dev->priv;
11119 struct hclge_mac *mac = &hdev->hw.mac;
11121 hclge_reset_vf_rate(hdev);
11122 hclge_clear_vf_vlan(hdev);
11123 hclge_misc_affinity_teardown(hdev);
11124 hclge_state_uninit(hdev);
11125 hclge_uninit_mac_table(hdev);
11128 mdiobus_unregister(mac->mdio_bus);
11130 /* Disable MISC vector(vector0) */
11131 hclge_enable_vector(&hdev->misc_vector, false);
11132 synchronize_irq(hdev->misc_vector.vector_irq);
11134 /* Disable all hw interrupts */
11135 hclge_config_mac_tnl_int(hdev, false);
11136 hclge_config_nic_hw_error(hdev, false);
11137 hclge_config_rocee_ras_interrupt(hdev, false);
11139 hclge_cmd_uninit(hdev);
11140 hclge_misc_irq_uninit(hdev);
11141 hclge_pci_uninit(hdev);
11142 mutex_destroy(&hdev->vport_lock);
11143 hclge_uninit_vport_vlan_table(hdev);
11144 ae_dev->priv = NULL;
11147 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11149 struct hclge_vport *vport = hclge_get_vport(handle);
11150 struct hclge_dev *hdev = vport->back;
11152 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11155 static void hclge_get_channels(struct hnae3_handle *handle,
11156 struct ethtool_channels *ch)
11158 ch->max_combined = hclge_get_max_channels(handle);
11159 ch->other_count = 1;
11161 ch->combined_count = handle->kinfo.rss_size;
11164 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11165 u16 *alloc_tqps, u16 *max_rss_size)
11167 struct hclge_vport *vport = hclge_get_vport(handle);
11168 struct hclge_dev *hdev = vport->back;
11170 *alloc_tqps = vport->alloc_tqps;
11171 *max_rss_size = hdev->pf_rss_size_max;
11174 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11175 bool rxfh_configured)
11177 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11178 struct hclge_vport *vport = hclge_get_vport(handle);
11179 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11180 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11181 struct hclge_dev *hdev = vport->back;
11182 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11183 u16 cur_rss_size = kinfo->rss_size;
11184 u16 cur_tqps = kinfo->num_tqps;
11185 u16 tc_valid[HCLGE_MAX_TC_NUM];
11191 kinfo->req_rss_size = new_tqps_num;
11193 ret = hclge_tm_vport_map_update(hdev);
11195 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11199 roundup_size = roundup_pow_of_two(kinfo->rss_size);
11200 roundup_size = ilog2(roundup_size);
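/* Worked example of the two lines above: for kinfo->rss_size = 48,
 * roundup_pow_of_two(48) = 64 and ilog2(64) = 6, so each tc_size[i]
 * below is written as a log2 of the per-TC RSS region size.
 */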
11201 /* Set the RSS TC mode according to the new RSS size */
11202 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11205 if (!(hdev->hw_tc_map & BIT(i)))
11209 tc_size[i] = roundup_size;
11210 tc_offset[i] = kinfo->rss_size * i;
11212 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11216 /* RSS indirection table has been configured by the user */
11217 if (rxfh_configured)
11220 /* Reinitialize the rss indirection table according to the new RSS size */
11221 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11226 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11227 rss_indir[i] = i % kinfo->rss_size;
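/* Illustrative default spread: with a 512-entry indirection table and
 * rss_size = 16, the assignment above repeats 0, 1, ..., 15 thirty-two
 * times, distributing flows evenly across the active queues. (512 is a
 * typical rss_ind_tbl_size for this hardware, not a fixed value.)
 */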
11229 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11231 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11238 dev_info(&hdev->pdev->dev,
11239 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
11240 cur_rss_size, kinfo->rss_size,
11241 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
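/* Illustrative walk-through (assumed values, not from the original
 * source): "ethtool -L eth0 combined 8" reaches hclge_set_channels()
 * with new_tqps_num = 8. If two TCs are set in hw_tc_map and the TM
 * update settles on rss_size = 8, then roundup_pow_of_two(8) = 8 and
 * ilog2(8) = 3, so each valid TC gets tc_size = 3 (a log2 value) with
 * tc_offset 0 and 8, and the indirection table is refilled as i % 8
 * unless the user has configured it (rxfh_configured).
 */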
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2
	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);
			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
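/* Worked example (illustrative): for regs_num = 100, the first BD
 * donates HCLGE_32_BIT_DESC_NODATA_LEN = 2 slots to the header, so
 * cmd_num = DIV_ROUND_UP(100 + 2, 8) = 13 descriptors are sent in one
 * batch. hclge_get_64_bit_regs() below repeats the same scheme with
 * 4 u64 slots per BD and a 1-slot header.
 */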
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1
	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);
			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3
#define BD_LIST_MAX_NUM		30
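/* Layout note (editorial): the register dump is organized in 16-byte
 * lines (REG_NUM_PER_LINE u32 words per line); each block is padded to
 * a whole line with SEPARATOR_VALUE (0xFDFCFBFA) words so that tools
 * parsing the dump can find section boundaries.
 */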
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	int i;

	/* initialize command BD except the last one */
	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	/* initialize the last command BD */
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
}
static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list,
				    u32 type_num)
{
	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}
static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	desc = desc_src;
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}
static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg_num + separator_num;
}
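/* Worked example (illustrative, assuming the usual 6-word descriptor
 * payload): bd_num = 3 gives reg_num = 18; 18 & REG_NUM_REMAIN_MASK = 2,
 * so separator_num = 4 - 2 = 2 pad words round the block up to 20 words,
 * a whole number of 4-word lines.
 */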
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int data_len_per_desc, bd_num, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	u32 data_len;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	data_len_per_desc = sizeof_field(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		data_len = data_len_per_desc * bd_num;
		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
	}

	return ret;
}
static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	struct hclge_desc *desc_src;
	u32 *reg = data;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src)
		return -ENOMEM;

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			break;
		}

		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	kfree(desc_src);
	return ret;
}
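/* Note (editorial): one buffer sized for the largest BD count
 * (bd_num_max) is reused for every DFX register type, avoiding a
 * reallocation per hclge_dfx_reg_cmd_send() call in the loop above.
 */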
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num, separator_num;
	int data_num_sum;
	u32 *reg = data;

	/* fetching per-PF registers values from PF PCIe register space */
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum = reg_num + separator_num;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum += reg_num + separator_num;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

	return data_num_sum;
}
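/* Note (editorial): per-queue ring registers repeat at a stride of
 * HCLGE_RING_REG_OFFSET (0x200) bytes and per-vector interrupt
 * registers at HCLGE_RING_INT_REG_OFFSET (0x4) bytes, so the loops
 * above walk those strides for each TQP and for each in-use MSI-X
 * vector except the misc vector.
 */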
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int regs_lines_32_bit, regs_lines_64_bit;
	int dfx_regs_len;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}
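/* Worked example (illustrative): a 14-entry u32 address list occupies
 * 56 bytes, so its line count is 56 / REG_LEN_PER_LINE (= 3) plus
 * REG_SEPARATOR_LINE, i.e. 4 lines of 16 bytes in the final length.
 */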
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, reg_num, separator_num, ret;
	u32 *reg = data;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_32_bit;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_64_bit * 2;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
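/* Usage note (editorial): this is the ethtool identify hook, e.g.
 * "ethtool -p eth0 5"; ETHTOOL_ID_ACTIVE lights the locate LED and
 * ETHTOOL_ID_INACTIVE restores it when the identify period ends.
 */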
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;
	u8 tmp_flags;
	int ret;

	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
		vport->last_promisc_flags = vport->overflow_promisc_flags;
	}

	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
					     tmp_flags & HNAE3_MPE);
		if (!ret) {
			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
			hclge_enable_vlan_filter(handle,
						 tmp_flags & HNAE3_VLAN_FLTR);
		}
	}
}
static bool hclge_module_existed(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u32 existed;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP exist state, ret = %d\n", ret);
		return false;
	}

	existed = le32_to_cpu(desc.data[0]);

	return existed != 0;
}
/* need 6 bds(total 140 bytes) in one reading
 * return the number of bytes actually read, 0 means read failed.
 */
static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
				     u32 len, u8 *data)
{
	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
	u16 read_len;
	u16 copy_len;
	int ret;
	int i;

	/* setup all 6 bds to read module eeprom info. */
	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
					   true);

		/* bd0~bd4 need next flag */
		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	/* setup bd0, this bd contains offset and read length. */
	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = cpu_to_le16(read_len);

	ret = hclge_cmd_send(&hdev->hw, desc, i);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP eeprom info, ret = %d\n", ret);
		return 0;
	}

	/* copy sfp info from bd0 to out buffer. */
	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	/* copy sfp info from bd1~bd5 to out buffer if needed. */
	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			return read_len;

		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return read_len;
}
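/* Illustrative example: a 256-byte EEPROM read is served by two
 * firmware transactions, HCLGE_SFP_INFO_MAX_LEN (140) bytes and then
 * the remaining 116, with hclge_get_module_eeprom() below advancing
 * offset and length between calls.
 */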
static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.dbg_read_cmd = hclge_dbg_read_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
	.add_cls_flower = hclge_add_cls_flower,
	.del_cls_flower = hclge_del_cls_flower,
	.cls_flower_active = hclge_is_cls_flower_active,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}
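/* Note (editorial, assumption about intent): the workqueue is created
 * before the algo registration so that any service or reset task
 * scheduled during probe already has hclge_wq available to run on.
 */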
static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);