// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
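
/* Usage sketch (illustrative, not part of the driver): the two macros above
 * cooperate to read one u64 counter out of struct hclge_mac_stats by byte
 * offset, which is exactly how the g_mac_stats_string table below is
 * consumed:
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */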

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500
/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12
#define HCLGE_LINK_STATUS_MS	10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App      Loopback test",
	"Serdes   serial Loopback test",
	"Serdes   parallel Loopback test",
	"Phy      Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
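
/* Note (added for context): this is the well-known 40-byte default Toeplitz
 * RSS hash key that several other NIC drivers also ship; it is not a
 * device-specific secret.
 */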

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};
static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
};
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
};
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
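
/* Worked example (an illustrative reading of the formula above): the first
 * descriptor carries 3 stats registers and every following descriptor
 * carries 4, so for reg_num = 12 this yields
 * 1 + ((12 - 3) >> 2) + 1 = 4 descriptors (3 + 4 + 4 + 1 registers).
 */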
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);
	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has a TX and an RX queue */
	return kinfo->num_tqps * (2);
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: supported only in GE mode
	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
	 * phy: supported only when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		    hdev->hw.mac.phydev->drv->set_loopback) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}

	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}
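
/* Usage note (illustrative): the callbacks above follow the usual
 * "ethtool -S" contract: hclge_get_sset_count() sizes the string and stat
 * arrays, hclge_get_strings() fills the names and hclge_get_stats() fills
 * the values, so the MAC entries and the per-TQP entries must be emitted in
 * the same order by both walks.
 */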
static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and RoCE vectors,
		 * NIC vectors are queued before RoCE vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case 8:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	case HCLGE_MAC_SPEED_200G:
		speed_bit = HCLGE_SUPPORT_200G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			mac->supported);
}
static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
	case HCLGE_MAC_SPEED_200G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define SPEED_ABILITY_EXT_SHIFT			8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
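	/* Note (added for context): the two-step shift above is equivalent to
	 * "<< 32"; splitting it is presumably a guard against undefined
	 * behaviour, since shifting a value by the full width of its type is
	 * undefined in C if the intermediate were ever a 32-bit type.
	 */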
	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size encoded as a
	 * power of 2 instead of the actual value, which is more flexible
	 * for future changes and expansions.
	 * When the VF max rss size field is HCLGE_CFG_RSS_SIZE_S, a PF
	 * field of 0 does not make sense; in that case PF and VF share the
	 * same max rss size field, HCLGE_CFG_RSS_SIZE_S.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;
}
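
/* Worked example (illustrative): a PF RSS size field of 7 yields
 * pf_rss_size_max = 1U << 7 = 128, while a field of 0 falls back to
 * vf_rss_size_max, which is read out directly rather than as a power of 2.
 */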
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be fetched
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len must be in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);

		if (i != (HCLGE_PF_CFG_DESC_NUM - 1))
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* use the minimal number of queue pairs: one per vport */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}
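
/* Worked example (illustrative): on a NUMA node with 24 CPUs, PCI function
 * 5 selects index 5 % 24 = 5, and cpumask_local_spread() turns that into
 * the sixth CPU closest to the device's node, which seeds the initial
 * affinity mask.
 */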
static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure a one-to-one mapping between IRQs and queues by default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}
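
/* Worked example (illustrative): with pf_rss_size_max = 64, 16 allocated
 * TQPs, 4 TCs and 17 NIC MSI vectors, rss_size is first min(64, 16 / 4) = 4
 * and then capped at (17 - 1) / 4 = 4, so each TC gets 4 queues, one per
 * interrupt vector.
 */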
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
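
/* Encoding example (illustrative): a 32 KiB TC buffer is sent as
 * (0x8000 >> 7) | BIT(15) = 0x100 | 0x8000 = 0x8100, i.e. the size in
 * 128-byte units with the update-enable bit set.
 */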
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
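
/* Worked example (illustrative): with mps = 1500 rounded up to
 * aligned_mps = 1536, dv_buf_size = 8192 and DCB supported,
 * shared_buf_min = 2 * 1536 + 8192 = 11264; with tc_num = 4,
 * shared_buf_tc = 4 * 1536 + 1536 = 7680, so shared_std = 11264 (already a
 * multiple of 256). The rx buffer left over after all private buffers must
 * cover at least shared_std for the layout to be accepted.
 */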
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
2087 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2088 struct hclge_pkt_buf_alloc *buf_alloc)
2090 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2091 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2094 /* let the last to be cleared first */
2095 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2096 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2097 unsigned int mask = BIT((unsigned int)i);
2099 if (hdev->hw_tc_map & mask &&
2100 !(hdev->tm_info.hw_pfc_map & mask)) {
2101 /* Clear the no pfc TC private buffer */
2109 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2110 no_pfc_priv_num == 0)
2114 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
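/* hclge_drop_pfc_buf_till_fit: same idea as above, but reclaim the
 * private buffers of PFC-enabled TCs, again from the highest TC down.
 */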
2117 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2118 struct hclge_pkt_buf_alloc *buf_alloc)
2120 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2121 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2124 /* let the last TC be cleared first */
2125 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2126 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2127 unsigned int mask = BIT((unsigned int)i);
2129 if (hdev->hw_tc_map & mask &&
2130 hdev->tm_info.hw_pfc_map & mask) {
2131 /* Reduce the number of PFC-enabled TCs with a private buffer */
2139 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2144 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
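/* hclge_only_alloc_priv_buff: split the whole RX space evenly into
 * private buffers with no shared buffer, provided each TC's share stays
 * above the minimum implied by dv_buf_size plus the compensation terms.
 */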
2147 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2148 struct hclge_pkt_buf_alloc *buf_alloc)
2150 #define COMPENSATE_BUFFER 0x3C00
2151 #define COMPENSATE_HALF_MPS_NUM 5
2152 #define PRIV_WL_GAP 0x1800
2154 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2155 u32 tc_num = hclge_get_tc_num(hdev);
2156 u32 half_mps = hdev->mps >> 1;
2161 rx_priv = rx_priv / tc_num;
2163 if (tc_num <= NEED_RESERVE_TC_NUM)
2164 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2166 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2167 COMPENSATE_HALF_MPS_NUM * half_mps;
2168 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2169 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2171 if (rx_priv < min_rx_priv)
2174 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2175 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2182 if (!(hdev->hw_tc_map & BIT(i)))
2186 priv->buf_size = rx_priv;
2187 priv->wl.high = rx_priv - hdev->dv_buf_size;
2188 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2191 buf_alloc->s_buf.buf_size = 0;
2196 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2197 * @hdev: pointer to struct hclge_dev
2198 * @buf_alloc: pointer to buffer calculation data
2199 * @return: 0: calculation successful, negative: fail
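*
* The helpers above are tried in this order until the RX buffers fit:
* private-buffer-only allocation, per-TC buffers with large then small
* waterlines, then dropping the private buffers of non-PFC TCs and
* finally of PFC TCs.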
2201 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2202 struct hclge_pkt_buf_alloc *buf_alloc)
2204 /* When DCB is not supported, rx private buffer is not allocated. */
2205 if (!hnae3_dev_dcb_supported(hdev)) {
2206 u32 rx_all = hdev->pkt_buf_size;
2208 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2209 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2215 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2218 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2221 /* try to decrease the buffer size */
2222 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2225 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2228 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2234 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2235 struct hclge_pkt_buf_alloc *buf_alloc)
2237 struct hclge_rx_priv_buff_cmd *req;
2238 struct hclge_desc desc;
2242 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2243 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2245 /* Alloc private buffer TCs */
2246 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2247 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2250 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2252 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2256 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2257 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2259 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2261 dev_err(&hdev->pdev->dev,
2262 "rx private buffer alloc cmd failed %d\n", ret);
2267 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2268 struct hclge_pkt_buf_alloc *buf_alloc)
2270 struct hclge_rx_priv_wl_buf *req;
2271 struct hclge_priv_buf *priv;
2272 struct hclge_desc desc[2];
2276 for (i = 0; i < 2; i++) {
2277 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2279 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2281 /* The first descriptor sets the NEXT bit to 1 */
2283 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2285 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2287 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2288 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2290 priv = &buf_alloc->priv_buf[idx];
2291 req->tc_wl[j].high =
2292 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2293 req->tc_wl[j].high |=
2294 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2296 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2297 req->tc_wl[j].low |=
2298 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2302 /* Send 2 descriptors at one time */
2303 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2305 dev_err(&hdev->pdev->dev,
2306 "rx private waterline config cmd failed %d\n",
2311 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2312 struct hclge_pkt_buf_alloc *buf_alloc)
2314 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2315 struct hclge_rx_com_thrd *req;
2316 struct hclge_desc desc[2];
2317 struct hclge_tc_thrd *tc;
2321 for (i = 0; i < 2; i++) {
2322 hclge_cmd_setup_basic_desc(&desc[i],
2323 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2324 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2326 /* The first descriptor sets the NEXT bit to 1 */
2328 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2330 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2332 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2333 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2335 req->com_thrd[j].high =
2336 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2337 req->com_thrd[j].high |=
2338 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2339 req->com_thrd[j].low =
2340 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2341 req->com_thrd[j].low |=
2342 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2346 /* Send 2 descriptors at one time */
2347 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2349 dev_err(&hdev->pdev->dev,
2350 "common threshold config cmd failed %d\n", ret);
2354 static int hclge_common_wl_config(struct hclge_dev *hdev,
2355 struct hclge_pkt_buf_alloc *buf_alloc)
2357 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2358 struct hclge_rx_com_wl *req;
2359 struct hclge_desc desc;
2362 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2364 req = (struct hclge_rx_com_wl *)desc.data;
2365 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2366 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2368 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2369 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2371 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2373 dev_err(&hdev->pdev->dev,
2374 "common waterline config cmd failed %d\n", ret);
2379 int hclge_buffer_alloc(struct hclge_dev *hdev)
2381 struct hclge_pkt_buf_alloc *pkt_buf;
2384 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2388 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2390 dev_err(&hdev->pdev->dev,
2391 "could not calc tx buffer size for all TCs %d\n", ret);
2395 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2397 dev_err(&hdev->pdev->dev,
2398 "could not alloc tx buffers %d\n", ret);
2402 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2404 dev_err(&hdev->pdev->dev,
2405 "could not calc rx priv buffer size for all TCs %d\n",
2410 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2412 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2417 if (hnae3_dev_dcb_supported(hdev)) {
2418 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2420 dev_err(&hdev->pdev->dev,
2421 "could not configure rx private waterline %d\n",
2426 ret = hclge_common_thrd_config(hdev, pkt_buf);
2428 dev_err(&hdev->pdev->dev,
2429 "could not configure common threshold %d\n",
2435 ret = hclge_common_wl_config(hdev, pkt_buf);
2437 dev_err(&hdev->pdev->dev,
2438 "could not configure common waterline %d\n", ret);
2445 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2447 struct hnae3_handle *roce = &vport->roce;
2448 struct hnae3_handle *nic = &vport->nic;
2449 struct hclge_dev *hdev = vport->back;
2451 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2453 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2456 roce->rinfo.base_vector = hdev->roce_base_vector;
2458 roce->rinfo.netdev = nic->kinfo.netdev;
2459 roce->rinfo.roce_io_base = hdev->hw.io_base;
2460 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2462 roce->pdev = nic->pdev;
2463 roce->ae_algo = nic->ae_algo;
2464 roce->numa_node_mask = nic->numa_node_mask;
2469 static int hclge_init_msi(struct hclge_dev *hdev)
2471 struct pci_dev *pdev = hdev->pdev;
2475 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2477 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2480 "failed(%d) to allocate MSI/MSI-X vectors\n",
2484 if (vectors < hdev->num_msi)
2485 dev_warn(&hdev->pdev->dev,
2486 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2487 hdev->num_msi, vectors);
2489 hdev->num_msi = vectors;
2490 hdev->num_msi_left = vectors;
2492 hdev->base_msi_vector = pdev->irq;
2493 hdev->roce_base_vector = hdev->base_msi_vector +
2496 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2497 sizeof(u16), GFP_KERNEL);
2498 if (!hdev->vector_status) {
2499 pci_free_irq_vectors(pdev);
2503 for (i = 0; i < hdev->num_msi; i++)
2504 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2506 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2507 sizeof(int), GFP_KERNEL);
2508 if (!hdev->vector_irq) {
2509 pci_free_irq_vectors(pdev);
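/* Only 10M and 100M links support half duplex; every faster speed is
 * forced to full duplex.
 */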
2516 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2518 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2519 duplex = HCLGE_MAC_FULL;
2524 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2527 struct hclge_config_mac_speed_dup_cmd *req;
2528 struct hclge_desc desc;
2531 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2533 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2536 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
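/* Firmware speed encoding, as used by the cases below:
 * 10M=6, 100M=7, 1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5, 200G=8
 */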
2539 case HCLGE_MAC_SPEED_10M:
2540 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2541 HCLGE_CFG_SPEED_S, 6);
2543 case HCLGE_MAC_SPEED_100M:
2544 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2545 HCLGE_CFG_SPEED_S, 7);
2547 case HCLGE_MAC_SPEED_1G:
2548 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2549 HCLGE_CFG_SPEED_S, 0);
2551 case HCLGE_MAC_SPEED_10G:
2552 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2553 HCLGE_CFG_SPEED_S, 1);
2555 case HCLGE_MAC_SPEED_25G:
2556 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2557 HCLGE_CFG_SPEED_S, 2);
2559 case HCLGE_MAC_SPEED_40G:
2560 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2561 HCLGE_CFG_SPEED_S, 3);
2563 case HCLGE_MAC_SPEED_50G:
2564 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2565 HCLGE_CFG_SPEED_S, 4);
2567 case HCLGE_MAC_SPEED_100G:
2568 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2569 HCLGE_CFG_SPEED_S, 5);
2571 case HCLGE_MAC_SPEED_200G:
2572 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2573 HCLGE_CFG_SPEED_S, 8);
2576 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2580 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2583 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2585 dev_err(&hdev->pdev->dev,
2586 "mac speed/duplex config cmd failed %d.\n", ret);
2593 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2595 struct hclge_mac *mac = &hdev->hw.mac;
2598 duplex = hclge_check_speed_dup(duplex, speed);
2599 if (!mac->support_autoneg && mac->speed == speed &&
2600 mac->duplex == duplex)
2603 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2607 hdev->hw.mac.speed = speed;
2608 hdev->hw.mac.duplex = duplex;
2613 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2616 struct hclge_vport *vport = hclge_get_vport(handle);
2617 struct hclge_dev *hdev = vport->back;
2619 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2622 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2624 struct hclge_config_auto_neg_cmd *req;
2625 struct hclge_desc desc;
2629 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2631 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2633 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2634 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2636 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2638 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2644 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2646 struct hclge_vport *vport = hclge_get_vport(handle);
2647 struct hclge_dev *hdev = vport->back;
2649 if (!hdev->hw.mac.support_autoneg) {
2651 dev_err(&hdev->pdev->dev,
2652 "autoneg is not supported by current port\n");
2659 return hclge_set_autoneg_en(hdev, enable);
2662 static int hclge_get_autoneg(struct hnae3_handle *handle)
2664 struct hclge_vport *vport = hclge_get_vport(handle);
2665 struct hclge_dev *hdev = vport->back;
2666 struct phy_device *phydev = hdev->hw.mac.phydev;
2669 return phydev->autoneg;
2671 return hdev->hw.mac.autoneg;
2674 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2676 struct hclge_vport *vport = hclge_get_vport(handle);
2677 struct hclge_dev *hdev = vport->back;
2680 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2682 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2685 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2688 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2690 struct hclge_vport *vport = hclge_get_vport(handle);
2691 struct hclge_dev *hdev = vport->back;
2693 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2694 return hclge_set_autoneg_en(hdev, !halt);
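/* hclge_set_fec_hw: translate the HNAE3_FEC_* request bits into the
 * firmware's FEC mode field (auto, RS or BaseR) and issue the command.
 */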
2699 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2701 struct hclge_config_fec_cmd *req;
2702 struct hclge_desc desc;
2705 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2707 req = (struct hclge_config_fec_cmd *)desc.data;
2708 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2709 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2710 if (fec_mode & BIT(HNAE3_FEC_RS))
2711 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2712 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2713 if (fec_mode & BIT(HNAE3_FEC_BASER))
2714 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2715 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2717 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2719 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2724 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2726 struct hclge_vport *vport = hclge_get_vport(handle);
2727 struct hclge_dev *hdev = vport->back;
2728 struct hclge_mac *mac = &hdev->hw.mac;
2731 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2732 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2736 ret = hclge_set_fec_hw(hdev, fec_mode);
2740 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2744 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2747 struct hclge_vport *vport = hclge_get_vport(handle);
2748 struct hclge_dev *hdev = vport->back;
2749 struct hclge_mac *mac = &hdev->hw.mac;
2752 *fec_ability = mac->fec_ability;
2754 *fec_mode = mac->fec_mode;
2757 static int hclge_mac_init(struct hclge_dev *hdev)
2759 struct hclge_mac *mac = &hdev->hw.mac;
2762 hdev->support_sfp_query = true;
2763 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2764 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2765 hdev->hw.mac.duplex);
2769 if (hdev->hw.mac.support_autoneg) {
2770 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2777 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2778 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2783 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2785 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2789 ret = hclge_set_default_loopback(hdev);
2793 ret = hclge_buffer_alloc(hdev);
2795 dev_err(&hdev->pdev->dev,
2796 "allocate buffer fail, ret=%d\n", ret);
2801 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2803 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2804 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2805 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2806 hclge_wq, &hdev->service_task, 0);
2809 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2811 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2812 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2813 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2814 hclge_wq, &hdev->service_task, 0);
2817 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2819 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2820 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2821 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2822 hclge_wq, &hdev->service_task,
2826 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2828 struct hclge_link_status_cmd *req;
2829 struct hclge_desc desc;
2832 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2833 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2835 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2840 req = (struct hclge_link_status_cmd *)desc.data;
2841 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2842 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2847 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2849 struct phy_device *phydev = hdev->hw.mac.phydev;
2851 *link_status = HCLGE_LINK_STATUS_DOWN;
2853 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2856 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2859 return hclge_get_mac_link_status(hdev, link_status);
2862 static void hclge_update_link_status(struct hclge_dev *hdev)
2864 struct hnae3_client *rclient = hdev->roce_client;
2865 struct hnae3_client *client = hdev->nic_client;
2866 struct hnae3_handle *rhandle;
2867 struct hnae3_handle *handle;
2875 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2878 ret = hclge_get_mac_phy_link(hdev, &state);
2880 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2884 if (state != hdev->hw.mac.link) {
2885 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2886 handle = &hdev->vport[i].nic;
2887 client->ops->link_status_change(handle, state);
2888 hclge_config_mac_tnl_int(hdev, state);
2889 rhandle = &hdev->vport[i].roce;
2890 if (rclient && rclient->ops->link_status_change)
2891 rclient->ops->link_status_change(rhandle,
2894 hdev->hw.mac.link = state;
2897 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2900 static void hclge_update_port_capability(struct hclge_dev *hdev,
2901 struct hclge_mac *mac)
2903 if (hnae3_dev_fec_supported(hdev))
2904 /* update fec ability by speed */
2905 hclge_convert_setting_fec(mac);
2907 /* firmware cannot identify the backplane type; the media type
2908 * read from the configuration can help deal with it
2910 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2911 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2912 mac->module_type = HNAE3_MODULE_TYPE_KR;
2913 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2914 mac->module_type = HNAE3_MODULE_TYPE_TP;
2916 if (mac->support_autoneg) {
2917 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2918 linkmode_copy(mac->advertising, mac->supported);
2920 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2922 linkmode_zero(mac->advertising);
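/* hclge_get_sfp_speed: query the SFP speed from the IMP firmware. Old
 * firmware without the command returns -EOPNOTSUPP, which the caller
 * uses to stop issuing further SFP queries.
 */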
2926 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2928 struct hclge_sfp_info_cmd *resp;
2929 struct hclge_desc desc;
2932 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2933 resp = (struct hclge_sfp_info_cmd *)desc.data;
2934 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2935 if (ret == -EOPNOTSUPP) {
2936 dev_warn(&hdev->pdev->dev,
2937 "IMP do not support get SFP speed %d\n", ret);
2940 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2944 *speed = le32_to_cpu(resp->speed);
2949 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2951 struct hclge_sfp_info_cmd *resp;
2952 struct hclge_desc desc;
2955 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2956 resp = (struct hclge_sfp_info_cmd *)desc.data;
2958 resp->query_type = QUERY_ACTIVE_SPEED;
2960 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2961 if (ret == -EOPNOTSUPP) {
2962 dev_warn(&hdev->pdev->dev,
2963 "IMP does not support get SFP info %d\n", ret);
2966 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2970 /* In some cases, the mac speed fetched from IMP may be 0; it shouldn't
2971 * be assigned to mac->speed.
2973 if (!le32_to_cpu(resp->speed))
2976 mac->speed = le32_to_cpu(resp->speed);
2977 /* if resp->speed_ability is 0, it means the firmware is an old
2978 * version, so do not update these params
2980 if (resp->speed_ability) {
2981 mac->module_type = le32_to_cpu(resp->module_type);
2982 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2983 mac->autoneg = resp->autoneg;
2984 mac->support_autoneg = resp->autoneg_ability;
2985 mac->speed_type = QUERY_ACTIVE_SPEED;
2986 if (!resp->active_fec)
2989 mac->fec_mode = BIT(resp->active_fec);
2991 mac->speed_type = QUERY_SFP_SPEED;
2997 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
2998 struct ethtool_link_ksettings *cmd)
3000 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3001 struct hclge_vport *vport = hclge_get_vport(handle);
3002 struct hclge_phy_link_ksetting_0_cmd *req0;
3003 struct hclge_phy_link_ksetting_1_cmd *req1;
3004 u32 supported, advertising, lp_advertising;
3005 struct hclge_dev *hdev = vport->back;
3008 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3010 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3011 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3014 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3016 dev_err(&hdev->pdev->dev,
3017 "failed to get phy link ksetting, ret = %d.\n", ret);
3021 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3022 cmd->base.autoneg = req0->autoneg;
3023 cmd->base.speed = le32_to_cpu(req0->speed);
3024 cmd->base.duplex = req0->duplex;
3025 cmd->base.port = req0->port;
3026 cmd->base.transceiver = req0->transceiver;
3027 cmd->base.phy_address = req0->phy_address;
3028 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3029 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3030 supported = le32_to_cpu(req0->supported);
3031 advertising = le32_to_cpu(req0->advertising);
3032 lp_advertising = le32_to_cpu(req0->lp_advertising);
3033 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3035 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3037 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3040 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3041 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3042 cmd->base.master_slave_state = req1->master_slave_state;
3048 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3049 const struct ethtool_link_ksettings *cmd)
3051 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3052 struct hclge_vport *vport = hclge_get_vport(handle);
3053 struct hclge_phy_link_ksetting_0_cmd *req0;
3054 struct hclge_phy_link_ksetting_1_cmd *req1;
3055 struct hclge_dev *hdev = vport->back;
3059 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3060 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3061 (cmd->base.duplex != DUPLEX_HALF &&
3062 cmd->base.duplex != DUPLEX_FULL)))
3065 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3067 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3068 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3071 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3072 req0->autoneg = cmd->base.autoneg;
3073 req0->speed = cpu_to_le32(cmd->base.speed);
3074 req0->duplex = cmd->base.duplex;
3075 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3076 cmd->link_modes.advertising);
3077 req0->advertising = cpu_to_le32(advertising);
3078 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3080 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3081 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3083 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3085 dev_err(&hdev->pdev->dev,
3086 "failed to set phy link ksettings, ret = %d.\n", ret);
3090 hdev->hw.mac.autoneg = cmd->base.autoneg;
3091 hdev->hw.mac.speed = cmd->base.speed;
3092 hdev->hw.mac.duplex = cmd->base.duplex;
3093 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3098 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3100 struct ethtool_link_ksettings cmd;
3103 if (!hnae3_dev_phy_imp_supported(hdev))
3106 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3110 hdev->hw.mac.autoneg = cmd.base.autoneg;
3111 hdev->hw.mac.speed = cmd.base.speed;
3112 hdev->hw.mac.duplex = cmd.base.duplex;
3117 static int hclge_tp_port_init(struct hclge_dev *hdev)
3119 struct ethtool_link_ksettings cmd;
3121 if (!hnae3_dev_phy_imp_supported(hdev))
3124 cmd.base.autoneg = hdev->hw.mac.autoneg;
3125 cmd.base.speed = hdev->hw.mac.speed;
3126 cmd.base.duplex = hdev->hw.mac.duplex;
3127 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3129 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3132 static int hclge_update_port_info(struct hclge_dev *hdev)
3134 struct hclge_mac *mac = &hdev->hw.mac;
3135 int speed = HCLGE_MAC_SPEED_UNKNOWN;
3138 /* get the port info from SFP cmd if not copper port */
3139 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3140 return hclge_update_tp_port_info(hdev);
3142 /* if IMP does not support getting SFP/qSFP info, return directly */
3143 if (!hdev->support_sfp_query)
3146 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3147 ret = hclge_get_sfp_info(hdev, mac);
3149 ret = hclge_get_sfp_speed(hdev, &speed);
3151 if (ret == -EOPNOTSUPP) {
3152 hdev->support_sfp_query = false;
3158 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3159 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3160 hclge_update_port_capability(hdev, mac);
3163 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3166 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3167 return 0; /* do nothing if no SFP */
3169 /* must config full duplex for SFP */
3170 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3174 static int hclge_get_status(struct hnae3_handle *handle)
3176 struct hclge_vport *vport = hclge_get_vport(handle);
3177 struct hclge_dev *hdev = vport->back;
3179 hclge_update_link_status(hdev);
3181 return hdev->hw.mac.link;
3184 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3186 if (!pci_num_vf(hdev->pdev)) {
3187 dev_err(&hdev->pdev->dev,
3188 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3192 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3193 dev_err(&hdev->pdev->dev,
3194 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3195 vf, pci_num_vf(hdev->pdev));
3199 /* VF indices start from 1 in the vport array */
3200 vf += HCLGE_VF_VPORT_START_NUM;
3201 return &hdev->vport[vf];
3204 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3205 struct ifla_vf_info *ivf)
3207 struct hclge_vport *vport = hclge_get_vport(handle);
3208 struct hclge_dev *hdev = vport->back;
3210 vport = hclge_get_vf_vport(hdev, vf);
3215 ivf->linkstate = vport->vf_info.link_state;
3216 ivf->spoofchk = vport->vf_info.spoofchk;
3217 ivf->trusted = vport->vf_info.trusted;
3218 ivf->min_tx_rate = 0;
3219 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3220 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3221 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3222 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3223 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3228 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3231 struct hclge_vport *vport = hclge_get_vport(handle);
3232 struct hclge_dev *hdev = vport->back;
3234 vport = hclge_get_vf_vport(hdev, vf);
3238 vport->vf_info.link_state = link_state;
3243 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3245 u32 cmdq_src_reg, msix_src_reg;
3247 /* fetch the events from their corresponding regs */
3248 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3249 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3251 /* Assumption: if by any chance reset and mailbox events are reported
3252 * together, we will only process the reset event in this pass and will
3253 * defer the processing of the mailbox events. Since we have not
3254 * cleared the RX CMDQ event this time, we will receive another
3255 * interrupt from H/W just for the mailbox.
3257 * check for vector0 reset event sources
3259 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3260 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3261 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3262 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3263 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3264 hdev->rst_stats.imp_rst_cnt++;
3265 return HCLGE_VECTOR0_EVENT_RST;
3268 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3269 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3270 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3271 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3272 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3273 hdev->rst_stats.global_rst_cnt++;
3274 return HCLGE_VECTOR0_EVENT_RST;
3277 /* check for vector0 msix event source */
3278 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3279 *clearval = msix_src_reg;
3280 return HCLGE_VECTOR0_EVENT_ERR;
3283 /* check for vector0 mailbox(=CMDQ RX) event source */
3284 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3285 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3286 *clearval = cmdq_src_reg;
3287 return HCLGE_VECTOR0_EVENT_MBX;
3290 /* print other vector0 event source */
3291 dev_info(&hdev->pdev->dev,
3292 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3293 cmdq_src_reg, msix_src_reg);
3294 *clearval = msix_src_reg;
3296 return HCLGE_VECTOR0_EVENT_OTHER;
3299 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3302 switch (event_type) {
3303 case HCLGE_VECTOR0_EVENT_RST:
3304 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3306 case HCLGE_VECTOR0_EVENT_MBX:
3307 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3314 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3316 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3317 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3318 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3319 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3320 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
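/* vector->addr points at what is presumably the misc vector's interrupt
 * enable register: writing 1 unmasks the vector, writing 0 masks it.
 */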
3323 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3325 writel(enable ? 1 : 0, vector->addr);
3328 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3330 struct hclge_dev *hdev = data;
3334 hclge_enable_vector(&hdev->misc_vector, false);
3335 event_cause = hclge_check_event_cause(hdev, &clearval);
3337 /* vector 0 interrupt is shared with reset and mailbox source events. */
3338 switch (event_cause) {
3339 case HCLGE_VECTOR0_EVENT_ERR:
3340 /* we do not know what type of reset is required now. This could
3341 * only be decided after we fetch the type of errors which
3342 * caused this event. Therefore, we will do the following for now:
3343 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3344 * have deferred the type of reset to be used.
3345 * 2. Schedule the reset service task.
3346 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3347 * will fetch the correct type of reset. This would be done
3348 * by first decoding the types of errors.
3350 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3352 case HCLGE_VECTOR0_EVENT_RST:
3353 hclge_reset_task_schedule(hdev);
3355 case HCLGE_VECTOR0_EVENT_MBX:
3356 /* If we are here then,
3357 * 1. Either we are not handling any mbx task and we are not
3358 *    scheduled as well
3360 * 2. We could be handling a mbx task but nothing more is
3361 *    scheduled
3362 * In both cases, we should schedule the mbx task as there are more
3363 * mbx messages reported by this interrupt.
3365 hclge_mbx_task_schedule(hdev);
3368 dev_warn(&hdev->pdev->dev,
3369 "received unknown or unhandled event of vector0\n");
3373 hclge_clear_event_cause(hdev, event_cause, clearval);
3375 /* Enable the interrupt if it is not caused by reset. And when
3376 * clearval equals 0, the interrupt status may have been
3377 * cleared by hardware before the driver reads the status register.
3378 * For this case, the vector0 interrupt should also be enabled.
3381 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3382 hclge_enable_vector(&hdev->misc_vector, true);
3388 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3390 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3391 dev_warn(&hdev->pdev->dev,
3392 "vector(vector_id %d) has been freed.\n", vector_id);
3396 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3397 hdev->num_msi_left += 1;
3398 hdev->num_msi_used -= 1;
3401 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3403 struct hclge_misc_vector *vector = &hdev->misc_vector;
3405 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3407 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3408 hdev->vector_status[0] = 0;
3410 hdev->num_msi_left -= 1;
3411 hdev->num_msi_used += 1;
3414 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3415 const cpumask_t *mask)
3417 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3420 cpumask_copy(&hdev->affinity_mask, mask);
3423 static void hclge_irq_affinity_release(struct kref *ref)
3427 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3429 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3430 &hdev->affinity_mask);
3432 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3433 hdev->affinity_notify.release = hclge_irq_affinity_release;
3434 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3435 &hdev->affinity_notify);
3438 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3440 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3441 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3444 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3448 hclge_get_misc_vector(hdev);
3450 /* this would be explicitly freed in the end */
3451 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3452 HCLGE_NAME, pci_name(hdev->pdev));
3453 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3454 0, hdev->misc_vector.name, hdev);
3456 hclge_free_vector(hdev, 0);
3457 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3458 hdev->misc_vector.vector_irq);
3464 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3466 free_irq(hdev->misc_vector.vector_irq, hdev);
3467 hclge_free_vector(hdev, 0);
3470 int hclge_notify_client(struct hclge_dev *hdev,
3471 enum hnae3_reset_notify_type type)
3473 struct hnae3_client *client = hdev->nic_client;
3476 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3479 if (!client->ops->reset_notify)
3482 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3483 struct hnae3_handle *handle = &hdev->vport[i].nic;
3486 ret = client->ops->reset_notify(handle, type);
3488 dev_err(&hdev->pdev->dev,
3489 "notify nic client failed %d(%d)\n", type, ret);
3497 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3498 enum hnae3_reset_notify_type type)
3500 struct hnae3_client *client = hdev->roce_client;
3504 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3507 if (!client->ops->reset_notify)
3510 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3511 struct hnae3_handle *handle = &hdev->vport[i].roce;
3513 ret = client->ops->reset_notify(handle, type);
3515 dev_err(&hdev->pdev->dev,
3516 "notify roce client failed %d(%d)",
3525 static int hclge_reset_wait(struct hclge_dev *hdev)
3527 #define HCLGE_RESET_WAIT_MS 100
3528 #define HCLGE_RESET_WAIT_CNT 350
3530 u32 val, reg, reg_bit;
3533 switch (hdev->reset_type) {
3534 case HNAE3_IMP_RESET:
3535 reg = HCLGE_GLOBAL_RESET_REG;
3536 reg_bit = HCLGE_IMP_RESET_BIT;
3538 case HNAE3_GLOBAL_RESET:
3539 reg = HCLGE_GLOBAL_RESET_REG;
3540 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3542 case HNAE3_FUNC_RESET:
3543 reg = HCLGE_FUN_RST_ING;
3544 reg_bit = HCLGE_FUN_RST_ING_B;
3547 dev_err(&hdev->pdev->dev,
3548 "Wait for unsupported reset type: %d\n",
3553 val = hclge_read_dev(&hdev->hw, reg);
3554 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3555 msleep(HCLGE_RESET_WAIT_MS);
3556 val = hclge_read_dev(&hdev->hw, reg);
3560 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3561 dev_warn(&hdev->pdev->dev,
3562 "Wait for reset timeout: %d\n", hdev->reset_type);
3569 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3571 struct hclge_vf_rst_cmd *req;
3572 struct hclge_desc desc;
3574 req = (struct hclge_vf_rst_cmd *)desc.data;
3575 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3576 req->dest_vfid = func_id;
3581 return hclge_cmd_send(&hdev->hw, &desc, 1);
3584 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3588 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3589 struct hclge_vport *vport = &hdev->vport[i];
3592 /* Send cmd to set/clear VF's FUNC_RST_ING */
3593 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3595 dev_err(&hdev->pdev->dev,
3596 "set vf(%u) rst failed %d!\n",
3597 vport->vport_id, ret);
3601 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3604 /* Inform VF to process the reset.
3605 * hclge_inform_reset_assert_to_vf may fail if VF
3606 * driver is not loaded.
3608 ret = hclge_inform_reset_assert_to_vf(vport);
3610 dev_warn(&hdev->pdev->dev,
3611 "inform reset to vf(%u) failed %d!\n",
3612 vport->vport_id, ret);
3618 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3620 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3621 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3622 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3625 hclge_mbx_handler(hdev);
3627 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
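/* hclge_func_reset_sync_vf: poll the firmware, up to
 * HCLGE_PF_RESET_SYNC_CNT tries with HCLGE_PF_RESET_SYNC_TIME ms between
 * them, until every VF reports it is ready for the PF reset; old
 * firmware that lacks the query gets a fixed wait instead.
 */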
3630 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3632 struct hclge_pf_rst_sync_cmd *req;
3633 struct hclge_desc desc;
3637 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3638 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3641 /* VFs need to bring the netdev down via mbx during PF or FLR reset */
3642 hclge_mailbox_service_task(hdev);
3644 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3645 /* for compatibility with old firmware, wait
3646 * 100 ms for the VF to stop IO
3648 if (ret == -EOPNOTSUPP) {
3649 msleep(HCLGE_RESET_SYNC_TIME);
3652 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3655 } else if (req->all_vf_ready) {
3658 msleep(HCLGE_PF_RESET_SYNC_TIME);
3659 hclge_cmd_reuse_desc(&desc, true);
3660 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3662 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3665 void hclge_report_hw_error(struct hclge_dev *hdev,
3666 enum hnae3_hw_error_type type)
3668 struct hnae3_client *client = hdev->nic_client;
3671 if (!client || !client->ops->process_hw_error ||
3672 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3675 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3676 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3679 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3683 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3684 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3685 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3686 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3687 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3690 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3691 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3692 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3693 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3697 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3699 struct hclge_desc desc;
3700 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3703 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3704 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3705 req->fun_reset_vfid = func_id;
3707 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3709 dev_err(&hdev->pdev->dev,
3710 "send function reset cmd fail, status =%d\n", ret);
3715 static void hclge_do_reset(struct hclge_dev *hdev)
3717 struct hnae3_handle *handle = &hdev->vport[0].nic;
3718 struct pci_dev *pdev = hdev->pdev;
3721 if (hclge_get_hw_reset_stat(handle)) {
3722 dev_info(&pdev->dev, "hardware reset not finish\n");
3723 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3724 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3725 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3729 switch (hdev->reset_type) {
3730 case HNAE3_GLOBAL_RESET:
3731 dev_info(&pdev->dev, "global reset requested\n");
3732 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3733 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3734 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3736 case HNAE3_FUNC_RESET:
3737 dev_info(&pdev->dev, "PF reset requested\n");
3738 /* schedule again to check later */
3739 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3740 hclge_reset_task_schedule(hdev);
3743 dev_warn(&pdev->dev,
3744 "unsupported reset type: %d\n", hdev->reset_type);
3749 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3750 unsigned long *addr)
3752 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3753 struct hclge_dev *hdev = ae_dev->priv;
3755 /* first, resolve any unknown reset type to the known type(s) */
3756 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3757 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3758 HCLGE_MISC_VECTOR_INT_STS);
3759 /* we will intentionally ignore any errors from this function
3760 * as we will end up in *some* reset request in any case
3762 if (hclge_handle_hw_msix_error(hdev, addr))
3763 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3766 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3767 /* We deferred the clearing of the error event which caused the
3768 * interrupt since it was not possible to do that in
3769 * interrupt context (and this is the reason we introduced the
3770 * new UNKNOWN reset type). Now that the errors have been
3771 * handled and cleared in hardware, we can safely enable
3772 * interrupts. This is an exception to the norm.
3774 hclge_enable_vector(&hdev->misc_vector, true);
3777 /* return the highest priority reset level amongst all */
3778 if (test_bit(HNAE3_IMP_RESET, addr)) {
3779 rst_level = HNAE3_IMP_RESET;
3780 clear_bit(HNAE3_IMP_RESET, addr);
3781 clear_bit(HNAE3_GLOBAL_RESET, addr);
3782 clear_bit(HNAE3_FUNC_RESET, addr);
3783 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3784 rst_level = HNAE3_GLOBAL_RESET;
3785 clear_bit(HNAE3_GLOBAL_RESET, addr);
3786 clear_bit(HNAE3_FUNC_RESET, addr);
3787 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3788 rst_level = HNAE3_FUNC_RESET;
3789 clear_bit(HNAE3_FUNC_RESET, addr);
3790 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3791 rst_level = HNAE3_FLR_RESET;
3792 clear_bit(HNAE3_FLR_RESET, addr);
3795 if (hdev->reset_type != HNAE3_NONE_RESET &&
3796 rst_level < hdev->reset_type)
3797 return HNAE3_NONE_RESET;
3802 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3806 switch (hdev->reset_type) {
3807 case HNAE3_IMP_RESET:
3808 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3810 case HNAE3_GLOBAL_RESET:
3811 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3820 /* For revision 0x20, the reset interrupt source
3821 * can only be cleared after the hardware reset is done
3823 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3824 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3827 hclge_enable_vector(&hdev->misc_vector, true);
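/* hclge_reset_handshake: toggle HCLGE_NIC_SW_RST_RDY in the CSQ depth
 * register to tell the hardware whether the driver is ready for reset.
 */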
3830 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3834 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3836 reg_val |= HCLGE_NIC_SW_RST_RDY;
3838 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3840 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3843 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3847 ret = hclge_set_all_vf_rst(hdev, true);
3851 hclge_func_reset_sync_vf(hdev);
3856 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3861 switch (hdev->reset_type) {
3862 case HNAE3_FUNC_RESET:
3863 ret = hclge_func_reset_notify_vf(hdev);
3867 ret = hclge_func_reset_cmd(hdev, 0);
3869 dev_err(&hdev->pdev->dev,
3870 "asserting function reset fail %d!\n", ret);
3874 /* After performing PF reset, it is not necessary to do any
3875 * mailbox handling or send any command to firmware, because
3876 * any mailbox handling or command to firmware is only valid
3877 * after hclge_cmd_init is called.
3879 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3880 hdev->rst_stats.pf_rst_cnt++;
3882 case HNAE3_FLR_RESET:
3883 ret = hclge_func_reset_notify_vf(hdev);
3887 case HNAE3_IMP_RESET:
3888 hclge_handle_imp_error(hdev);
3889 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3890 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3891 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3897 /* inform hardware that preparatory work is done */
3898 msleep(HCLGE_RESET_SYNC_TIME);
3899 hclge_reset_handshake(hdev, true);
3900 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3905 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3907 #define MAX_RESET_FAIL_CNT 5
3909 if (hdev->reset_pending) {
3910 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3911 hdev->reset_pending);
3913 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3914 HCLGE_RESET_INT_M) {
3915 dev_info(&hdev->pdev->dev,
3916 "reset failed because new reset interrupt\n");
3917 hclge_clear_reset_cause(hdev);
3919 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3920 hdev->rst_stats.reset_fail_cnt++;
3921 set_bit(hdev->reset_type, &hdev->reset_pending);
3922 dev_info(&hdev->pdev->dev,
3923 "re-schedule reset task(%u)\n",
3924 hdev->rst_stats.reset_fail_cnt);
3928 hclge_clear_reset_cause(hdev);
3930 /* recover the handshake status when reset fails */
3931 hclge_reset_handshake(hdev, true);
3933 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3935 hclge_dbg_dump_rst_info(hdev);
3937 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3942 static int hclge_set_rst_done(struct hclge_dev *hdev)
3944 struct hclge_pf_rst_done_cmd *req;
3945 struct hclge_desc desc;
3948 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3949 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3950 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3952 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3953 /* To be compatible with the old firmware, which does not support
3954 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3955 * return success
3957 if (ret == -EOPNOTSUPP) {
3958 dev_warn(&hdev->pdev->dev,
3959 "current firmware does not support command(0x%x)!\n",
3960 HCLGE_OPC_PF_RST_DONE);
3963 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3970 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3974 switch (hdev->reset_type) {
3975 case HNAE3_FUNC_RESET:
3976 case HNAE3_FLR_RESET:
3977 ret = hclge_set_all_vf_rst(hdev, false);
3979 case HNAE3_GLOBAL_RESET:
3980 case HNAE3_IMP_RESET:
3981 ret = hclge_set_rst_done(hdev);
3987 /* clear the handshake status after re-initialization is done */
3988 hclge_reset_handshake(hdev, false);
3993 static int hclge_reset_stack(struct hclge_dev *hdev)
3997 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4001 ret = hclge_reset_ae_dev(hdev->ae_dev);
4005 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4008 static int hclge_reset_prepare(struct hclge_dev *hdev)
4012 hdev->rst_stats.reset_cnt++;
4013 /* perform reset of the stack & ae device for a client */
4014 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4019 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4024 return hclge_reset_prepare_wait(hdev);
4027 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4029 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4030 enum hnae3_reset_type reset_level;
4033 hdev->rst_stats.hw_reset_done_cnt++;
4035 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4040 ret = hclge_reset_stack(hdev);
4045 hclge_clear_reset_cause(hdev);
4047 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4048 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4049 * times
4051 if (ret &&
4052 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4055 ret = hclge_reset_prepare_up(hdev);
4060 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4065 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4069 hdev->last_reset_time = jiffies;
4070 hdev->rst_stats.reset_fail_cnt = 0;
4071 hdev->rst_stats.reset_done_cnt++;
4072 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4074 /* if default_reset_request has a higher level reset request,
4075 * it should be handled as soon as possible, since some errors
4076 * need this kind of reset to be fixed.
4078 reset_level = hclge_get_reset_level(ae_dev,
4079 &hdev->default_reset_request);
4080 if (reset_level != HNAE3_NONE_RESET)
4081 set_bit(reset_level, &hdev->reset_request);
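/* hclge_reset: run the three reset phases in order (prepare: notify
 * clients and assert the reset; wait for the hardware to finish; then
 * rebuild), and on any failure fall through to the error handler, which
 * may re-schedule the reset task.
 */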
4086 static void hclge_reset(struct hclge_dev *hdev)
4088 if (hclge_reset_prepare(hdev))
4091 if (hclge_reset_wait(hdev))
4094 if (hclge_reset_rebuild(hdev))
4100 if (hclge_reset_err_handle(hdev))
4101 hclge_reset_task_schedule(hdev);
4104 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4106 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4107 struct hclge_dev *hdev = ae_dev->priv;
4109 /* We might end up getting called broadly because of the 2 cases below:
4110 * 1. A recoverable error was conveyed through APEI and the only way to
4111 * bring normalcy is to reset.
4112 * 2. A new reset request from the stack due to timeout
4114 * For the first case, the error event might not have an ae handle
4115 * available. Check if this is a new reset request and we are not here
4116 * just because the last reset attempt did not succeed and the watchdog
4117 * hit us again. We will know this if the last reset request did not
4118 * occur very recently (watchdog timer = 5*HZ, so check after a
4119 * sufficiently large time, say 4*5*HZ). In case of a new request we
4120 * reset the "reset level" to PF reset.
4121 * And if it is a repeat of the most recent reset request, we want to
4122 * throttle it; therefore, we will not allow it again before 3*HZ.
4125 handle = &hdev->vport[0].nic;
4127 if (time_before(jiffies, (hdev->last_reset_time +
4128 HCLGE_RESET_INTERVAL))) {
4129 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4131 } else if (hdev->default_reset_request) {
4133 hclge_get_reset_level(ae_dev,
4134 &hdev->default_reset_request);
4135 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4136 hdev->reset_level = HNAE3_FUNC_RESET;
4139 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4142 /* request reset & schedule reset task */
4143 set_bit(hdev->reset_level, &hdev->reset_request);
4144 hclge_reset_task_schedule(hdev);
4146 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4147 hdev->reset_level++;
4150 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4151 enum hnae3_reset_type rst_type)
4153 struct hclge_dev *hdev = ae_dev->priv;
4155 set_bit(rst_type, &hdev->default_reset_request);
4158 static void hclge_reset_timer(struct timer_list *t)
4160 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4162 /* if default_reset_request has no value, it means that this reset
4163 * request has already been handled, so just return here
4165 if (!hdev->default_reset_request)
4168 dev_info(&hdev->pdev->dev,
4169 "triggering reset in reset timer\n");
4170 hclge_reset_event(hdev->pdev, NULL);
4173 static void hclge_reset_subtask(struct hclge_dev *hdev)
4175 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4177 /* check if there is any ongoing reset in the hardware. This status can
4178 * be checked from reset_pending. If there is, then we need to wait for
4179 * the hardware to complete the reset.
4180 * a. If we are able to figure out in reasonable time that the hardware
4181 * has fully reset, then we can proceed with driver and client
4182 * reinitialization.
4183 * b. else, we can come back later to check this status so re-sched
4184 * now.
4186 hdev->last_reset_time = jiffies;
4187 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4188 if (hdev->reset_type != HNAE3_NONE_RESET)
4191 /* check if we got any *new* reset requests to be honored */
4192 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4193 if (hdev->reset_type != HNAE3_NONE_RESET)
4194 hclge_do_reset(hdev);
4196 hdev->reset_type = HNAE3_NONE_RESET;
4199 static void hclge_reset_service_task(struct hclge_dev *hdev)
4201 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4204 down(&hdev->reset_sem);
4205 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4207 hclge_reset_subtask(hdev);
4209 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4210 up(&hdev->reset_sem);
4213 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4217 /* start from vport 1, since the PF (vport 0) is always alive */
4218 for (i = 1; i < hdev->num_alloc_vport; i++) {
4219 struct hclge_vport *vport = &hdev->vport[i];
4221 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4222 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4224 /* If the VF is not alive, reset its MPS to the default value */
4225 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4226 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
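/* hclge_periodic_service_task: runs roughly once per second (it re-arms
 * itself via hclge_task_schedule()) and refreshes link state, MAC table,
 * promisc mode, VF liveness, statistics, port info, the VLAN filter and
 * ARFS expiry.
 */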
4230 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4232 unsigned long delta = round_jiffies_relative(HZ);
4234 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4237 /* Always handle the link updating to make sure link state is
4238 * updated when it is triggered by mbx.
4240 hclge_update_link_status(hdev);
4241 hclge_sync_mac_table(hdev);
4242 hclge_sync_promisc_mode(hdev);
4244 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4245 delta = jiffies - hdev->last_serv_processed;
4247 if (delta < round_jiffies_relative(HZ)) {
4248 delta = round_jiffies_relative(HZ) - delta;
4253 hdev->serv_processed_cnt++;
4254 hclge_update_vport_alive(hdev);
4256 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4257 hdev->last_serv_processed = jiffies;
4261 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4262 hclge_update_stats_for_all(hdev);
4264 hclge_update_port_info(hdev);
4265 hclge_sync_vlan_filter(hdev);
4267 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4268 hclge_rfs_filter_expire(hdev);
4270 hdev->last_serv_processed = jiffies;
4273 hclge_task_schedule(hdev, delta);
4276 static void hclge_service_task(struct work_struct *work)
4278 struct hclge_dev *hdev =
4279 container_of(work, struct hclge_dev, service_task.work);
4281 hclge_reset_service_task(hdev);
4282 hclge_mailbox_service_task(hdev);
4283 hclge_periodic_service_task(hdev);
4285 /* Handle reset and mbx again in case the periodic task delays the
4286 * handling by calling hclge_task_schedule() in
4287 * hclge_periodic_service_task().
4289 hclge_reset_service_task(hdev);
4290 hclge_mailbox_service_task(hdev);
4293 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4295 /* VF handle has no client */
4296 if (!handle->client)
4297 return container_of(handle, struct hclge_vport, nic);
4298 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4299 return container_of(handle, struct hclge_vport, roce);
4301 return container_of(handle, struct hclge_vport, nic);
4304 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4305 struct hnae3_vector_info *vector_info)
4307 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4309 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4311 /* need an extended offset to config vectors >= 64 */
4312 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4313 vector_info->io_addr = hdev->hw.io_base +
4314 HCLGE_VECTOR_REG_BASE +
4315 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4317 vector_info->io_addr = hdev->hw.io_base +
4318 HCLGE_VECTOR_EXT_REG_BASE +
4319 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4320 HCLGE_VECTOR_REG_OFFSET_H +
4321 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4322 HCLGE_VECTOR_REG_OFFSET;
4324 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4325 hdev->vector_irq[idx] = vector_info->vector;
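/* hclge_get_vector: hand out up to @vector_num unused MSI-X vectors by
 * scanning vector_status for HCLGE_INVALID_VPORT slots and filling in
 * their irq numbers and doorbell addresses via hclge_get_vector_info().
 */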
4328 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4329 struct hnae3_vector_info *vector_info)
4331 struct hclge_vport *vport = hclge_get_vport(handle);
4332 struct hnae3_vector_info *vector = vector_info;
4333 struct hclge_dev *hdev = vport->back;
4338 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4339 vector_num = min(hdev->num_msi_left, vector_num);
4341 for (j = 0; j < vector_num; j++) {
4342 while (++i < hdev->num_nic_msi) {
4343 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4344 hclge_get_vector_info(hdev, i, vector);
4345 vector++;
4346 alloc++;
4348 break;
4349 }
4350 }
4351 }
4352 hdev->num_msi_left -= alloc;
4353 hdev->num_msi_used += alloc;
4355 return alloc;
4356 }
4358 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4362 for (i = 0; i < hdev->num_msi; i++)
4363 if (vector == hdev->vector_irq[i])
4364 return i;
4366 return -EINVAL;
4367 }
4369 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4371 struct hclge_vport *vport = hclge_get_vport(handle);
4372 struct hclge_dev *hdev = vport->back;
4375 vector_id = hclge_get_vector_index(hdev, vector);
4376 if (vector_id < 0) {
4377 dev_err(&hdev->pdev->dev,
4378 "Get vector index fail. vector = %d\n", vector);
4382 hclge_free_vector(hdev, vector_id);
4384 return 0;
4385 }
4387 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4389 return HCLGE_RSS_KEY_SIZE;
4392 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4393 const u8 hfunc, const u8 *key)
4395 struct hclge_rss_config_cmd *req;
4396 unsigned int key_offset = 0;
4397 struct hclge_desc desc;
4398 int key_counts;
4399 int key_size;
4400 int ret;
4402 key_counts = HCLGE_RSS_KEY_SIZE;
4403 req = (struct hclge_rss_config_cmd *)desc.data;
4405 while (key_counts) {
4406 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4407 false);
4409 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4410 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4412 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4413 memcpy(req->hash_key,
4414 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4416 key_counts -= key_size;
4417 key_offset++;
4418 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4419 if (ret) {
4420 dev_err(&hdev->pdev->dev,
4421 "Configure RSS config fail, status = %d\n",
4422 ret);
4423 return ret;
4424 }
4425 }
4427 return 0;
4428 }
4429 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4431 struct hclge_rss_indirection_table_cmd *req;
4432 struct hclge_desc desc;
4433 int rss_cfg_tbl_num;
4434 u8 rss_msb_oft;
4435 u8 rss_msb_val;
4436 int ret;
4437 u16 qid;
4438 int i;
4439 u32 j;
4441 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4442 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4443 HCLGE_RSS_CFG_TBL_SIZE;
4445 for (i = 0; i < rss_cfg_tbl_num; i++) {
4446 hclge_cmd_setup_basic_desc
4447 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4449 req->start_table_index =
4450 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4451 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4452 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4453 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4454 req->rss_qid_l[j] = qid & 0xff;
4455 rss_msb_oft =
4456 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4457 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4458 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4459 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4460 }
4461 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4462 if (ret) {
4463 dev_err(&hdev->pdev->dev,
4464 "Configure rss indir table fail, status = %d\n",
4465 ret);
4466 return ret;
4467 }
4468 }
4470 return 0;
4471 }
4472 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4473 u16 *tc_size, u16 *tc_offset)
4475 struct hclge_rss_tc_mode_cmd *req;
4476 struct hclge_desc desc;
4477 int ret;
4478 int i;
4480 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4481 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4483 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4484 u16 mode = 0;
4486 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4487 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4488 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4489 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4490 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4491 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4492 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4494 req->rss_tc_mode[i] = cpu_to_le16(mode);
4495 }
4497 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4498 if (ret)
4499 dev_err(&hdev->pdev->dev,
4500 "Configure rss tc mode fail, status = %d\n", ret);
4502 return ret;
4503 }
4505 static void hclge_get_rss_type(struct hclge_vport *vport)
4507 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4508 vport->rss_tuple_sets.ipv4_udp_en ||
4509 vport->rss_tuple_sets.ipv4_sctp_en ||
4510 vport->rss_tuple_sets.ipv6_tcp_en ||
4511 vport->rss_tuple_sets.ipv6_udp_en ||
4512 vport->rss_tuple_sets.ipv6_sctp_en)
4513 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4514 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4515 vport->rss_tuple_sets.ipv6_fragment_en)
4516 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4517 else
4518 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4519 }
4521 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4523 struct hclge_rss_input_tuple_cmd *req;
4524 struct hclge_desc desc;
4525 int ret;
4527 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4529 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4531 /* Get the tuple config from the PF (vport 0) */
4532 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4533 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4534 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4535 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4536 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4537 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4538 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4539 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4540 hclge_get_rss_type(&hdev->vport[0]);
4541 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4542 if (ret)
4543 dev_err(&hdev->pdev->dev,
4544 "Configure rss input fail, status = %d\n", ret);
4546 return ret;
4547 }
4548 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4549 u8 *key, u8 *hfunc)
4551 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4552 struct hclge_vport *vport = hclge_get_vport(handle);
4553 int i;
4555 /* Get hash algorithm */
4556 if (hfunc) {
4557 switch (vport->rss_algo) {
4558 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4559 *hfunc = ETH_RSS_HASH_TOP;
4560 break;
4561 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4562 *hfunc = ETH_RSS_HASH_XOR;
4563 break;
4564 default:
4565 *hfunc = ETH_RSS_HASH_UNKNOWN;
4566 break;
4567 }
4568 }
4570 /* Get the RSS Key required by the user */
4571 if (key)
4572 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4574 /* Get indirect table */
4575 if (indir)
4576 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4577 indir[i] = vport->rss_indirection_tbl[i];
4579 return 0;
4580 }
4582 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4583 const u8 *key, const u8 hfunc)
4585 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4586 struct hclge_vport *vport = hclge_get_vport(handle);
4587 struct hclge_dev *hdev = vport->back;
4588 u8 hash_algo;
4589 int ret, i;
4591 /* Set the RSS hash key if specified by the user */
4593 switch (hfunc) {
4594 case ETH_RSS_HASH_TOP:
4595 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4596 break;
4597 case ETH_RSS_HASH_XOR:
4598 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4599 break;
4600 case ETH_RSS_HASH_NO_CHANGE:
4601 hash_algo = vport->rss_algo;
4602 break;
4603 default:
4604 return -EOPNOTSUPP;
4605 }
4607 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4608 if (ret)
4609 return ret;
4611 /* Update the shadow RSS key with the user-specified key */
4612 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4613 vport->rss_algo = hash_algo;
4616 /* Update the shadow RSS table with user specified qids */
4617 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4618 vport->rss_indirection_tbl[i] = indir[i];
4620 /* Update the hardware */
4621 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4624 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4626 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4628 if (nfc->data & RXH_L4_B_2_3)
4629 hash_sets |= HCLGE_D_PORT_BIT;
4630 else
4631 hash_sets &= ~HCLGE_D_PORT_BIT;
4633 if (nfc->data & RXH_IP_SRC)
4634 hash_sets |= HCLGE_S_IP_BIT;
4635 else
4636 hash_sets &= ~HCLGE_S_IP_BIT;
4638 if (nfc->data & RXH_IP_DST)
4639 hash_sets |= HCLGE_D_IP_BIT;
4640 else
4641 hash_sets &= ~HCLGE_D_IP_BIT;
4643 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4644 hash_sets |= HCLGE_V_TAG_BIT;
4646 return hash_sets;
4647 }
4649 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4650 struct ethtool_rxnfc *nfc,
4651 struct hclge_rss_input_tuple_cmd *req)
4653 struct hclge_dev *hdev = vport->back;
4654 u8 tuple_sets;
4656 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4657 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4658 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4659 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4660 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4661 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4662 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4663 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4665 tuple_sets = hclge_get_rss_hash_bits(nfc);
4666 switch (nfc->flow_type) {
4667 case TCP_V4_FLOW:
4668 req->ipv4_tcp_en = tuple_sets;
4669 break;
4670 case TCP_V6_FLOW:
4671 req->ipv6_tcp_en = tuple_sets;
4672 break;
4673 case UDP_V4_FLOW:
4674 req->ipv4_udp_en = tuple_sets;
4675 break;
4676 case UDP_V6_FLOW:
4677 req->ipv6_udp_en = tuple_sets;
4678 break;
4679 case SCTP_V4_FLOW:
4680 req->ipv4_sctp_en = tuple_sets;
4681 break;
4682 case SCTP_V6_FLOW:
4683 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4684 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4685 return -EINVAL;
4687 req->ipv6_sctp_en = tuple_sets;
4688 break;
4689 case IPV4_FLOW:
4690 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4691 break;
4692 case IPV6_FLOW:
4693 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4694 break;
4695 default:
4696 return -EINVAL;
4697 }
4699 return 0;
4700 }
4702 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4703 struct ethtool_rxnfc *nfc)
4705 struct hclge_vport *vport = hclge_get_vport(handle);
4706 struct hclge_dev *hdev = vport->back;
4707 struct hclge_rss_input_tuple_cmd *req;
4708 struct hclge_desc desc;
4711 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4712 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4715 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4716 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4718 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4720 dev_err(&hdev->pdev->dev,
4721 "failed to init rss tuple cmd, ret = %d\n", ret);
4725 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4727 dev_err(&hdev->pdev->dev,
4728 "Set rss tuple fail, status = %d\n", ret);
4732 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4733 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4734 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4735 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4736 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4737 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4738 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4739 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4740 hclge_get_rss_type(vport);
4744 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4745 u8 *tuple_sets)
4747 switch (flow_type) {
4748 case TCP_V4_FLOW:
4749 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4750 break;
4751 case UDP_V4_FLOW:
4752 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4753 break;
4754 case TCP_V6_FLOW:
4755 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4756 break;
4757 case UDP_V6_FLOW:
4758 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4759 break;
4760 case SCTP_V4_FLOW:
4761 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4762 break;
4763 case SCTP_V6_FLOW:
4764 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4765 break;
4766 case IPV4_FLOW:
4767 case IPV6_FLOW:
4768 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4769 break;
4770 default:
4771 return -EINVAL;
4772 }
4774 return 0;
4775 }
4777 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4779 u64 tuple_data = 0;
4781 if (tuple_sets & HCLGE_D_PORT_BIT)
4782 tuple_data |= RXH_L4_B_2_3;
4783 if (tuple_sets & HCLGE_S_PORT_BIT)
4784 tuple_data |= RXH_L4_B_0_1;
4785 if (tuple_sets & HCLGE_D_IP_BIT)
4786 tuple_data |= RXH_IP_DST;
4787 if (tuple_sets & HCLGE_S_IP_BIT)
4788 tuple_data |= RXH_IP_SRC;
4790 return tuple_data;
4791 }
4793 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4794 struct ethtool_rxnfc *nfc)
4796 struct hclge_vport *vport = hclge_get_vport(handle);
4798 u8 tuple_sets;
4799 int ret;
4802 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4803 if (ret || !tuple_sets)
4804 return ret;
4806 nfc->data = hclge_convert_rss_tuple(tuple_sets);
4808 return 0;
4809 }
4811 static int hclge_get_tc_size(struct hnae3_handle *handle)
4813 struct hclge_vport *vport = hclge_get_vport(handle);
4814 struct hclge_dev *hdev = vport->back;
4816 return hdev->pf_rss_size_max;
4819 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4821 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4822 struct hclge_vport *vport = hdev->vport;
4823 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4824 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4825 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4826 struct hnae3_tc_info *tc_info;
4827 u16 roundup_size;
4828 u16 rss_size;
4829 int i;
4831 tc_info = &vport->nic.kinfo.tc_info;
4832 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4833 rss_size = tc_info->tqp_count[i];
4834 tc_valid[i] = 0;
4836 if (!(hdev->hw_tc_map & BIT(i)))
4837 continue;
4839 /* tc_size set to hardware is the log2 of the roundup power of two
4840  * of rss_size; the actual queue size is limited by the indirection
4841  * table.
4842  */
4843 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4844 rss_size == 0) {
4845 dev_err(&hdev->pdev->dev,
4846 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4851 roundup_size = roundup_pow_of_two(rss_size);
4852 roundup_size = ilog2(roundup_size);
4854 tc_valid[i] = 1;
4855 tc_size[i] = roundup_size;
4856 tc_offset[i] = tc_info->tqp_offset[i];
4857 }
4859 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4862 int hclge_rss_init_hw(struct hclge_dev *hdev)
4864 struct hclge_vport *vport = hdev->vport;
4865 u16 *rss_indir = vport[0].rss_indirection_tbl;
4866 u8 *key = vport[0].rss_hash_key;
4867 u8 hfunc = vport[0].rss_algo;
4868 int ret;
4870 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4871 if (ret)
4872 return ret;
4874 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4875 if (ret)
4876 return ret;
4878 ret = hclge_set_rss_input_tuple(hdev);
4879 if (ret)
4880 return ret;
4882 return hclge_init_rss_tc_mode(hdev);
4885 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4887 struct hclge_vport *vport = hdev->vport;
4890 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4891 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4892 vport[j].rss_indirection_tbl[i] =
4893 i % vport[j].alloc_rss_size;
4894 }
4895 }
4897 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4899 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4900 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4901 struct hclge_vport *vport = hdev->vport;
4903 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4904 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4906 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4907 u16 *rss_ind_tbl;
4909 vport[i].rss_tuple_sets.ipv4_tcp_en =
4910 HCLGE_RSS_INPUT_TUPLE_OTHER;
4911 vport[i].rss_tuple_sets.ipv4_udp_en =
4912 HCLGE_RSS_INPUT_TUPLE_OTHER;
4913 vport[i].rss_tuple_sets.ipv4_sctp_en =
4914 HCLGE_RSS_INPUT_TUPLE_SCTP;
4915 vport[i].rss_tuple_sets.ipv4_fragment_en =
4916 HCLGE_RSS_INPUT_TUPLE_OTHER;
4917 vport[i].rss_tuple_sets.ipv6_tcp_en =
4918 HCLGE_RSS_INPUT_TUPLE_OTHER;
4919 vport[i].rss_tuple_sets.ipv6_udp_en =
4920 HCLGE_RSS_INPUT_TUPLE_OTHER;
4921 vport[i].rss_tuple_sets.ipv6_sctp_en =
4922 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4923 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4924 HCLGE_RSS_INPUT_TUPLE_SCTP;
4925 vport[i].rss_tuple_sets.ipv6_fragment_en =
4926 HCLGE_RSS_INPUT_TUPLE_OTHER;
4928 vport[i].rss_algo = rss_algo;
4930 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4931 sizeof(*rss_ind_tbl), GFP_KERNEL);
4932 if (!rss_ind_tbl)
4933 return -ENOMEM;
4935 vport[i].rss_indirection_tbl = rss_ind_tbl;
4936 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4937 HCLGE_RSS_KEY_SIZE);
4938 }
4940 hclge_rss_indir_init_cfg(hdev);
4942 return 0;
4943 }
4945 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4946 int vector_id, bool en,
4947 struct hnae3_ring_chain_node *ring_chain)
4949 struct hclge_dev *hdev = vport->back;
4950 struct hnae3_ring_chain_node *node;
4951 struct hclge_desc desc;
4952 struct hclge_ctrl_vector_chain_cmd *req =
4953 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4954 enum hclge_cmd_status status;
4955 enum hclge_opcode_type op;
4956 u16 tqp_type_and_id;
4957 int i;
4959 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4960 hclge_cmd_setup_basic_desc(&desc, op, false);
4961 req->int_vector_id_l = hnae3_get_field(vector_id,
4962 HCLGE_VECTOR_ID_L_M,
4963 HCLGE_VECTOR_ID_L_S);
4964 req->int_vector_id_h = hnae3_get_field(vector_id,
4965 HCLGE_VECTOR_ID_H_M,
4966 HCLGE_VECTOR_ID_H_S);
4968 i = 0;
4969 for (node = ring_chain; node; node = node->next) {
4970 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4971 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4972 HCLGE_INT_TYPE_S,
4973 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4974 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4975 HCLGE_TQP_ID_S, node->tqp_index);
4976 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4977 HCLGE_INT_GL_IDX_S,
4978 hnae3_get_field(node->int_gl_idx,
4979 HNAE3_RING_GL_IDX_M,
4980 HNAE3_RING_GL_IDX_S));
4981 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4982 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4983 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4984 req->vfid = vport->vport_id;
4986 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4987 if (status) {
4988 dev_err(&hdev->pdev->dev,
4989 "Map TQP fail, status is %d.\n",
4990 status);
4991 return -EIO;
4992 }
4994 i = 0;
4995 hclge_cmd_setup_basic_desc(&desc,
4996 op,
4997 false);
4998 req->int_vector_id_l =
4999 hnae3_get_field(vector_id,
5000 HCLGE_VECTOR_ID_L_M,
5001 HCLGE_VECTOR_ID_L_S);
5002 req->int_vector_id_h =
5003 hnae3_get_field(vector_id,
5004 HCLGE_VECTOR_ID_H_M,
5005 HCLGE_VECTOR_ID_H_S);
5006 }
5007 }
5009 if (i > 0) {
5010 req->int_cause_num = i;
5011 req->vfid = vport->vport_id;
5012 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5013 if (status) {
5014 dev_err(&hdev->pdev->dev,
5015 "Map TQP fail, status is %d.\n", status);
5016 return -EIO;
5017 }
5018 }
5020 return 0;
5021 }
5023 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5024 struct hnae3_ring_chain_node *ring_chain)
5026 struct hclge_vport *vport = hclge_get_vport(handle);
5027 struct hclge_dev *hdev = vport->back;
5028 int vector_id;
5030 vector_id = hclge_get_vector_index(hdev, vector);
5031 if (vector_id < 0) {
5032 dev_err(&hdev->pdev->dev,
5033 "failed to get vector index. vector=%d\n", vector);
5037 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5040 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5041 struct hnae3_ring_chain_node *ring_chain)
5043 struct hclge_vport *vport = hclge_get_vport(handle);
5044 struct hclge_dev *hdev = vport->back;
5045 int vector_id, ret;
5047 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5048 return 0;
5050 vector_id = hclge_get_vector_index(hdev, vector);
5051 if (vector_id < 0) {
5052 dev_err(&handle->pdev->dev,
5053 "Get vector index fail. ret =%d\n", vector_id);
5057 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5058 if (ret)
5059 dev_err(&handle->pdev->dev,
5060 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5061 vector_id, ret);
5063 return ret;
5064 }
5066 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5067 bool en_uc, bool en_mc, bool en_bc)
5069 struct hclge_vport *vport = &hdev->vport[vf_id];
5070 struct hnae3_handle *handle = &vport->nic;
5071 struct hclge_promisc_cfg_cmd *req;
5072 struct hclge_desc desc;
5073 bool uc_tx_en = en_uc;
5074 u8 promisc_cfg = 0;
5075 int ret;
5077 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5079 req = (struct hclge_promisc_cfg_cmd *)desc.data;
5082 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5083 uc_tx_en = false;
5085 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5086 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5087 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5088 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5089 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5090 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5091 req->extend_promisc = promisc_cfg;
5093 /* to be compatible with DEVICE_VERSION_V1/2 */
5094 promisc_cfg = 0;
5095 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5096 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5097 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5098 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5099 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5100 req->promisc = promisc_cfg;
5102 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5103 if (ret)
5104 dev_err(&hdev->pdev->dev,
5105 "failed to set vport %u promisc mode, ret = %d.\n",
5106 vf_id, ret);
5108 return ret;
5109 }
5111 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5112 bool en_mc_pmc, bool en_bc_pmc)
5114 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5115 en_uc_pmc, en_mc_pmc, en_bc_pmc);
5118 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5119 bool en_mc_pmc)
5121 struct hclge_vport *vport = hclge_get_vport(handle);
5122 struct hclge_dev *hdev = vport->back;
5123 bool en_bc_pmc = true;
5125 /* For devices with a version below V2, if broadcast promisc is enabled,
5126  * the vlan filter is always bypassed. So broadcast promisc should stay
5127  * disabled until the user enables promisc mode.
5128  */
5129 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5130 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5132 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5133 en_bc_pmc);
5134 }
5136 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5138 struct hclge_vport *vport = hclge_get_vport(handle);
5139 struct hclge_dev *hdev = vport->back;
5141 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
5144 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5146 struct hclge_get_fd_mode_cmd *req;
5147 struct hclge_desc desc;
5150 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5152 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5154 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5156 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5160 *fd_mode = req->mode;
5162 return ret;
5163 }
5165 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5166 u32 *stage1_entry_num,
5167 u32 *stage2_entry_num,
5168 u16 *stage1_counter_num,
5169 u16 *stage2_counter_num)
5171 struct hclge_get_fd_allocation_cmd *req;
5172 struct hclge_desc desc;
5175 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5177 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5179 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5180 if (ret) {
5181 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5182 ret);
5183 return ret;
5184 }
5186 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5187 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5188 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5189 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5191 return ret;
5192 }
5194 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5195 enum HCLGE_FD_STAGE stage_num)
5197 struct hclge_set_fd_key_config_cmd *req;
5198 struct hclge_fd_key_cfg *stage;
5199 struct hclge_desc desc;
5202 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5204 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5205 stage = &hdev->fd_cfg.key_cfg[stage_num];
5206 req->stage = stage_num;
5207 req->key_select = stage->key_sel;
5208 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5209 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5210 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5211 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5212 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5213 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5215 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5216 if (ret)
5217 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5219 return ret;
5220 }
5222 static int hclge_init_fd_config(struct hclge_dev *hdev)
5224 #define LOW_2_WORDS 0x03
5225 struct hclge_fd_key_cfg *key_cfg;
5228 if (!hnae3_dev_fd_supported(hdev))
5229 return 0;
5231 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5232 if (ret)
5233 return ret;
5235 switch (hdev->fd_cfg.fd_mode) {
5236 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5237 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5238 break;
5239 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5240 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5241 break;
5243 dev_err(&hdev->pdev->dev,
5244 "Unsupported flow director mode %u\n",
5245 hdev->fd_cfg.fd_mode);
5246 return -EOPNOTSUPP;
5247 }
5249 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5250 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5251 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5252 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5253 key_cfg->outer_sipv6_word_en = 0;
5254 key_cfg->outer_dipv6_word_en = 0;
5256 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5257 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5258 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5259 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5261 /* If the max 400-bit key is in use, MAC tuples can be matched as well */
5262 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5263 key_cfg->tuple_active |=
5264 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5266 /* roce_type is used to filter roce frames
5267  * dst_vport is used to specify the rule
5268  */
5269 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5271 ret = hclge_get_fd_allocation(hdev,
5272 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5273 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5274 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5275 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5276 if (ret)
5277 return ret;
5279 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5282 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5283 int loc, u8 *key, bool is_add)
5285 struct hclge_fd_tcam_config_1_cmd *req1;
5286 struct hclge_fd_tcam_config_2_cmd *req2;
5287 struct hclge_fd_tcam_config_3_cmd *req3;
5288 struct hclge_desc desc[3];
5291 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5292 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5293 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5294 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5295 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5297 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5298 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5299 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5301 req1->stage = stage;
5302 req1->xy_sel = sel_x ? 1 : 0;
5303 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5304 req1->index = cpu_to_le32(loc);
5305 req1->entry_vld = sel_x ? is_add : 0;
5307 if (key) {
5308 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5309 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5310 sizeof(req2->tcam_data));
5311 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5312 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5313 }
5315 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5316 if (ret)
5317 dev_err(&hdev->pdev->dev,
5318 "config tcam key fail, ret=%d\n",
5319 ret);
5321 return ret;
5322 }
5324 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5325 struct hclge_fd_ad_data *action)
5327 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5328 struct hclge_fd_ad_config_cmd *req;
5329 struct hclge_desc desc;
5330 u64 ad_data = 0;
5331 int ret;
5333 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5335 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5336 req->index = cpu_to_le32(loc);
5339 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5340 action->write_rule_id_to_bd);
5341 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5342 action->rule_id);
5343 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5344 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5345 action->override_tc);
5346 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5347 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5348 }
5350 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5351 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5352 action->forward_to_direct_queue);
5353 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5354 action->queue_id);
5355 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5356 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5357 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5358 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5359 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5360 action->counter_id);
5362 req->ad_data = cpu_to_le64(ad_data);
5363 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5364 if (ret)
5365 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5367 return ret;
5368 }
5370 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5371 struct hclge_fd_rule *rule)
5373 u16 tmp_x_s, tmp_y_s;
5374 u32 tmp_x_l, tmp_y_l;
5377 if (rule->unused_tuple & tuple_bit)
5378 return true;
5380 switch (tuple_bit) {
5381 case BIT(INNER_DST_MAC):
5382 for (i = 0; i < ETH_ALEN; i++) {
5383 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5384 rule->tuples_mask.dst_mac[i]);
5385 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5386 rule->tuples_mask.dst_mac[i]);
5387 }
5389 return true;
5390 case BIT(INNER_SRC_MAC):
5391 for (i = 0; i < ETH_ALEN; i++) {
5392 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5393 rule->tuples_mask.src_mac[i]);
5394 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5395 rule->tuples_mask.src_mac[i]);
5396 }
5398 return true;
5399 case BIT(INNER_VLAN_TAG_FST):
5400 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5401 rule->tuples_mask.vlan_tag1);
5402 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5403 rule->tuples_mask.vlan_tag1);
5404 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5405 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5407 return true;
5408 case BIT(INNER_ETH_TYPE):
5409 calc_x(tmp_x_s, rule->tuples.ether_proto,
5410 rule->tuples_mask.ether_proto);
5411 calc_y(tmp_y_s, rule->tuples.ether_proto,
5412 rule->tuples_mask.ether_proto);
5413 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5414 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5416 return true;
5417 case BIT(INNER_IP_TOS):
5418 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5419 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5421 return true;
5422 case BIT(INNER_IP_PROTO):
5423 calc_x(*key_x, rule->tuples.ip_proto,
5424 rule->tuples_mask.ip_proto);
5425 calc_y(*key_y, rule->tuples.ip_proto,
5426 rule->tuples_mask.ip_proto);
5428 return true;
5429 case BIT(INNER_SRC_IP):
5430 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5431 rule->tuples_mask.src_ip[IPV4_INDEX]);
5432 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5433 rule->tuples_mask.src_ip[IPV4_INDEX]);
5434 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5435 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5437 return true;
5438 case BIT(INNER_DST_IP):
5439 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5440 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5441 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5442 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5443 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5444 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5446 return true;
5447 case BIT(INNER_SRC_PORT):
5448 calc_x(tmp_x_s, rule->tuples.src_port,
5449 rule->tuples_mask.src_port);
5450 calc_y(tmp_y_s, rule->tuples.src_port,
5451 rule->tuples_mask.src_port);
5452 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5453 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5455 return true;
5456 case BIT(INNER_DST_PORT):
5457 calc_x(tmp_x_s, rule->tuples.dst_port,
5458 rule->tuples_mask.dst_port);
5459 calc_y(tmp_y_s, rule->tuples.dst_port,
5460 rule->tuples_mask.dst_port);
5461 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5462 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5464 return true;
5465 default:
5466 return false;
5467 }
5468 }
5470 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5471 u8 vf_id, u8 network_port_id)
5473 u32 port_number = 0;
5475 if (port_type == HOST_PORT) {
5476 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5477 pf_id);
5478 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5479 vf_id);
5480 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5481 } else {
5482 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5483 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5484 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5485 }
5487 return port_number;
5488 }
5490 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5491 __le32 *key_x, __le32 *key_y,
5492 struct hclge_fd_rule *rule)
5494 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5495 u8 cur_pos = 0, tuple_size, shift_bits;
5498 for (i = 0; i < MAX_META_DATA; i++) {
5499 tuple_size = meta_data_key_info[i].key_length;
5500 tuple_bit = key_cfg->meta_data_active & BIT(i);
5502 switch (tuple_bit) {
5503 case BIT(ROCE_TYPE):
5504 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5505 cur_pos += tuple_size;
5506 break;
5507 case BIT(DST_VPORT):
5508 port_number = hclge_get_port_number(HOST_PORT, 0,
5509 rule->vf_id, 0);
5510 hnae3_set_field(meta_data,
5511 GENMASK(cur_pos + tuple_size, cur_pos),
5512 cur_pos, port_number);
5513 cur_pos += tuple_size;
5514 break;
5515 default:
5516 break;
5517 }
5518 }
5520 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5521 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5522 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5524 *key_x = cpu_to_le32(tmp_x << shift_bits);
5525 *key_y = cpu_to_le32(tmp_y << shift_bits);
5526 }
5528 /* A complete key consists of a meta data key and a tuple key.
5529  * The meta data key is stored in the MSB region and the tuple key in
5530  * the LSB region; unused bits are filled with 0.
5531  */
5532 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5533 struct hclge_fd_rule *rule)
5535 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5536 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5537 u8 *cur_key_x, *cur_key_y;
5538 u8 meta_data_region;
5539 u8 tuple_size;
5540 int ret;
5541 u32 i;
5543 memset(key_x, 0, sizeof(key_x));
5544 memset(key_y, 0, sizeof(key_y));
5546 cur_key_x = key_x;
5547 cur_key_y = key_y;
5548 for (i = 0; i < MAX_TUPLE; i++) {
5549 bool tuple_valid;
5550 u32 check_tuple;
5552 tuple_size = tuple_key_info[i].key_length / 8;
5553 check_tuple = key_cfg->tuple_active & BIT(i);
5555 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5556 cur_key_y, rule);
5557 if (tuple_valid) {
5558 cur_key_x += tuple_size;
5559 cur_key_y += tuple_size;
5560 }
5561 }
5563 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5564 MAX_META_DATA_LENGTH / 8;
5566 hclge_fd_convert_meta_data(key_cfg,
5567 (__le32 *)(key_x + meta_data_region),
5568 (__le32 *)(key_y + meta_data_region),
5571 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5572 true);
5573 if (ret) {
5574 dev_err(&hdev->pdev->dev,
5575 "fd key_y config fail, loc=%u, ret=%d\n",
5576 rule->location, ret);
5577 return ret;
5578 }
5580 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5581 true);
5582 if (ret)
5583 dev_err(&hdev->pdev->dev,
5584 "fd key_x config fail, loc=%u, ret=%d\n",
5585 rule->queue_id, ret);
5589 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5590 struct hclge_fd_rule *rule)
5592 struct hclge_vport *vport = hdev->vport;
5593 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5594 struct hclge_fd_ad_data ad_data;
5596 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5597 ad_data.ad_id = rule->location;
5599 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5600 ad_data.drop_packet = true;
5601 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5602 ad_data.override_tc = true;
5603 ad_data.queue_id =
5604 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5605 ad_data.tc_size =
5606 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5607 } else {
5608 ad_data.forward_to_direct_queue = true;
5609 ad_data.queue_id = rule->queue_id;
5610 }
5612 ad_data.use_counter = false;
5613 ad_data.counter_id = 0;
5615 ad_data.use_next_stage = false;
5616 ad_data.next_input_key = 0;
5618 ad_data.write_rule_id_to_bd = true;
5619 ad_data.rule_id = rule->location;
5621 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5624 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5625 u32 *unused_tuple)
5627 if (!spec || !unused_tuple)
5628 return -EINVAL;
5630 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5632 if (!spec->ip4src)
5633 *unused_tuple |= BIT(INNER_SRC_IP);
5635 if (!spec->ip4dst)
5636 *unused_tuple |= BIT(INNER_DST_IP);
5638 if (!spec->psrc)
5639 *unused_tuple |= BIT(INNER_SRC_PORT);
5641 if (!spec->pdst)
5642 *unused_tuple |= BIT(INNER_DST_PORT);
5644 if (!spec->tos)
5645 *unused_tuple |= BIT(INNER_IP_TOS);
5647 return 0;
5648 }
5650 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5651 u32 *unused_tuple)
5653 if (!spec || !unused_tuple)
5654 return -EINVAL;
5656 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5657 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5659 if (!spec->ip4src)
5660 *unused_tuple |= BIT(INNER_SRC_IP);
5662 if (!spec->ip4dst)
5663 *unused_tuple |= BIT(INNER_DST_IP);
5665 if (!spec->tos)
5666 *unused_tuple |= BIT(INNER_IP_TOS);
5668 if (!spec->proto)
5669 *unused_tuple |= BIT(INNER_IP_PROTO);
5671 if (spec->l4_4_bytes)
5672 return -EOPNOTSUPP;
5674 if (spec->ip_ver != ETH_RX_NFC_IP4)
5675 return -EOPNOTSUPP;
5677 return 0;
5678 }
5680 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5681 u32 *unused_tuple)
5683 if (!spec || !unused_tuple)
5684 return -EINVAL;
5686 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5687 BIT(INNER_IP_TOS);
5689 /* check whether the src/dst ip address is used */
5690 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5691 *unused_tuple |= BIT(INNER_SRC_IP);
5693 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5694 *unused_tuple |= BIT(INNER_DST_IP);
5696 if (!spec->psrc)
5697 *unused_tuple |= BIT(INNER_SRC_PORT);
5699 if (!spec->pdst)
5700 *unused_tuple |= BIT(INNER_DST_PORT);
5702 if (spec->tclass)
5703 return -EOPNOTSUPP;
5705 return 0;
5706 }
5708 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5709 u32 *unused_tuple)
5711 if (!spec || !unused_tuple)
5712 return -EINVAL;
5714 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5715 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5717 /* check whether the src/dst ip address is used */
5718 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5719 *unused_tuple |= BIT(INNER_SRC_IP);
5721 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5722 *unused_tuple |= BIT(INNER_DST_IP);
5724 if (!spec->l4_proto)
5725 *unused_tuple |= BIT(INNER_IP_PROTO);
5727 if (spec->tclass)
5728 return -EOPNOTSUPP;
5730 if (spec->l4_4_bytes)
5731 return -EOPNOTSUPP;
5733 return 0;
5734 }
5736 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5738 if (!spec || !unused_tuple)
5739 return -EINVAL;
5741 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5742 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5743 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5745 if (is_zero_ether_addr(spec->h_source))
5746 *unused_tuple |= BIT(INNER_SRC_MAC);
5748 if (is_zero_ether_addr(spec->h_dest))
5749 *unused_tuple |= BIT(INNER_DST_MAC);
5751 if (!spec->h_proto)
5752 *unused_tuple |= BIT(INNER_ETH_TYPE);
5754 return 0;
5755 }
5757 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5758 struct ethtool_rx_flow_spec *fs,
5759 u32 *unused_tuple)
5761 if (fs->flow_type & FLOW_EXT) {
5762 if (fs->h_ext.vlan_etype) {
5763 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5767 if (!fs->h_ext.vlan_tci)
5768 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5770 if (fs->m_ext.vlan_tci &&
5771 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5772 dev_err(&hdev->pdev->dev,
5773 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5774 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5775 return -EINVAL;
5776 }
5777 } else {
5778 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5779 }
5781 if (fs->flow_type & FLOW_MAC_EXT) {
5782 if (hdev->fd_cfg.fd_mode !=
5783 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5784 dev_err(&hdev->pdev->dev,
5785 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5789 if (is_zero_ether_addr(fs->h_ext.h_dest))
5790 *unused_tuple |= BIT(INNER_DST_MAC);
5791 else
5792 *unused_tuple &= ~BIT(INNER_DST_MAC);
5793 }
5795 return 0;
5796 }
5798 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5799 struct ethtool_rx_flow_spec *fs,
5800 u32 *unused_tuple)
5802 u32 flow_type;
5803 int ret;
5805 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5806 dev_err(&hdev->pdev->dev,
5807 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5809 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5813 if ((fs->flow_type & FLOW_EXT) &&
5814 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5815 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5819 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5820 switch (flow_type) {
5821 case SCTP_V4_FLOW:
5822 case TCP_V4_FLOW:
5823 case UDP_V4_FLOW:
5824 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5825 unused_tuple);
5826 break;
5827 case IP_USER_FLOW:
5828 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5829 unused_tuple);
5830 break;
5831 case SCTP_V6_FLOW:
5832 case TCP_V6_FLOW:
5833 case UDP_V6_FLOW:
5834 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5835 unused_tuple);
5836 break;
5837 case IPV6_USER_FLOW:
5838 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5839 unused_tuple);
5840 break;
5841 case ETHER_FLOW:
5842 if (hdev->fd_cfg.fd_mode !=
5843 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5844 dev_err(&hdev->pdev->dev,
5845 "ETHER_FLOW is not supported in current fd mode!\n");
5846 return -EOPNOTSUPP;
5847 }
5849 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5850 unused_tuple);
5851 break;
5852 default:
5853 dev_err(&hdev->pdev->dev,
5854 "unsupported protocol type, protocol type = %#x\n",
5855 flow_type);
5856 return -EOPNOTSUPP;
5857 }
5859 if (ret) {
5860 dev_err(&hdev->pdev->dev,
5861 "failed to check flow union tuple, ret = %d\n",
5862 ret);
5863 return ret;
5864 }
5866 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5867 }
5869 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5871 struct hclge_fd_rule *rule = NULL;
5872 struct hlist_node *node2;
5874 spin_lock_bh(&hdev->fd_rule_lock);
5875 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5876 if (rule->location >= location)
5877 break;
5878 }
5880 spin_unlock_bh(&hdev->fd_rule_lock);
5882 return rule && rule->location == location;
5885 /* make sure being called after lock up with fd_rule_lock */
5886 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5887 struct hclge_fd_rule *new_rule,
5891 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5892 struct hlist_node *node2;
5894 if (is_add && !new_rule)
5897 hlist_for_each_entry_safe(rule, node2,
5898 &hdev->fd_rule_list, rule_node) {
5899 if (rule->location >= location)
5900 break;
5901 parent = rule;
5902 }
5904 if (rule && rule->location == location) {
5905 hlist_del(&rule->rule_node);
5906 kfree(rule);
5907 hdev->hclge_fd_rule_num--;
5909 if (!is_add) {
5910 if (!hdev->hclge_fd_rule_num)
5911 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5912 clear_bit(location, hdev->fd_bmap);
5914 return 0;
5915 }
5916 } else if (!is_add) {
5917 dev_err(&hdev->pdev->dev,
5918 "delete fail, rule %u is inexistent\n",
5923 INIT_HLIST_NODE(&new_rule->rule_node);
5925 if (parent)
5926 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5927 else
5928 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5930 set_bit(location, hdev->fd_bmap);
5931 hdev->hclge_fd_rule_num++;
5932 hdev->fd_active_type = new_rule->rule_type;
5934 return 0;
5935 }
5937 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5938 struct ethtool_rx_flow_spec *fs,
5939 struct hclge_fd_rule *rule)
5941 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5943 switch (flow_type) {
5944 case SCTP_V4_FLOW:
5945 case TCP_V4_FLOW:
5946 case UDP_V4_FLOW:
5947 rule->tuples.src_ip[IPV4_INDEX] =
5948 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5949 rule->tuples_mask.src_ip[IPV4_INDEX] =
5950 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5952 rule->tuples.dst_ip[IPV4_INDEX] =
5953 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5954 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5955 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5957 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5958 rule->tuples_mask.src_port =
5959 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5961 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5962 rule->tuples_mask.dst_port =
5963 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5965 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5966 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5968 rule->tuples.ether_proto = ETH_P_IP;
5969 rule->tuples_mask.ether_proto = 0xFFFF;
5971 break;
5972 case IP_USER_FLOW:
5973 rule->tuples.src_ip[IPV4_INDEX] =
5974 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5975 rule->tuples_mask.src_ip[IPV4_INDEX] =
5976 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5978 rule->tuples.dst_ip[IPV4_INDEX] =
5979 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5980 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5981 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5983 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5984 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5986 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5987 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5989 rule->tuples.ether_proto = ETH_P_IP;
5990 rule->tuples_mask.ether_proto = 0xFFFF;
5992 break;
5993 case SCTP_V6_FLOW:
5994 case TCP_V6_FLOW:
5995 case UDP_V6_FLOW:
5996 be32_to_cpu_array(rule->tuples.src_ip,
5997 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5998 be32_to_cpu_array(rule->tuples_mask.src_ip,
5999 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
6001 be32_to_cpu_array(rule->tuples.dst_ip,
6002 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
6003 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6004 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
6006 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6007 rule->tuples_mask.src_port =
6008 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6010 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6011 rule->tuples_mask.dst_port =
6012 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6014 rule->tuples.ether_proto = ETH_P_IPV6;
6015 rule->tuples_mask.ether_proto = 0xFFFF;
6017 break;
6018 case IPV6_USER_FLOW:
6019 be32_to_cpu_array(rule->tuples.src_ip,
6020 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
6021 be32_to_cpu_array(rule->tuples_mask.src_ip,
6022 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
6024 be32_to_cpu_array(rule->tuples.dst_ip,
6025 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
6026 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6027 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
6029 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6030 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6032 rule->tuples.ether_proto = ETH_P_IPV6;
6033 rule->tuples_mask.ether_proto = 0xFFFF;
6035 break;
6036 case ETHER_FLOW:
6037 ether_addr_copy(rule->tuples.src_mac,
6038 fs->h_u.ether_spec.h_source);
6039 ether_addr_copy(rule->tuples_mask.src_mac,
6040 fs->m_u.ether_spec.h_source);
6042 ether_addr_copy(rule->tuples.dst_mac,
6043 fs->h_u.ether_spec.h_dest);
6044 ether_addr_copy(rule->tuples_mask.dst_mac,
6045 fs->m_u.ether_spec.h_dest);
6047 rule->tuples.ether_proto =
6048 be16_to_cpu(fs->h_u.ether_spec.h_proto);
6049 rule->tuples_mask.ether_proto =
6050 be16_to_cpu(fs->m_u.ether_spec.h_proto);
6052 break;
6053 default:
6054 return -EOPNOTSUPP;
6055 }
6057 switch (flow_type) {
6058 case SCTP_V4_FLOW:
6059 case SCTP_V6_FLOW:
6060 rule->tuples.ip_proto = IPPROTO_SCTP;
6061 rule->tuples_mask.ip_proto = 0xFF;
6062 break;
6063 case TCP_V4_FLOW:
6064 case TCP_V6_FLOW:
6065 rule->tuples.ip_proto = IPPROTO_TCP;
6066 rule->tuples_mask.ip_proto = 0xFF;
6067 break;
6068 case UDP_V4_FLOW:
6069 case UDP_V6_FLOW:
6070 rule->tuples.ip_proto = IPPROTO_UDP;
6071 rule->tuples_mask.ip_proto = 0xFF;
6072 break;
6073 default:
6074 break;
6075 }
6077 if (fs->flow_type & FLOW_EXT) {
6078 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6079 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6082 if (fs->flow_type & FLOW_MAC_EXT) {
6083 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6084 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6085 }
6087 return 0;
6088 }
6090 /* make sure being called after lock up with fd_rule_lock */
6091 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6092 struct hclge_fd_rule *rule)
6094 int ret;
6096 if (!rule) {
6097 dev_err(&hdev->pdev->dev,
6098 "The flow director rule is NULL\n");
6102 /* it will never fail here, so needn't to check return value */
6103 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
6105 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6106 if (ret)
6107 goto clear_rule;
6109 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6110 if (ret)
6111 goto clear_rule;
6113 return 0;
6115 clear_rule:
6116 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
6117 return ret;
6118 }
6120 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6122 struct hclge_vport *vport = hclge_get_vport(handle);
6123 struct hclge_dev *hdev = vport->back;
6125 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6128 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6129 struct ethtool_rxnfc *cmd)
6131 struct hclge_vport *vport = hclge_get_vport(handle);
6132 struct hclge_dev *hdev = vport->back;
6133 u16 dst_vport_id = 0, q_index = 0;
6134 struct ethtool_rx_flow_spec *fs;
6135 struct hclge_fd_rule *rule;
6136 u32 unused = 0;
6137 u8 action;
6138 int ret;
6140 if (!hnae3_dev_fd_supported(hdev)) {
6141 dev_err(&hdev->pdev->dev,
6142 "flow table director is not supported\n");
6146 if (!hdev->fd_en) {
6147 dev_err(&hdev->pdev->dev,
6148 "please enable flow director first\n");
6152 if (hclge_is_cls_flower_active(handle)) {
6153 dev_err(&hdev->pdev->dev,
6154 "please delete all exist cls flower rules first\n");
6158 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6160 ret = hclge_fd_check_spec(hdev, fs, &unused);
6161 if (ret)
6162 return ret;
6164 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
6165 action = HCLGE_FD_ACTION_DROP_PACKET;
6166 } else {
6167 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
6168 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
6169 u16 tqps;
6171 if (vf > hdev->num_req_vfs) {
6172 dev_err(&hdev->pdev->dev,
6173 "Error: vf id (%u) > max vf num (%u)\n",
6174 vf, hdev->num_req_vfs);
6175 return -EINVAL;
6176 }
6178 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6179 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
6181 if (ring >= tqps) {
6182 dev_err(&hdev->pdev->dev,
6183 "Error: queue id (%u) > max tqp num (%u)\n",
6188 action = HCLGE_FD_ACTION_SELECT_QUEUE;
6189 q_index = ring;
6190 }
6192 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6193 if (!rule)
6194 return -ENOMEM;
6196 ret = hclge_fd_get_tuple(hdev, fs, rule);
6197 if (ret) {
6198 kfree(rule);
6199 return ret;
6200 }
6202 rule->flow_type = fs->flow_type;
6203 rule->location = fs->location;
6204 rule->unused_tuple = unused;
6205 rule->vf_id = dst_vport_id;
6206 rule->queue_id = q_index;
6207 rule->action = action;
6208 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6210 /* to avoid rule conflict, when user configure rule by ethtool,
6211 * we need to clear all arfs rules
6213 spin_lock_bh(&hdev->fd_rule_lock);
6214 hclge_clear_arfs_rules(handle);
6216 ret = hclge_fd_config_rule(hdev, rule);
6218 spin_unlock_bh(&hdev->fd_rule_lock);
6220 return ret;
6221 }
6223 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6224 struct ethtool_rxnfc *cmd)
6226 struct hclge_vport *vport = hclge_get_vport(handle);
6227 struct hclge_dev *hdev = vport->back;
6228 struct ethtool_rx_flow_spec *fs;
6231 if (!hnae3_dev_fd_supported(hdev))
6232 return -EOPNOTSUPP;
6234 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6236 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6237 return -EINVAL;
6239 if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6240 !hclge_fd_rule_exist(hdev, fs->location)) {
6241 dev_err(&hdev->pdev->dev,
6242 "Delete fail, rule %u is inexistent\n", fs->location);
6246 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6247 NULL, false);
6248 if (ret)
6249 return ret;
6251 spin_lock_bh(&hdev->fd_rule_lock);
6252 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6254 spin_unlock_bh(&hdev->fd_rule_lock);
6256 return ret;
6257 }
6259 /* make sure being called after lock up with fd_rule_lock */
6260 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6261 bool clear_list)
6263 struct hclge_vport *vport = hclge_get_vport(handle);
6264 struct hclge_dev *hdev = vport->back;
6265 struct hclge_fd_rule *rule;
6266 struct hlist_node *node;
6269 if (!hnae3_dev_fd_supported(hdev))
6270 return;
6272 for_each_set_bit(location, hdev->fd_bmap,
6273 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6274 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6275 NULL, false);
6277 if (clear_list) {
6278 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6279 rule_node) {
6280 hlist_del(&rule->rule_node);
6281 kfree(rule);
6282 }
6283 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6284 hdev->hclge_fd_rule_num = 0;
6285 bitmap_zero(hdev->fd_bmap,
6286 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6287 }
6288 }
6290 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6292 struct hclge_vport *vport = hclge_get_vport(handle);
6293 struct hclge_dev *hdev = vport->back;
6294 struct hclge_fd_rule *rule;
6295 struct hlist_node *node;
6298 /* Return ok here, because reset error handling will check this
6299  * return value. If an error is returned here, the reset process will
6300  * fail.
6301  */
6302 if (!hnae3_dev_fd_supported(hdev))
6303 return 0;
6305 /* if fd is disabled, it should not be restored during reset */
6306 if (!hdev->fd_en)
6307 return 0;
6309 spin_lock_bh(&hdev->fd_rule_lock);
6310 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6311 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6312 if (!ret)
6313 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6315 if (ret) {
6316 dev_warn(&hdev->pdev->dev,
6317 "Restore rule %u failed, remove it\n",
6319 clear_bit(rule->location, hdev->fd_bmap);
6320 hlist_del(&rule->rule_node);
6321 kfree(rule);
6322 hdev->hclge_fd_rule_num--;
6323 }
6324 }
6326 if (hdev->hclge_fd_rule_num)
6327 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6329 spin_unlock_bh(&hdev->fd_rule_lock);
6331 return 0;
6332 }
6334 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6335 struct ethtool_rxnfc *cmd)
6337 struct hclge_vport *vport = hclge_get_vport(handle);
6338 struct hclge_dev *hdev = vport->back;
6340 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6341 return -EOPNOTSUPP;
6343 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6344 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6346 return 0;
6347 }
6349 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6350 struct ethtool_tcpip4_spec *spec,
6351 struct ethtool_tcpip4_spec *spec_mask)
6353 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6354 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6355 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6357 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6358 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6359 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6361 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6362 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6363 0 : cpu_to_be16(rule->tuples_mask.src_port);
6365 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6366 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6367 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6369 spec->tos = rule->tuples.ip_tos;
6370 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6371 0 : rule->tuples_mask.ip_tos;
6374 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6375 struct ethtool_usrip4_spec *spec,
6376 struct ethtool_usrip4_spec *spec_mask)
6378 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6379 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6380 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6382 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6383 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6384 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6386 spec->tos = rule->tuples.ip_tos;
6387 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6388 0 : rule->tuples_mask.ip_tos;
6390 spec->proto = rule->tuples.ip_proto;
6391 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6392 0 : rule->tuples_mask.ip_proto;
6394 spec->ip_ver = ETH_RX_NFC_IP4;
6397 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6398 struct ethtool_tcpip6_spec *spec,
6399 struct ethtool_tcpip6_spec *spec_mask)
6401 cpu_to_be32_array(spec->ip6src,
6402 rule->tuples.src_ip, IPV6_SIZE);
6403 cpu_to_be32_array(spec->ip6dst,
6404 rule->tuples.dst_ip, IPV6_SIZE);
6405 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6406 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6407 else
6408 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6409 IPV6_SIZE);
6411 if (rule->unused_tuple & BIT(INNER_DST_IP))
6412 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6413 else
6414 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6415 IPV6_SIZE);
6417 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6418 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6419 0 : cpu_to_be16(rule->tuples_mask.src_port);
6421 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6422 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6423 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6426 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6427 struct ethtool_usrip6_spec *spec,
6428 struct ethtool_usrip6_spec *spec_mask)
6430 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6431 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6432 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6433 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6434 else
6435 cpu_to_be32_array(spec_mask->ip6src,
6436 rule->tuples_mask.src_ip, IPV6_SIZE);
6438 if (rule->unused_tuple & BIT(INNER_DST_IP))
6439 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6440 else
6441 cpu_to_be32_array(spec_mask->ip6dst,
6442 rule->tuples_mask.dst_ip, IPV6_SIZE);
6444 spec->l4_proto = rule->tuples.ip_proto;
6445 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6446 0 : rule->tuples_mask.ip_proto;
6449 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6450 struct ethhdr *spec,
6451 struct ethhdr *spec_mask)
6453 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6454 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6456 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6457 eth_zero_addr(spec_mask->h_source);
6458 else
6459 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6461 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6462 eth_zero_addr(spec_mask->h_dest);
6463 else
6464 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6466 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6467 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6468 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6471 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6472 struct hclge_fd_rule *rule)
6474 if (fs->flow_type & FLOW_EXT) {
6475 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6476 fs->m_ext.vlan_tci =
6477 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6478 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6479 }
6481 if (fs->flow_type & FLOW_MAC_EXT) {
6482 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6483 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6484 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6485 else
6486 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6487 rule->tuples_mask.dst_mac);
6488 }
6489 }
6491 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6492 struct ethtool_rxnfc *cmd)
6494 struct hclge_vport *vport = hclge_get_vport(handle);
6495 struct hclge_fd_rule *rule = NULL;
6496 struct hclge_dev *hdev = vport->back;
6497 struct ethtool_rx_flow_spec *fs;
6498 struct hlist_node *node2;
6500 if (!hnae3_dev_fd_supported(hdev))
6501 return -EOPNOTSUPP;
6503 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6505 spin_lock_bh(&hdev->fd_rule_lock);
6507 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6508 if (rule->location >= fs->location)
6509 break;
6510 }
6512 if (!rule || fs->location != rule->location) {
6513 spin_unlock_bh(&hdev->fd_rule_lock);
6515 return -ENOENT;
6516 }
6518 fs->flow_type = rule->flow_type;
6519 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6520 case SCTP_V4_FLOW:
6521 case TCP_V4_FLOW:
6522 case UDP_V4_FLOW:
6523 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6524 &fs->m_u.tcp_ip4_spec);
6525 break;
6526 case IP_USER_FLOW:
6527 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6528 &fs->m_u.usr_ip4_spec);
6529 break;
6530 case SCTP_V6_FLOW:
6531 case TCP_V6_FLOW:
6532 case UDP_V6_FLOW:
6533 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6534 &fs->m_u.tcp_ip6_spec);
6535 break;
6536 case IPV6_USER_FLOW:
6537 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6538 &fs->m_u.usr_ip6_spec);
6539 break;
6540 /* The flow type of the fd rule has been checked before it was added to
6541  * the rule list. As all other flow types have been handled above, it
6542  * must be ETHER_FLOW for the default case.
6543  */
6544 default:
6545 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6546 &fs->m_u.ether_spec);
6547 break;
6548 }
6550 hclge_fd_get_ext_info(fs, rule);
6552 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6553 fs->ring_cookie = RX_CLS_FLOW_DISC;
6554 } else {
6555 u64 vf_id;
6557 fs->ring_cookie = rule->queue_id;
6558 vf_id = rule->vf_id;
6559 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6560 fs->ring_cookie |= vf_id;
6561 }
6563 spin_unlock_bh(&hdev->fd_rule_lock);
6565 return 0;
6566 }
6568 static int hclge_get_all_rules(struct hnae3_handle *handle,
6569 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6571 struct hclge_vport *vport = hclge_get_vport(handle);
6572 struct hclge_dev *hdev = vport->back;
6573 struct hclge_fd_rule *rule;
6574 struct hlist_node *node2;
6577 if (!hnae3_dev_fd_supported(hdev))
6580 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6582 spin_lock_bh(&hdev->fd_rule_lock);
6583 hlist_for_each_entry_safe(rule, node2,
6584 &hdev->fd_rule_list, rule_node) {
6585 if (cnt == cmd->rule_cnt) {
6586 spin_unlock_bh(&hdev->fd_rule_lock);
6590 rule_locs[cnt] = rule->location;
6594 spin_unlock_bh(&hdev->fd_rule_lock);
6596 cmd->rule_cnt = cnt;
6601 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6602 struct hclge_fd_rule_tuples *tuples)
6604 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6605 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6607 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6608 tuples->ip_proto = fkeys->basic.ip_proto;
6609 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6611 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6612 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6613 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6617 for (i = 0; i < IPV6_SIZE; i++) {
6618 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6619 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6624 /* traverse all rules, check whether an existing rule has the same tuples */
6625 static struct hclge_fd_rule *
6626 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6627 const struct hclge_fd_rule_tuples *tuples)
6629 struct hclge_fd_rule *rule = NULL;
6630 struct hlist_node *node;
6632 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6633 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6640 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6641 struct hclge_fd_rule *rule)
6643 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6644 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6645 BIT(INNER_SRC_PORT);
6648 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6649 if (tuples->ether_proto == ETH_P_IP) {
6650 if (tuples->ip_proto == IPPROTO_TCP)
6651 rule->flow_type = TCP_V4_FLOW;
6653 rule->flow_type = UDP_V4_FLOW;
6655 if (tuples->ip_proto == IPPROTO_TCP)
6656 rule->flow_type = TCP_V6_FLOW;
6658 rule->flow_type = UDP_V6_FLOW;
6660 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6661 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
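/* hclge_add_fd_entry_by_arfs - backend for the ndo_rx_flow_steer path:
 * install a rule matching the dissected flow tuples in stage 1 of the
 * flow director, or re-target an existing rule to the new queue. Returns
 * the rule location, which also serves as the flow id reported to RPS.
 */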
6664 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6665 u16 flow_id, struct flow_keys *fkeys)
6667 struct hclge_vport *vport = hclge_get_vport(handle);
6668 struct hclge_fd_rule_tuples new_tuples = {};
6669 struct hclge_dev *hdev = vport->back;
6670 struct hclge_fd_rule *rule;
6675 if (!hnae3_dev_fd_supported(hdev))
6678 /* when there are already fd rules added by the user,
6679 * arfs should not work
6681 spin_lock_bh(&hdev->fd_rule_lock);
6682 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6683 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6684 spin_unlock_bh(&hdev->fd_rule_lock);
6688 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6690 /* check whether a flow director filter already exists for this flow:
6691 * if not, create a new filter for it;
6692 * if a filter exists with a different queue id, modify the filter;
6693 * if a filter exists with the same queue id, do nothing
6695 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6697 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6698 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6699 spin_unlock_bh(&hdev->fd_rule_lock);
6703 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6705 spin_unlock_bh(&hdev->fd_rule_lock);
6709 set_bit(bit_id, hdev->fd_bmap);
6710 rule->location = bit_id;
6711 rule->arfs.flow_id = flow_id;
6712 rule->queue_id = queue_id;
6713 hclge_fd_build_arfs_rule(&new_tuples, rule);
6714 ret = hclge_fd_config_rule(hdev, rule);
6716 spin_unlock_bh(&hdev->fd_rule_lock);
6721 return rule->location;
6724 spin_unlock_bh(&hdev->fd_rule_lock);
6726 if (rule->queue_id == queue_id)
6727 return rule->location;
6729 tmp_queue_id = rule->queue_id;
6730 rule->queue_id = queue_id;
6731 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6733 rule->queue_id = tmp_queue_id;
6737 return rule->location;
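/* hclge_rfs_filter_expire - drop aRFS rules that rps_may_expire_flow()
 * reports as idle: unlink them under fd_rule_lock first, then clear the
 * TCAM entries outside the lock.
 */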
6740 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6742 #ifdef CONFIG_RFS_ACCEL
6743 struct hnae3_handle *handle = &hdev->vport[0].nic;
6744 struct hclge_fd_rule *rule;
6745 struct hlist_node *node;
6746 HLIST_HEAD(del_list);
6748 spin_lock_bh(&hdev->fd_rule_lock);
6749 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6750 spin_unlock_bh(&hdev->fd_rule_lock);
6753 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6754 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6755 rule->arfs.flow_id, rule->location)) {
6756 hlist_del_init(&rule->rule_node);
6757 hlist_add_head(&rule->rule_node, &del_list);
6758 hdev->hclge_fd_rule_num--;
6759 clear_bit(rule->location, hdev->fd_bmap);
6762 spin_unlock_bh(&hdev->fd_rule_lock);
6764 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6765 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6766 rule->location, NULL, false);
6772 /* make sure this is called with fd_rule_lock held */
6773 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6775 #ifdef CONFIG_RFS_ACCEL
6776 struct hclge_vport *vport = hclge_get_vport(handle);
6777 struct hclge_dev *hdev = vport->back;
6779 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6780 hclge_del_all_fd_entries(handle, true);
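/* The hclge_get_cls_key_* helpers below each translate one tc flower
 * dissector key into flow director tuples; a key absent from the match
 * marks the corresponding tuples as unused.
 */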
6784 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
6785 struct hclge_fd_rule *rule)
6787 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
6788 struct flow_match_basic match;
6789 u16 ethtype_key, ethtype_mask;
6791 flow_rule_match_basic(flow, &match);
6792 ethtype_key = ntohs(match.key->n_proto);
6793 ethtype_mask = ntohs(match.mask->n_proto);
6795 if (ethtype_key == ETH_P_ALL) {
6799 rule->tuples.ether_proto = ethtype_key;
6800 rule->tuples_mask.ether_proto = ethtype_mask;
6801 rule->tuples.ip_proto = match.key->ip_proto;
6802 rule->tuples_mask.ip_proto = match.mask->ip_proto;
6804 rule->unused_tuple |= BIT(INNER_IP_PROTO);
6805 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
6809 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
6810 struct hclge_fd_rule *rule)
6812 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
6813 struct flow_match_eth_addrs match;
6815 flow_rule_match_eth_addrs(flow, &match);
6816 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
6817 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
6818 ether_addr_copy(rule->tuples.src_mac, match.key->src);
6819 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
6821 rule->unused_tuple |= BIT(INNER_DST_MAC);
6822 rule->unused_tuple |= BIT(INNER_SRC_MAC);
6826 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
6827 struct hclge_fd_rule *rule)
6829 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
6830 struct flow_match_vlan match;
6832 flow_rule_match_vlan(flow, &match);
6833 rule->tuples.vlan_tag1 = match.key->vlan_id |
6834 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
6835 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
6836 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
6838 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6842 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
6843 struct hclge_fd_rule *rule)
6847 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
6848 struct flow_match_control match;
6850 flow_rule_match_control(flow, &match);
6851 addr_type = match.key->addr_type;
6854 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
6855 struct flow_match_ipv4_addrs match;
6857 flow_rule_match_ipv4_addrs(flow, &match);
6858 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
6859 rule->tuples_mask.src_ip[IPV4_INDEX] =
6860 be32_to_cpu(match.mask->src);
6861 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
6862 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6863 be32_to_cpu(match.mask->dst);
6864 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
6865 struct flow_match_ipv6_addrs match;
6867 flow_rule_match_ipv6_addrs(flow, &match);
6868 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
6870 be32_to_cpu_array(rule->tuples_mask.src_ip,
6871 match.mask->src.s6_addr32, IPV6_SIZE);
6872 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
6874 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6875 match.mask->dst.s6_addr32, IPV6_SIZE);
6877 rule->unused_tuple |= BIT(INNER_SRC_IP);
6878 rule->unused_tuple |= BIT(INNER_DST_IP);
6882 static void hclge_get_cls_key_port(const struct flow_rule *flow,
6883 struct hclge_fd_rule *rule)
6885 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
6886 struct flow_match_ports match;
6888 flow_rule_match_ports(flow, &match);
6890 rule->tuples.src_port = be16_to_cpu(match.key->src);
6891 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
6892 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
6893 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
6895 rule->unused_tuple |= BIT(INNER_SRC_PORT);
6896 rule->unused_tuple |= BIT(INNER_DST_PORT);
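/* hclge_parse_cls_flower - reject a flower match that uses any dissector
 * key the flow director cannot express, then collect the supported keys
 * into the rule tuples.
 */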
6900 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
6901 struct flow_cls_offload *cls_flower,
6902 struct hclge_fd_rule *rule)
6904 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
6905 struct flow_dissector *dissector = flow->match.dissector;
6907 if (dissector->used_keys &
6908 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
6909 BIT(FLOW_DISSECTOR_KEY_BASIC) |
6910 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
6911 BIT(FLOW_DISSECTOR_KEY_VLAN) |
6912 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
6913 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
6914 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
6915 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
6916 dissector->used_keys);
6920 hclge_get_cls_key_basic(flow, rule);
6921 hclge_get_cls_key_mac(flow, rule);
6922 hclge_get_cls_key_vlan(flow, rule);
6923 hclge_get_cls_key_ip(flow, rule);
6924 hclge_get_cls_key_port(flow, rule);
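/* hclge_check_cls_flower - validate the tc and prio of a flower rule; the
 * 1-based prio maps directly to a flow director location (prio - 1), so
 * it must be unused and within the stage-1 rule count.
 */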
6929 static int hclge_check_cls_flower(struct hclge_dev *hdev,
6930 struct flow_cls_offload *cls_flower, int tc)
6932 u32 prio = cls_flower->common.prio;
6934 if (tc < 0 || tc > hdev->tc_max) {
6935 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
6940 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6941 dev_err(&hdev->pdev->dev,
6942 "prio %u should be in range[1, %u]\n",
6943 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6947 if (test_bit(prio - 1, hdev->fd_bmap)) {
6948 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
6954 static int hclge_add_cls_flower(struct hnae3_handle *handle,
6955 struct flow_cls_offload *cls_flower,
6958 struct hclge_vport *vport = hclge_get_vport(handle);
6959 struct hclge_dev *hdev = vport->back;
6960 struct hclge_fd_rule *rule;
6963 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6964 dev_err(&hdev->pdev->dev,
6965 "please remove all exist fd rules via ethtool first\n");
6969 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
6971 dev_err(&hdev->pdev->dev,
6972 "failed to check cls flower params, ret = %d\n", ret);
6976 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6980 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
6984 rule->action = HCLGE_FD_ACTION_SELECT_TC;
6985 rule->cls_flower.tc = tc;
6986 rule->location = cls_flower->common.prio - 1;
6988 rule->cls_flower.cookie = cls_flower->cookie;
6989 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
6991 spin_lock_bh(&hdev->fd_rule_lock);
6992 hclge_clear_arfs_rules(handle);
6994 ret = hclge_fd_config_rule(hdev, rule);
6996 spin_unlock_bh(&hdev->fd_rule_lock);
6999 dev_err(&hdev->pdev->dev,
7000 "failed to add cls flower rule, ret = %d\n", ret);
7010 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7011 unsigned long cookie)
7013 struct hclge_fd_rule *rule;
7014 struct hlist_node *node;
7016 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7017 if (rule->cls_flower.cookie == cookie)
7024 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7025 struct flow_cls_offload *cls_flower)
7027 struct hclge_vport *vport = hclge_get_vport(handle);
7028 struct hclge_dev *hdev = vport->back;
7029 struct hclge_fd_rule *rule;
7032 spin_lock_bh(&hdev->fd_rule_lock);
7034 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7036 spin_unlock_bh(&hdev->fd_rule_lock);
7040 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7043 dev_err(&hdev->pdev->dev,
7044 "failed to delete cls flower rule %u, ret = %d\n",
7045 rule->location, ret);
7046 spin_unlock_bh(&hdev->fd_rule_lock);
7050 ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
7052 dev_err(&hdev->pdev->dev,
7053 "failed to delete cls flower rule %u in list, ret = %d\n",
7054 rule->location, ret);
7055 spin_unlock_bh(&hdev->fd_rule_lock);
7059 spin_unlock_bh(&hdev->fd_rule_lock);
7064 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7066 struct hclge_vport *vport = hclge_get_vport(handle);
7067 struct hclge_dev *hdev = vport->back;
7069 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7070 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7073 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7075 struct hclge_vport *vport = hclge_get_vport(handle);
7076 struct hclge_dev *hdev = vport->back;
7078 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7081 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7083 struct hclge_vport *vport = hclge_get_vport(handle);
7084 struct hclge_dev *hdev = vport->back;
7086 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7089 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7091 struct hclge_vport *vport = hclge_get_vport(handle);
7092 struct hclge_dev *hdev = vport->back;
7094 return hdev->rst_stats.hw_reset_done_cnt;
7097 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7099 struct hclge_vport *vport = hclge_get_vport(handle);
7100 struct hclge_dev *hdev = vport->back;
7103 hdev->fd_en = enable;
7104 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7107 spin_lock_bh(&hdev->fd_rule_lock);
7108 hclge_del_all_fd_entries(handle, clear);
7109 spin_unlock_bh(&hdev->fd_rule_lock);
7111 hclge_restore_fd_entries(handle);
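/* hclge_cfg_mac_mode - enable or disable MAC TX/RX, together with padding,
 * FCS insert/strip and oversize truncation, in a single firmware command.
 */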
7115 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7117 struct hclge_desc desc;
7118 struct hclge_config_mac_mode_cmd *req =
7119 (struct hclge_config_mac_mode_cmd *)desc.data;
7123 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7126 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7127 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7128 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7129 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7130 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7131 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7132 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7133 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7134 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7135 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7138 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7140 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7142 dev_err(&hdev->pdev->dev,
7143 "mac enable fail, ret =%d.\n", ret);
7146 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7147 u8 switch_param, u8 param_mask)
7149 struct hclge_mac_vlan_switch_cmd *req;
7150 struct hclge_desc desc;
7154 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7155 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7157 /* read current config parameter */
7158 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7160 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7161 req->func_id = cpu_to_le32(func_id);
7163 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7165 dev_err(&hdev->pdev->dev,
7166 "read mac vlan switch parameter fail, ret = %d\n", ret);
7170 /* modify and write new config parameter */
7171 hclge_cmd_reuse_desc(&desc, false);
7172 req->switch_param = (req->switch_param & param_mask) | switch_param;
7173 req->param_mask = param_mask;
7175 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7177 dev_err(&hdev->pdev->dev,
7178 "set mac vlan switch parameter fail, ret = %d\n", ret);
7182 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7185 #define HCLGE_PHY_LINK_STATUS_NUM 200
7187 struct phy_device *phydev = hdev->hw.mac.phydev;
7192 ret = phy_read_status(phydev);
7194 dev_err(&hdev->pdev->dev,
7195 "phy update link status fail, ret = %d\n", ret);
7199 if (phydev->link == link_ret)
7202 msleep(HCLGE_LINK_STATUS_MS);
7203 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7206 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7208 #define HCLGE_MAC_LINK_STATUS_NUM 100
7215 ret = hclge_get_mac_link_status(hdev, &link_status);
7218 if (link_status == link_ret)
7221 msleep(HCLGE_LINK_STATUS_MS);
7222 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7226 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7231 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7234 hclge_phy_link_status_wait(hdev, link_ret);
7236 return hclge_mac_link_status_wait(hdev, link_ret);
7239 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7241 struct hclge_config_mac_mode_cmd *req;
7242 struct hclge_desc desc;
7246 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7247 /* 1 Read out the MAC mode config at first */
7248 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7249 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7251 dev_err(&hdev->pdev->dev,
7252 "mac loopback get fail, ret =%d.\n", ret);
7256 /* 2 Then setup the loopback flag */
7257 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7258 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7260 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7262 /* 3 Config mac work mode with the loopback flag
7263 * and its original configuration parameters
7265 hclge_cmd_reuse_desc(&desc, false);
7266 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7268 dev_err(&hdev->pdev->dev,
7269 "mac loopback set fail, ret =%d.\n", ret);
7273 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
7274 enum hnae3_loop loop_mode)
7276 #define HCLGE_SERDES_RETRY_MS 10
7277 #define HCLGE_SERDES_RETRY_NUM 100
7279 struct hclge_serdes_lb_cmd *req;
7280 struct hclge_desc desc;
7284 req = (struct hclge_serdes_lb_cmd *)desc.data;
7285 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
7287 switch (loop_mode) {
7288 case HNAE3_LOOP_SERIAL_SERDES:
7289 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7291 case HNAE3_LOOP_PARALLEL_SERDES:
7292 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7295 dev_err(&hdev->pdev->dev,
7296 "unsupported serdes loopback mode %d\n", loop_mode);
7301 req->enable = loop_mode_b;
7302 req->mask = loop_mode_b;
7304 req->mask = loop_mode_b;
7307 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7309 dev_err(&hdev->pdev->dev,
7310 "serdes loopback set fail, ret = %d\n", ret);
7315 msleep(HCLGE_SERDES_RETRY_MS);
7316 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
7318 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7320 dev_err(&hdev->pdev->dev,
7321 "serdes loopback get, ret = %d\n", ret);
7324 } while (++i < HCLGE_SERDES_RETRY_NUM &&
7325 !(req->result & HCLGE_CMD_SERDES_DONE_B));
7327 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
7328 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
7330 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
7331 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
7337 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
7338 enum hnae3_loop loop_mode)
7342 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
7346 hclge_cfg_mac_mode(hdev, en);
7348 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7350 dev_err(&hdev->pdev->dev,
7351 "serdes loopback config mac mode timeout\n");
7356 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7357 struct phy_device *phydev)
7361 if (!phydev->suspended) {
7362 ret = phy_suspend(phydev);
7367 ret = phy_resume(phydev);
7371 return phy_loopback(phydev, true);
7374 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7375 struct phy_device *phydev)
7379 ret = phy_loopback(phydev, false);
7383 return phy_suspend(phydev);
7386 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7388 struct phy_device *phydev = hdev->hw.mac.phydev;
7395 ret = hclge_enable_phy_loopback(hdev, phydev);
7397 ret = hclge_disable_phy_loopback(hdev, phydev);
7399 dev_err(&hdev->pdev->dev,
7400 "set phy loopback fail, ret = %d\n", ret);
7404 hclge_cfg_mac_mode(hdev, en);
7406 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7408 dev_err(&hdev->pdev->dev,
7409 "phy loopback config mac mode timeout\n");
7414 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
7415 int stream_id, bool enable)
7417 struct hclge_desc desc;
7418 struct hclge_cfg_com_tqp_queue_cmd *req =
7419 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7422 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7423 req->tqp_id = cpu_to_le16(tqp_id);
7424 req->stream_id = cpu_to_le16(stream_id);
7426 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7428 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7430 dev_err(&hdev->pdev->dev,
7431 "Tqp enable fail, status =%d.\n", ret);
7435 static int hclge_set_loopback(struct hnae3_handle *handle,
7436 enum hnae3_loop loop_mode, bool en)
7438 struct hclge_vport *vport = hclge_get_vport(handle);
7439 struct hnae3_knic_private_info *kinfo;
7440 struct hclge_dev *hdev = vport->back;
7443 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7444 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7445 * the same, the packets are looped back in the SSU. If SSU loopback
7446 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7448 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7449 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7451 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7452 HCLGE_SWITCH_ALW_LPBK_MASK);
7457 switch (loop_mode) {
7458 case HNAE3_LOOP_APP:
7459 ret = hclge_set_app_loopback(hdev, en);
7461 case HNAE3_LOOP_SERIAL_SERDES:
7462 case HNAE3_LOOP_PARALLEL_SERDES:
7463 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
7465 case HNAE3_LOOP_PHY:
7466 ret = hclge_set_phy_loopback(hdev, en);
7470 dev_err(&hdev->pdev->dev,
7471 "loop_mode %d is not supported\n", loop_mode);
7478 kinfo = &vport->nic.kinfo;
7479 for (i = 0; i < kinfo->num_tqps; i++) {
7480 ret = hclge_tqp_enable(hdev, i, 0, en);
7488 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7492 ret = hclge_set_app_loopback(hdev, false);
7496 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7500 return hclge_cfg_serdes_loopback(hdev, false,
7501 HNAE3_LOOP_PARALLEL_SERDES);
7504 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7506 struct hclge_vport *vport = hclge_get_vport(handle);
7507 struct hnae3_knic_private_info *kinfo;
7508 struct hnae3_queue *queue;
7509 struct hclge_tqp *tqp;
7512 kinfo = &vport->nic.kinfo;
7513 for (i = 0; i < kinfo->num_tqps; i++) {
7514 queue = handle->kinfo.tqp[i];
7515 tqp = container_of(queue, struct hclge_tqp, q);
7516 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7520 static void hclge_flush_link_update(struct hclge_dev *hdev)
7522 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
7524 unsigned long last = hdev->serv_processed_cnt;
7527 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7528 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7529 last == hdev->serv_processed_cnt)
7533 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7535 struct hclge_vport *vport = hclge_get_vport(handle);
7536 struct hclge_dev *hdev = vport->back;
7539 hclge_task_schedule(hdev, 0);
7541 /* Set the DOWN flag here to disable link updating */
7542 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7544 /* flush memory to make sure DOWN is seen by service task */
7545 smp_mb__before_atomic();
7546 hclge_flush_link_update(hdev);
7550 static int hclge_ae_start(struct hnae3_handle *handle)
7552 struct hclge_vport *vport = hclge_get_vport(handle);
7553 struct hclge_dev *hdev = vport->back;
7556 hclge_cfg_mac_mode(hdev, true);
7557 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7558 hdev->hw.mac.link = 0;
7560 /* reset tqp stats */
7561 hclge_reset_tqp_stats(handle);
7563 hclge_mac_start_phy(hdev);
7568 static void hclge_ae_stop(struct hnae3_handle *handle)
7570 struct hclge_vport *vport = hclge_get_vport(handle);
7571 struct hclge_dev *hdev = vport->back;
7574 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7575 spin_lock_bh(&hdev->fd_rule_lock);
7576 hclge_clear_arfs_rules(handle);
7577 spin_unlock_bh(&hdev->fd_rule_lock);
7579 /* If it is not a PF reset, the firmware will disable the MAC,
7580 * so it only needs to stop the phy here.
7582 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7583 hdev->reset_type != HNAE3_FUNC_RESET) {
7584 hclge_mac_stop_phy(hdev);
7585 hclge_update_link_status(hdev);
7589 for (i = 0; i < handle->kinfo.num_tqps; i++)
7590 hclge_reset_tqp(handle, i);
7592 hclge_config_mac_tnl_int(hdev, false);
7595 hclge_cfg_mac_mode(hdev, false);
7597 hclge_mac_stop_phy(hdev);
7599 /* reset tqp stats */
7600 hclge_reset_tqp_stats(handle);
7601 hclge_update_link_status(hdev);
7604 int hclge_vport_start(struct hclge_vport *vport)
7606 struct hclge_dev *hdev = vport->back;
7608 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7609 vport->last_active_jiffies = jiffies;
7611 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7612 if (vport->vport_id) {
7613 hclge_restore_mac_table_common(vport);
7614 hclge_restore_vport_vlan_table(vport);
7616 hclge_restore_hw_table(hdev);
7620 clear_bit(vport->vport_id, hdev->vport_config_block);
7625 void hclge_vport_stop(struct hclge_vport *vport)
7627 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7630 static int hclge_client_start(struct hnae3_handle *handle)
7632 struct hclge_vport *vport = hclge_get_vport(handle);
7634 return hclge_vport_start(vport);
7637 static void hclge_client_stop(struct hnae3_handle *handle)
7639 struct hclge_vport *vport = hclge_get_vport(handle);
7641 hclge_vport_stop(vport);
7644 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7645 u16 cmdq_resp, u8 resp_code,
7646 enum hclge_mac_vlan_tbl_opcode op)
7648 struct hclge_dev *hdev = vport->back;
7651 dev_err(&hdev->pdev->dev,
7652 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7657 if (op == HCLGE_MAC_VLAN_ADD) {
7658 if (!resp_code || resp_code == 1)
7660 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7661 resp_code == HCLGE_ADD_MC_OVERFLOW)
7664 dev_err(&hdev->pdev->dev,
7665 "add mac addr failed for undefined, code=%u.\n",
7668 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7671 } else if (resp_code == 1) {
7672 dev_dbg(&hdev->pdev->dev,
7673 "remove mac addr failed for miss.\n");
7677 dev_err(&hdev->pdev->dev,
7678 "remove mac addr failed for undefined, code=%u.\n",
7681 } else if (op == HCLGE_MAC_VLAN_LKUP) {
7684 } else if (resp_code == 1) {
7685 dev_dbg(&hdev->pdev->dev,
7686 "lookup mac addr failed for miss.\n");
7690 dev_err(&hdev->pdev->dev,
7691 "lookup mac addr failed for undefined, code=%u.\n",
7696 dev_err(&hdev->pdev->dev,
7697 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7702 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7704 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7706 unsigned int word_num;
7707 unsigned int bit_num;
7709 if (vfid > 255 || vfid < 0)
7712 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7713 word_num = vfid / 32;
7714 bit_num = vfid % 32;
7716 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7718 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7720 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7721 bit_num = vfid % 32;
7723 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7725 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7731 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7733 #define HCLGE_DESC_NUMBER 3
7734 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7737 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7738 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7739 if (desc[i].data[j])
7745 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7746 const u8 *addr, bool is_mc)
7748 const unsigned char *mac_addr = addr;
7749 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7750 (mac_addr[0]) | (mac_addr[1] << 8);
7751 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
7753 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7755 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7756 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7759 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7760 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7763 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7764 struct hclge_mac_vlan_tbl_entry_cmd *req)
7766 struct hclge_dev *hdev = vport->back;
7767 struct hclge_desc desc;
7772 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7774 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7776 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7778 dev_err(&hdev->pdev->dev,
7779 "del mac addr failed for cmd_send, ret =%d.\n",
7783 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7784 retval = le16_to_cpu(desc.retval);
7786 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7787 HCLGE_MAC_VLAN_REMOVE);
7790 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7791 struct hclge_mac_vlan_tbl_entry_cmd *req,
7792 struct hclge_desc *desc,
7795 struct hclge_dev *hdev = vport->back;
7800 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7802 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7803 memcpy(desc[0].data,
7805 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7806 hclge_cmd_setup_basic_desc(&desc[1],
7807 HCLGE_OPC_MAC_VLAN_ADD,
7809 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7810 hclge_cmd_setup_basic_desc(&desc[2],
7811 HCLGE_OPC_MAC_VLAN_ADD,
7813 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7815 memcpy(desc[0].data,
7817 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7818 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7821 dev_err(&hdev->pdev->dev,
7822 "lookup mac addr failed for cmd_send, ret =%d.\n",
7826 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7827 retval = le16_to_cpu(desc[0].retval);
7829 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7830 HCLGE_MAC_VLAN_LKUP);
7833 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7834 struct hclge_mac_vlan_tbl_entry_cmd *req,
7835 struct hclge_desc *mc_desc)
7837 struct hclge_dev *hdev = vport->back;
7844 struct hclge_desc desc;
7846 hclge_cmd_setup_basic_desc(&desc,
7847 HCLGE_OPC_MAC_VLAN_ADD,
7849 memcpy(desc.data, req,
7850 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7851 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7852 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7853 retval = le16_to_cpu(desc.retval);
7855 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7857 HCLGE_MAC_VLAN_ADD);
7859 hclge_cmd_reuse_desc(&mc_desc[0], false);
7860 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7861 hclge_cmd_reuse_desc(&mc_desc[1], false);
7862 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7863 hclge_cmd_reuse_desc(&mc_desc[2], false);
7864 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7865 memcpy(mc_desc[0].data, req,
7866 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7867 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7868 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7869 retval = le16_to_cpu(mc_desc[0].retval);
7871 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7873 HCLGE_MAC_VLAN_ADD);
7877 dev_err(&hdev->pdev->dev,
7878 "add mac addr failed for cmd_send, ret =%d.\n",
7886 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7887 u16 *allocated_size)
7889 struct hclge_umv_spc_alc_cmd *req;
7890 struct hclge_desc desc;
7893 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7894 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7896 req->space_size = cpu_to_le32(space_size);
7898 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7900 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7905 *allocated_size = le32_to_cpu(desc.data[1]);
7910 static int hclge_init_umv_space(struct hclge_dev *hdev)
7912 u16 allocated_size = 0;
7915 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7919 if (allocated_size < hdev->wanted_umv_size)
7920 dev_warn(&hdev->pdev->dev,
7921 "failed to alloc umv space, want %u, get %u\n",
7922 hdev->wanted_umv_size, allocated_size);
7924 hdev->max_umv_size = allocated_size;
7925 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7926 hdev->share_umv_size = hdev->priv_umv_size +
7927 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7932 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7934 struct hclge_vport *vport;
7937 for (i = 0; i < hdev->num_alloc_vport; i++) {
7938 vport = &hdev->vport[i];
7939 vport->used_umv_num = 0;
7942 mutex_lock(&hdev->vport_lock);
7943 hdev->share_umv_size = hdev->priv_umv_size +
7944 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7945 mutex_unlock(&hdev->vport_lock);
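/* UMV (unicast mac vlan) space accounting: every vport owns priv_umv_size
 * private entries, and the remainder of max_umv_size forms a shared pool.
 * A vport is only considered full once both its private quota and the
 * shared pool are used up; hclge_update_umv_space keeps the counters in
 * sync as entries are added and freed.
 */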
7948 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7950 struct hclge_dev *hdev = vport->back;
7954 mutex_lock(&hdev->vport_lock);
7956 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7957 hdev->share_umv_size == 0);
7960 mutex_unlock(&hdev->vport_lock);
7965 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7967 struct hclge_dev *hdev = vport->back;
7970 if (vport->used_umv_num > hdev->priv_umv_size)
7971 hdev->share_umv_size++;
7973 if (vport->used_umv_num > 0)
7974 vport->used_umv_num--;
7976 if (vport->used_umv_num >= hdev->priv_umv_size &&
7977 hdev->share_umv_size > 0)
7978 hdev->share_umv_size--;
7979 vport->used_umv_num++;
7983 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7986 struct hclge_mac_node *mac_node, *tmp;
7988 list_for_each_entry_safe(mac_node, tmp, list, node)
7989 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7995 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7996 enum HCLGE_MAC_NODE_STATE state)
7999 /* from set_rx_mode or tmp_add_list */
8000 case HCLGE_MAC_TO_ADD:
8001 if (mac_node->state == HCLGE_MAC_TO_DEL)
8002 mac_node->state = HCLGE_MAC_ACTIVE;
8004 /* only from set_rx_mode */
8005 case HCLGE_MAC_TO_DEL:
8006 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8007 list_del(&mac_node->node);
8010 mac_node->state = HCLGE_MAC_TO_DEL;
8013 /* only from tmp_add_list, the mac_node->state won't be
8016 case HCLGE_MAC_ACTIVE:
8017 if (mac_node->state == HCLGE_MAC_TO_ADD)
8018 mac_node->state = HCLGE_MAC_ACTIVE;
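/* hclge_update_mac_list - record a requested add/del of a mac address in
 * the vport's uc/mc list. The hardware table is updated later by the
 * service task (see hclge_sync_mac_table), which keeps set_rx_mode cheap.
 */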
8024 int hclge_update_mac_list(struct hclge_vport *vport,
8025 enum HCLGE_MAC_NODE_STATE state,
8026 enum HCLGE_MAC_ADDR_TYPE mac_type,
8027 const unsigned char *addr)
8029 struct hclge_dev *hdev = vport->back;
8030 struct hclge_mac_node *mac_node;
8031 struct list_head *list;
8033 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8034 &vport->uc_mac_list : &vport->mc_mac_list;
8036 spin_lock_bh(&vport->mac_list_lock);
8038 /* if the mac addr is already in the mac list, no need to add a new
8039 * one into it; just check the mac addr state, convert it to a new
8040 * state, or just remove it, or do nothing.
8042 mac_node = hclge_find_mac_node(list, addr);
8044 hclge_update_mac_node(mac_node, state);
8045 spin_unlock_bh(&vport->mac_list_lock);
8046 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8050 /* if this address was never added, it is unnecessary to delete it */
8051 if (state == HCLGE_MAC_TO_DEL) {
8052 spin_unlock_bh(&vport->mac_list_lock);
8053 dev_err(&hdev->pdev->dev,
8054 "failed to delete address %pM from mac list\n",
8059 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8061 spin_unlock_bh(&vport->mac_list_lock);
8065 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8067 mac_node->state = state;
8068 ether_addr_copy(mac_node->mac_addr, addr);
8069 list_add_tail(&mac_node->node, list);
8071 spin_unlock_bh(&vport->mac_list_lock);
8076 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8077 const unsigned char *addr)
8079 struct hclge_vport *vport = hclge_get_vport(handle);
8081 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8085 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8086 const unsigned char *addr)
8088 struct hclge_dev *hdev = vport->back;
8089 struct hclge_mac_vlan_tbl_entry_cmd req;
8090 struct hclge_desc desc;
8091 u16 egress_port = 0;
8094 /* mac addr check */
8095 if (is_zero_ether_addr(addr) ||
8096 is_broadcast_ether_addr(addr) ||
8097 is_multicast_ether_addr(addr)) {
8098 dev_err(&hdev->pdev->dev,
8099 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8100 addr, is_zero_ether_addr(addr),
8101 is_broadcast_ether_addr(addr),
8102 is_multicast_ether_addr(addr));
8106 memset(&req, 0, sizeof(req));
8108 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8109 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8111 req.egress_port = cpu_to_le16(egress_port);
8113 hclge_prepare_mac_addr(&req, addr, false);
8115 /* Lookup the mac address in the mac_vlan table, and add
8116 * it if the entry does not exist. Duplicate unicast entries
8117 * are not allowed in the mac vlan table.
8119 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8120 if (ret == -ENOENT) {
8121 mutex_lock(&hdev->vport_lock);
8122 if (!hclge_is_umv_space_full(vport, false)) {
8123 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8125 hclge_update_umv_space(vport, false);
8126 mutex_unlock(&hdev->vport_lock);
8129 mutex_unlock(&hdev->vport_lock);
8131 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8132 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8133 hdev->priv_umv_size);
8138 /* check if we just hit the duplicate */
8140 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8141 vport->vport_id, addr);
8145 dev_err(&hdev->pdev->dev,
8146 "PF failed to add unicast entry(%pM) in the MAC table\n",
8152 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8153 const unsigned char *addr)
8155 struct hclge_vport *vport = hclge_get_vport(handle);
8157 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8161 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8162 const unsigned char *addr)
8164 struct hclge_dev *hdev = vport->back;
8165 struct hclge_mac_vlan_tbl_entry_cmd req;
8168 /* mac addr check */
8169 if (is_zero_ether_addr(addr) ||
8170 is_broadcast_ether_addr(addr) ||
8171 is_multicast_ether_addr(addr)) {
8172 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8177 memset(&req, 0, sizeof(req));
8178 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8179 hclge_prepare_mac_addr(&req, addr, false);
8180 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8182 mutex_lock(&hdev->vport_lock);
8183 hclge_update_umv_space(vport, true);
8184 mutex_unlock(&hdev->vport_lock);
8185 } else if (ret == -ENOENT) {
8192 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8193 const unsigned char *addr)
8195 struct hclge_vport *vport = hclge_get_vport(handle);
8197 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8201 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8202 const unsigned char *addr)
8204 struct hclge_dev *hdev = vport->back;
8205 struct hclge_mac_vlan_tbl_entry_cmd req;
8206 struct hclge_desc desc[3];
8209 /* mac addr check */
8210 if (!is_multicast_ether_addr(addr)) {
8211 dev_err(&hdev->pdev->dev,
8212 "Add mc mac err! invalid mac:%pM.\n",
8216 memset(&req, 0, sizeof(req));
8217 hclge_prepare_mac_addr(&req, addr, true);
8218 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8220 /* This mac addr does not exist, add a new entry for it */
8221 memset(desc[0].data, 0, sizeof(desc[0].data));
8222 memset(desc[1].data, 0, sizeof(desc[0].data));
8223 memset(desc[2].data, 0, sizeof(desc[0].data));
8225 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8228 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8230 /* if the table has already overflowed, do not print each time */
8231 if (status == -ENOSPC &&
8232 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8233 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8238 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8239 const unsigned char *addr)
8241 struct hclge_vport *vport = hclge_get_vport(handle);
8243 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8247 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8248 const unsigned char *addr)
8250 struct hclge_dev *hdev = vport->back;
8251 struct hclge_mac_vlan_tbl_entry_cmd req;
8252 enum hclge_cmd_status status;
8253 struct hclge_desc desc[3];
8255 /* mac addr check */
8256 if (!is_multicast_ether_addr(addr)) {
8257 dev_dbg(&hdev->pdev->dev,
8258 "Remove mc mac err! invalid mac:%pM.\n",
8263 memset(&req, 0, sizeof(req));
8264 hclge_prepare_mac_addr(&req, addr, true);
8265 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8267 /* This mac addr exists, remove this handle's VFID from it */
8268 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8272 if (hclge_is_all_function_id_zero(desc))
8273 /* All the vfids are zero, so delete this entry */
8274 status = hclge_remove_mac_vlan_tbl(vport, &req);
8276 /* Not all the vfids are zero, so just update the vfid bitmap */
8277 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8279 } else if (status == -ENOENT) {
8286 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8287 struct list_head *list,
8288 int (*sync)(struct hclge_vport *,
8289 const unsigned char *))
8291 struct hclge_mac_node *mac_node, *tmp;
8294 list_for_each_entry_safe(mac_node, tmp, list, node) {
8295 ret = sync(vport, mac_node->mac_addr);
8297 mac_node->state = HCLGE_MAC_ACTIVE;
8299 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8306 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8307 struct list_head *list,
8308 int (*unsync)(struct hclge_vport *,
8309 const unsigned char *))
8311 struct hclge_mac_node *mac_node, *tmp;
8314 list_for_each_entry_safe(mac_node, tmp, list, node) {
8315 ret = unsync(vport, mac_node->mac_addr);
8316 if (!ret || ret == -ENOENT) {
8317 list_del(&mac_node->node);
8320 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8327 static bool hclge_sync_from_add_list(struct list_head *add_list,
8328 struct list_head *mac_list)
8330 struct hclge_mac_node *mac_node, *tmp, *new_node;
8331 bool all_added = true;
8333 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8334 if (mac_node->state == HCLGE_MAC_TO_ADD)
8337 /* if the mac address from tmp_add_list is not in the
8338 * uc/mc_mac_list, it means a TO_DEL request was received
8339 * during the time window of adding the mac address into the
8340 * mac table. If the mac_node state is ACTIVE, change it to
8341 * TO_DEL and it will be removed next time; else it must be
8342 * TO_ADD, meaning this address hasn't been added into the mac
8343 * table yet, so just remove the mac node.
8345 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8347 hclge_update_mac_node(new_node, mac_node->state);
8348 list_del(&mac_node->node);
8350 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8351 mac_node->state = HCLGE_MAC_TO_DEL;
8352 list_del(&mac_node->node);
8353 list_add_tail(&mac_node->node, mac_list);
8355 list_del(&mac_node->node);
8363 static void hclge_sync_from_del_list(struct list_head *del_list,
8364 struct list_head *mac_list)
8366 struct hclge_mac_node *mac_node, *tmp, *new_node;
8368 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8369 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8371 /* If the mac addr exists in the mac list, it means
8372 * a new TO_ADD request was received during the time window
8373 * of configuring the mac address. Since the mac node
8374 * state is TO_ADD and the address is already in the
8375 * hardware (because the delete failed), we just need
8376 * to change the mac node state to ACTIVE.
8378 new_node->state = HCLGE_MAC_ACTIVE;
8379 list_del(&mac_node->node);
8382 list_del(&mac_node->node);
8383 list_add_tail(&mac_node->node, mac_list);
8388 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8389 enum HCLGE_MAC_ADDR_TYPE mac_type,
8392 if (mac_type == HCLGE_MAC_ADDR_UC) {
8394 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8396 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8399 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8401 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
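/* hclge_sync_vport_mac_table - flush pending mac list changes to hardware:
 * TO_DEL nodes move to a temporary delete list and TO_ADD nodes are copied
 * to a temporary add list under mac_list_lock, so the slow mac_vlan table
 * commands run without the spinlock held; failures are merged back into
 * the list for a later retry.
 */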
8405 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8406 enum HCLGE_MAC_ADDR_TYPE mac_type)
8408 struct hclge_mac_node *mac_node, *tmp, *new_node;
8409 struct list_head tmp_add_list, tmp_del_list;
8410 struct list_head *list;
8413 INIT_LIST_HEAD(&tmp_add_list);
8414 INIT_LIST_HEAD(&tmp_del_list);
8416 /* move the mac addrs to the tmp_add_list and tmp_del_list, then
8417 * we can add/delete these mac addrs outside the spin lock
8419 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8420 &vport->uc_mac_list : &vport->mc_mac_list;
8422 spin_lock_bh(&vport->mac_list_lock);
8424 list_for_each_entry_safe(mac_node, tmp, list, node) {
8425 switch (mac_node->state) {
8426 case HCLGE_MAC_TO_DEL:
8427 list_del(&mac_node->node);
8428 list_add_tail(&mac_node->node, &tmp_del_list);
8430 case HCLGE_MAC_TO_ADD:
8431 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8434 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8435 new_node->state = mac_node->state;
8436 list_add_tail(&new_node->node, &tmp_add_list);
8444 spin_unlock_bh(&vport->mac_list_lock);
8446 /* delete first, in order to get max mac table space for adding */
8447 if (mac_type == HCLGE_MAC_ADDR_UC) {
8448 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8449 hclge_rm_uc_addr_common);
8450 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8451 hclge_add_uc_addr_common);
8453 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8454 hclge_rm_mc_addr_common);
8455 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8456 hclge_add_mc_addr_common);
8459 /* if adding/deleting some mac addresses failed, move them back
8460 * to the mac_list and retry next time.
8462 spin_lock_bh(&vport->mac_list_lock);
8464 hclge_sync_from_del_list(&tmp_del_list, list);
8465 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8467 spin_unlock_bh(&vport->mac_list_lock);
8469 hclge_update_overflow_flags(vport, mac_type, all_added);
8472 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8474 struct hclge_dev *hdev = vport->back;
8476 if (test_bit(vport->vport_id, hdev->vport_config_block))
8479 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8485 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8489 for (i = 0; i < hdev->num_alloc_vport; i++) {
8490 struct hclge_vport *vport = &hdev->vport[i];
8492 if (!hclge_need_sync_mac_table(vport))
8495 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8496 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8500 static void hclge_build_del_list(struct list_head *list,
8502 struct list_head *tmp_del_list)
8504 struct hclge_mac_node *mac_cfg, *tmp;
8506 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8507 switch (mac_cfg->state) {
8508 case HCLGE_MAC_TO_DEL:
8509 case HCLGE_MAC_ACTIVE:
8510 list_del(&mac_cfg->node);
8511 list_add_tail(&mac_cfg->node, tmp_del_list);
8513 case HCLGE_MAC_TO_ADD:
8515 list_del(&mac_cfg->node);
8523 static void hclge_unsync_del_list(struct hclge_vport *vport,
8524 int (*unsync)(struct hclge_vport *vport,
8525 const unsigned char *addr),
8527 struct list_head *tmp_del_list)
8529 struct hclge_mac_node *mac_cfg, *tmp;
8532 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8533 ret = unsync(vport, mac_cfg->mac_addr);
8534 if (!ret || ret == -ENOENT) {
8535 /* clear all mac addrs from hardware, but keep these
8536 * mac addrs in the mac list, and restore them after
8537 * the vf reset is finished.
8540 mac_cfg->state == HCLGE_MAC_ACTIVE) {
8541 mac_cfg->state = HCLGE_MAC_TO_ADD;
8543 list_del(&mac_cfg->node);
8546 } else if (is_del_list) {
8547 mac_cfg->state = HCLGE_MAC_TO_DEL;
8552 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8553 enum HCLGE_MAC_ADDR_TYPE mac_type)
8555 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8556 struct hclge_dev *hdev = vport->back;
8557 struct list_head tmp_del_list, *list;
8559 if (mac_type == HCLGE_MAC_ADDR_UC) {
8560 list = &vport->uc_mac_list;
8561 unsync = hclge_rm_uc_addr_common;
8563 list = &vport->mc_mac_list;
8564 unsync = hclge_rm_mc_addr_common;
8567 INIT_LIST_HEAD(&tmp_del_list);
8570 set_bit(vport->vport_id, hdev->vport_config_block);
8572 spin_lock_bh(&vport->mac_list_lock);
8574 hclge_build_del_list(list, is_del_list, &tmp_del_list);
8576 spin_unlock_bh(&vport->mac_list_lock);
8578 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8580 spin_lock_bh(&vport->mac_list_lock);
8582 hclge_sync_from_del_list(&tmp_del_list, list);
8584 spin_unlock_bh(&vport->mac_list_lock);
8587 /* remove all mac addresses when uninitializing */
8588 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8589 enum HCLGE_MAC_ADDR_TYPE mac_type)
8591 struct hclge_mac_node *mac_node, *tmp;
8592 struct hclge_dev *hdev = vport->back;
8593 struct list_head tmp_del_list, *list;
8595 INIT_LIST_HEAD(&tmp_del_list);
8597 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8598 &vport->uc_mac_list : &vport->mc_mac_list;
8600 spin_lock_bh(&vport->mac_list_lock);
8602 list_for_each_entry_safe(mac_node, tmp, list, node) {
8603 switch (mac_node->state) {
8604 case HCLGE_MAC_TO_DEL:
8605 case HCLGE_MAC_ACTIVE:
8606 list_del(&mac_node->node);
8607 list_add_tail(&mac_node->node, &tmp_del_list);
8609 case HCLGE_MAC_TO_ADD:
8610 list_del(&mac_node->node);
8616 spin_unlock_bh(&vport->mac_list_lock);
8618 if (mac_type == HCLGE_MAC_ADDR_UC)
8619 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8620 hclge_rm_uc_addr_common);
8622 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8623 hclge_rm_mc_addr_common);
8625 if (!list_empty(&tmp_del_list))
8626 dev_warn(&hdev->pdev->dev,
8627 "uninit %s mac list for vport %u not completely.\n",
8628 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8631 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8632 list_del(&mac_node->node);
8637 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8639 struct hclge_vport *vport;
8642 for (i = 0; i < hdev->num_alloc_vport; i++) {
8643 vport = &hdev->vport[i];
8644 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8645 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8649 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8650 u16 cmdq_resp, u8 resp_code)
8652 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
8653 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
8654 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
8655 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
8660 dev_err(&hdev->pdev->dev,
8661 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8666 switch (resp_code) {
8667 case HCLGE_ETHERTYPE_SUCCESS_ADD:
8668 case HCLGE_ETHERTYPE_ALREADY_ADD:
8671 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8672 dev_err(&hdev->pdev->dev,
8673 "add mac ethertype failed for manager table overflow.\n");
8674 return_status = -EIO;
8676 case HCLGE_ETHERTYPE_KEY_CONFLICT:
8677 dev_err(&hdev->pdev->dev,
8678 "add mac ethertype failed for key conflict.\n");
8679 return_status = -EIO;
8682 dev_err(&hdev->pdev->dev,
8683 "add mac ethertype failed for undefined, code=%u.\n",
8685 return_status = -EIO;
8688 return return_status;
8691 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8694 struct hclge_mac_vlan_tbl_entry_cmd req;
8695 struct hclge_dev *hdev = vport->back;
8696 struct hclge_desc desc;
8697 u16 egress_port = 0;
8700 if (is_zero_ether_addr(mac_addr))
8703 memset(&req, 0, sizeof(req));
8704 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8705 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8706 req.egress_port = cpu_to_le16(egress_port);
8707 hclge_prepare_mac_addr(&req, mac_addr, false);
8709 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8712 vf_idx += HCLGE_VF_VPORT_START_NUM;
8713 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8715 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8721 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8724 struct hclge_vport *vport = hclge_get_vport(handle);
8725 struct hclge_dev *hdev = vport->back;
8727 vport = hclge_get_vf_vport(hdev, vf);
8731 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8732 dev_info(&hdev->pdev->dev,
8733 "Specified MAC(=%pM) is same as before, no change committed!\n",
8738 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8739 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8744 ether_addr_copy(vport->vf_info.mac, mac_addr);
8746 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8747 dev_info(&hdev->pdev->dev,
8748 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8750 return hclge_inform_reset_assert_to_vf(vport);
8753 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8758 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8759 const struct hclge_mac_mgr_tbl_entry_cmd *req)
8761 struct hclge_desc desc;
8766 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8767 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8769 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8771 dev_err(&hdev->pdev->dev,
8772 "add mac ethertype failed for cmd_send, ret =%d.\n",
8777 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8778 retval = le16_to_cpu(desc.retval);
8780 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8783 static int init_mgr_tbl(struct hclge_dev *hdev)
8788 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8789 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8791 dev_err(&hdev->pdev->dev,
8792 "add mac ethertype failed, ret =%d.\n",
8801 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8803 struct hclge_vport *vport = hclge_get_vport(handle);
8804 struct hclge_dev *hdev = vport->back;
8806 ether_addr_copy(p, hdev->hw.mac.mac_addr);
8809 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8810 const u8 *old_addr, const u8 *new_addr)
8812 struct list_head *list = &vport->uc_mac_list;
8813 struct hclge_mac_node *old_node, *new_node;
8815 new_node = hclge_find_mac_node(list, new_addr);
8817 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8821 new_node->state = HCLGE_MAC_TO_ADD;
8822 ether_addr_copy(new_node->mac_addr, new_addr);
8823 list_add(&new_node->node, list);
8825 if (new_node->state == HCLGE_MAC_TO_DEL)
8826 new_node->state = HCLGE_MAC_ACTIVE;
8828 /* make sure the new addr is at the list head: after a global/imp
8829 * reset, which clears the mac table in hardware, the dev addr might
8830 * otherwise not be re-added into the mac table because of the umv
8831 * space limitation.
8833 list_move(&new_node->node, list);
8836 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8837 old_node = hclge_find_mac_node(list, old_addr);
8839 if (old_node->state == HCLGE_MAC_TO_ADD) {
8840 list_del(&old_node->node);
8843 old_node->state = HCLGE_MAC_TO_DEL;
8848 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8853 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8856 const unsigned char *new_addr = (const unsigned char *)p;
8857 struct hclge_vport *vport = hclge_get_vport(handle);
8858 struct hclge_dev *hdev = vport->back;
8859 unsigned char *old_addr = NULL;
8862 /* mac addr check */
8863 if (is_zero_ether_addr(new_addr) ||
8864 is_broadcast_ether_addr(new_addr) ||
8865 is_multicast_ether_addr(new_addr)) {
8866 dev_err(&hdev->pdev->dev,
8867 "change uc mac err! invalid mac: %pM.\n",
8872 ret = hclge_pause_addr_cfg(hdev, new_addr);
8874 dev_err(&hdev->pdev->dev,
8875 "failed to configure mac pause address, ret = %d\n",
8881 old_addr = hdev->hw.mac.mac_addr;
8883 spin_lock_bh(&vport->mac_list_lock);
8884 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8886 dev_err(&hdev->pdev->dev,
8887 "failed to change the mac addr:%pM, ret = %d\n",
8889 spin_unlock_bh(&vport->mac_list_lock);
8892 hclge_pause_addr_cfg(hdev, old_addr);
8896 /* The device address must be updated under the spinlock to prevent
8897 * it from being removed concurrently by the set_rx_mode path.
8899 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8900 spin_unlock_bh(&vport->mac_list_lock);
8902 hclge_task_schedule(hdev, 0);
8907 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8910 struct hclge_vport *vport = hclge_get_vport(handle);
8911 struct hclge_dev *hdev = vport->back;
8913 if (!hdev->hw.mac.phydev)
8916 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
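/* hclge_set_vlan_filter_ctrl() performs a read-modify-write: it reads
 * the current vlan filter config for vlan_type, sets or clears the
 * fe_type bits according to filter_en, and writes the result back.
 */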
8919 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8920 u8 fe_type, bool filter_en, u8 vf_id)
8922 struct hclge_vlan_filter_ctrl_cmd *req;
8923 struct hclge_desc desc;
8926 /* read current vlan filter parameter */
8927 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8928 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8929 req->vlan_type = vlan_type;
8932 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8934 dev_err(&hdev->pdev->dev,
8935 "failed to get vlan filter config, ret = %d.\n", ret);
8939 /* modify and write new config parameter */
8940 hclge_cmd_reuse_desc(&desc, false);
8941 req->vlan_fe = filter_en ?
8942 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8944 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8946 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8952 #define HCLGE_FILTER_TYPE_VF 0
8953 #define HCLGE_FILTER_TYPE_PORT 1
8954 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8955 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8956 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8957 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8958 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8959 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8960 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8961 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8962 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
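/* The egress/ingress enables combine the NIC and RoCE bits; the _V1_B
 * variant is the single enable bit used on older (V1) devices.
 */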
8964 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8966 struct hclge_vport *vport = hclge_get_vport(handle);
8967 struct hclge_dev *hdev = vport->back;
8969 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8970 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8971 HCLGE_FILTER_FE_EGRESS, enable, 0);
8972 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8973 HCLGE_FILTER_FE_INGRESS, enable, 0);
8975 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8976 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8980 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8982 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8985 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
8986 bool is_kill, u16 vlan,
8987 struct hclge_desc *desc)
8989 struct hclge_vlan_filter_vf_cfg_cmd *req0;
8990 struct hclge_vlan_filter_vf_cfg_cmd *req1;
8995 hclge_cmd_setup_basic_desc(&desc[0],
8996 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8997 hclge_cmd_setup_basic_desc(&desc[1],
8998 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9000 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9002 vf_byte_off = vfid / 8;
9003 vf_byte_val = 1 << (vfid % 8);
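/* The VF bitmap spans both descriptors; e.g. vfid 10 selects byte 1
 * (10 / 8) with bit value 0x04 (1 << (10 % 8)).
 */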
9005 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9006 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9008 req0->vlan_id = cpu_to_le16(vlan);
9009 req0->vlan_cfg = is_kill;
9011 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9012 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9014 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9016 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9018 dev_err(&hdev->pdev->dev,
9019 "Send vf vlan command fail, ret =%d.\n",
9027 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9028 bool is_kill, struct hclge_desc *desc)
9030 struct hclge_vlan_filter_vf_cfg_cmd *req;
9032 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9035 #define HCLGE_VF_VLAN_NO_ENTRY 2
9036 if (!req->resp_code || req->resp_code == 1)
9039 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9040 set_bit(vfid, hdev->vf_vlan_full);
9041 dev_warn(&hdev->pdev->dev,
9042 "vf vlan table is full, vf vlan filter is disabled\n");
9046 dev_err(&hdev->pdev->dev,
9047 "Add vf vlan filter fail, ret =%u.\n",
9050 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9051 if (!req->resp_code)
9054 /* When the vf vlan table is full, the vf vlan filter is disabled
9055 * and new vlan ids are never added to the table. Just return 0
9056 * without a warning, to avoid flooding the log with verbose
9057 * messages on unload.
9059 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9062 dev_err(&hdev->pdev->dev,
9063 "Kill vf vlan filter fail, ret =%u.\n",
9070 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9071 bool is_kill, u16 vlan,
9074 struct hclge_vport *vport = &hdev->vport[vfid];
9075 struct hclge_desc desc[2];
9078 /* If the vf vlan table is full, firmware disables the vf vlan
9079 * filter, so adding a new vlan id is neither possible nor necessary.
9080 * If spoof check is enabled while the table is full, no new vlan may
9081 * be added, because tx packets with that vlan id would be dropped.
9083 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9084 if (vport->vf_info.spoofchk && vlan) {
9085 dev_err(&hdev->pdev->dev,
9086 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9092 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9096 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
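/* hclge_set_vlan_filter_hw(): program the per-vf entry, track vport
 * membership in the vlan_table bitmap, and touch the port-level vlan
 * filter only when the first vport joins or the last one leaves.
 */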
9099 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9100 u16 vlan_id, bool is_kill)
9102 struct hclge_vlan_filter_pf_cfg_cmd *req;
9103 struct hclge_desc desc;
9104 u8 vlan_offset_byte_val;
9105 u8 vlan_offset_byte;
9109 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9111 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9112 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9113 HCLGE_VLAN_BYTE_SIZE;
9114 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
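/* Worked example, assuming HCLGE_VLAN_ID_OFFSET_STEP is 160 (as the
 * vlan_offset_160 name suggests) and HCLGE_VLAN_BYTE_SIZE is 8:
 * vlan_id 200 yields offset 1 (200 / 160), byte 5 ((200 % 160) / 8)
 * and bit value 0x01 (1 << (200 % 8)).
 */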
9116 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9117 req->vlan_offset = vlan_offset_160;
9118 req->vlan_cfg = is_kill;
9119 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9121 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9123 dev_err(&hdev->pdev->dev,
9124 "port vlan command, send fail, ret =%d.\n", ret);
9128 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9129 u16 vport_id, u16 vlan_id,
9132 u16 vport_idx, vport_num = 0;
9135 if (is_kill && !vlan_id)
9138 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
9141 dev_err(&hdev->pdev->dev,
9142 "Set %u vport vlan filter config fail, ret =%d.\n",
9147 /* vlan 0 may be added twice when 8021q module is enabled */
9148 if (!is_kill && !vlan_id &&
9149 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9152 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9153 dev_err(&hdev->pdev->dev,
9154 "Add port vlan failed, vport %u is already in vlan %u\n",
9160 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9161 dev_err(&hdev->pdev->dev,
9162 "Delete port vlan failed, vport %u is not in vlan %u\n",
9167 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9170 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9171 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9177 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9179 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9180 struct hclge_vport_vtag_tx_cfg_cmd *req;
9181 struct hclge_dev *hdev = vport->back;
9182 struct hclge_desc desc;
9186 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9188 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9189 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9190 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9191 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9192 vcfg->accept_tag1 ? 1 : 0);
9193 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9194 vcfg->accept_untag1 ? 1 : 0);
9195 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9196 vcfg->accept_tag2 ? 1 : 0);
9197 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9198 vcfg->accept_untag2 ? 1 : 0);
9199 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9200 vcfg->insert_tag1_en ? 1 : 0);
9201 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9202 vcfg->insert_tag2_en ? 1 : 0);
9203 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9204 vcfg->tag_shift_mode_en ? 1 : 0);
9205 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9207 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9208 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9209 HCLGE_VF_NUM_PER_BYTE;
9210 req->vf_bitmap[bmap_index] =
9211 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
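/* Worked example, assuming HCLGE_VF_NUM_PER_CMD is 64 and
 * HCLGE_VF_NUM_PER_BYTE is 8: vport_id 18 yields vf_offset 0
 * (18 / 64), bmap_index 2 ((18 % 64) / 8) and bit value 0x04
 * (1U << (18 % 8)).
 */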
9213 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9215 dev_err(&hdev->pdev->dev,
9216 "Send port txvlan cfg command fail, ret =%d\n",
9222 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9224 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9225 struct hclge_vport_vtag_rx_cfg_cmd *req;
9226 struct hclge_dev *hdev = vport->back;
9227 struct hclge_desc desc;
9231 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9233 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9234 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9235 vcfg->strip_tag1_en ? 1 : 0);
9236 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9237 vcfg->strip_tag2_en ? 1 : 0);
9238 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9239 vcfg->vlan1_vlan_prionly ? 1 : 0);
9240 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9241 vcfg->vlan2_vlan_prionly ? 1 : 0);
9242 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9243 vcfg->strip_tag1_discard_en ? 1 : 0);
9244 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9245 vcfg->strip_tag2_discard_en ? 1 : 0);
9247 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9248 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9249 HCLGE_VF_NUM_PER_BYTE;
9250 req->vf_bitmap[bmap_index] =
9251 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9253 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9255 dev_err(&hdev->pdev->dev,
9256 "Send port rxvlan cfg command fail, ret =%d\n",
9262 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9263 u16 port_base_vlan_state,
9268 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9269 vport->txvlan_cfg.accept_tag1 = true;
9270 vport->txvlan_cfg.insert_tag1_en = false;
9271 vport->txvlan_cfg.default_tag1 = 0;
9273 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9275 vport->txvlan_cfg.accept_tag1 =
9276 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9277 vport->txvlan_cfg.insert_tag1_en = true;
9278 vport->txvlan_cfg.default_tag1 = vlan_tag;
9281 vport->txvlan_cfg.accept_untag1 = true;
9283 /* accept_tag2 and accept_untag2 are not supported on
9284 * pdev revision 0x20; newer revisions support them, but
9285 * these two fields cannot be configured by the user.
9287 vport->txvlan_cfg.accept_tag2 = true;
9288 vport->txvlan_cfg.accept_untag2 = true;
9289 vport->txvlan_cfg.insert_tag2_en = false;
9290 vport->txvlan_cfg.default_tag2 = 0;
9291 vport->txvlan_cfg.tag_shift_mode_en = true;
9293 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9294 vport->rxvlan_cfg.strip_tag1_en = false;
9295 vport->rxvlan_cfg.strip_tag2_en =
9296 vport->rxvlan_cfg.rx_vlan_offload_en;
9297 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9299 vport->rxvlan_cfg.strip_tag1_en =
9300 vport->rxvlan_cfg.rx_vlan_offload_en;
9301 vport->rxvlan_cfg.strip_tag2_en = true;
9302 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9305 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9306 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9307 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9309 ret = hclge_set_vlan_tx_offload_cfg(vport);
9313 return hclge_set_vlan_rx_offload_cfg(vport);
9316 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9318 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9319 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9320 struct hclge_desc desc;
9323 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9324 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9325 rx_req->ot_fst_vlan_type =
9326 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9327 rx_req->ot_sec_vlan_type =
9328 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9329 rx_req->in_fst_vlan_type =
9330 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9331 rx_req->in_sec_vlan_type =
9332 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9334 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9336 dev_err(&hdev->pdev->dev,
9337 "Send rxvlan protocol type command fail, ret =%d\n",
9342 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9344 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9345 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9346 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9348 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9350 dev_err(&hdev->pdev->dev,
9351 "Send txvlan protocol type command fail, ret =%d\n",
9357 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9359 #define HCLGE_DEF_VLAN_TYPE 0x8100
9361 struct hnae3_handle *handle = &hdev->vport[0].nic;
9362 struct hclge_vport *vport;
9366 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9367 /* for revision 0x21, vf vlan filter is per function */
9368 for (i = 0; i < hdev->num_alloc_vport; i++) {
9369 vport = &hdev->vport[i];
9370 ret = hclge_set_vlan_filter_ctrl(hdev,
9371 HCLGE_FILTER_TYPE_VF,
9372 HCLGE_FILTER_FE_EGRESS,
9379 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9380 HCLGE_FILTER_FE_INGRESS, true,
9385 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9386 HCLGE_FILTER_FE_EGRESS_V1_B,
9392 handle->netdev_flags |= HNAE3_VLAN_FLTR;
9394 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9395 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9396 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9397 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9398 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9399 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9401 ret = hclge_set_vlan_protocol_type(hdev);
9405 for (i = 0; i < hdev->num_alloc_vport; i++) {
9408 vport = &hdev->vport[i];
9409 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9411 ret = hclge_vlan_offload_cfg(vport,
9412 vport->port_base_vlan_cfg.state,
9418 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9421 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9424 struct hclge_vport_vlan_cfg *vlan;
9426 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9430 vlan->hd_tbl_status = writen_to_tbl;
9431 vlan->vlan_id = vlan_id;
9433 list_add_tail(&vlan->node, &vport->vlan_list);
9436 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9438 struct hclge_vport_vlan_cfg *vlan, *tmp;
9439 struct hclge_dev *hdev = vport->back;
9442 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9443 if (!vlan->hd_tbl_status) {
9444 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9446 vlan->vlan_id, false);
9448 dev_err(&hdev->pdev->dev,
9449 "restore vport vlan list failed, ret=%d\n",
9454 vlan->hd_tbl_status = true;
9460 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9463 struct hclge_vport_vlan_cfg *vlan, *tmp;
9464 struct hclge_dev *hdev = vport->back;
9466 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9467 if (vlan->vlan_id == vlan_id) {
9468 if (is_write_tbl && vlan->hd_tbl_status)
9469 hclge_set_vlan_filter_hw(hdev,
9475 list_del(&vlan->node);
9482 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9484 struct hclge_vport_vlan_cfg *vlan, *tmp;
9485 struct hclge_dev *hdev = vport->back;
9487 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9488 if (vlan->hd_tbl_status)
9489 hclge_set_vlan_filter_hw(hdev,
9495 vlan->hd_tbl_status = false;
9497 list_del(&vlan->node);
9501 clear_bit(vport->vport_id, hdev->vf_vlan_full);
9504 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9506 struct hclge_vport_vlan_cfg *vlan, *tmp;
9507 struct hclge_vport *vport;
9510 for (i = 0; i < hdev->num_alloc_vport; i++) {
9511 vport = &hdev->vport[i];
9512 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9513 list_del(&vlan->node);
9519 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9521 struct hclge_vport_vlan_cfg *vlan, *tmp;
9522 struct hclge_dev *hdev = vport->back;
9528 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9529 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9530 state = vport->port_base_vlan_cfg.state;
9532 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9533 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9534 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9535 vport->vport_id, vlan_id,
9540 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9541 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9543 vlan->vlan_id, false);
9546 vlan->hd_tbl_status = true;
9550 /* For global reset and IMP reset, hardware clears the mac table, so
9551 * change the mac address state from ACTIVE to TO_ADD; the entries are
9552 * then restored by the service task once the reset completes.
9553 * Furthermore, mac addresses in state TO_DEL or DEL_FAIL do not need
9554 * to be restored after reset, so simply remove those nodes from mac_list.
9556 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9558 struct hclge_mac_node *mac_node, *tmp;
9560 list_for_each_entry_safe(mac_node, tmp, list, node) {
9561 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9562 mac_node->state = HCLGE_MAC_TO_ADD;
9563 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9564 list_del(&mac_node->node);
9570 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9572 spin_lock_bh(&vport->mac_list_lock);
9574 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9575 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9576 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9578 spin_unlock_bh(&vport->mac_list_lock);
9581 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9583 struct hclge_vport *vport = &hdev->vport[0];
9584 struct hnae3_handle *handle = &vport->nic;
9586 hclge_restore_mac_table_common(vport);
9587 hclge_restore_vport_vlan_table(vport);
9588 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9590 hclge_restore_fd_entries(handle);
9593 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9595 struct hclge_vport *vport = hclge_get_vport(handle);
9597 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9598 vport->rxvlan_cfg.strip_tag1_en = false;
9599 vport->rxvlan_cfg.strip_tag2_en = enable;
9600 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9602 vport->rxvlan_cfg.strip_tag1_en = enable;
9603 vport->rxvlan_cfg.strip_tag2_en = true;
9604 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9607 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9608 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9609 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9610 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9612 return hclge_set_vlan_rx_offload_cfg(vport);
9615 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9616 u16 port_base_vlan_state,
9617 struct hclge_vlan_info *new_info,
9618 struct hclge_vlan_info *old_info)
9620 struct hclge_dev *hdev = vport->back;
9623 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9624 hclge_rm_vport_all_vlan_table(vport, false);
9625 return hclge_set_vlan_filter_hw(hdev,
9626 htons(new_info->vlan_proto),
9632 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9633 vport->vport_id, old_info->vlan_tag,
9638 return hclge_add_vport_all_vlan_table(vport);
9641 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9642 struct hclge_vlan_info *vlan_info)
9644 struct hnae3_handle *nic = &vport->nic;
9645 struct hclge_vlan_info *old_vlan_info;
9646 struct hclge_dev *hdev = vport->back;
9649 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9651 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9655 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9656 /* add new VLAN tag */
9657 ret = hclge_set_vlan_filter_hw(hdev,
9658 htons(vlan_info->vlan_proto),
9660 vlan_info->vlan_tag,
9665 /* remove old VLAN tag */
9666 ret = hclge_set_vlan_filter_hw(hdev,
9667 htons(old_vlan_info->vlan_proto),
9669 old_vlan_info->vlan_tag,
9677 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9682 /* update state only when disabling/enabling port-based VLAN */
9683 vport->port_base_vlan_cfg.state = state;
9684 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9685 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9687 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9690 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9691 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9692 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9697 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9698 enum hnae3_port_base_vlan_state state,
9701 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9703 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9705 return HNAE3_PORT_BASE_VLAN_ENABLE;
9708 return HNAE3_PORT_BASE_VLAN_DISABLE;
9709 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9710 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9712 return HNAE3_PORT_BASE_VLAN_MODIFY;
9716 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9717 u16 vlan, u8 qos, __be16 proto)
9719 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9720 struct hclge_vport *vport = hclge_get_vport(handle);
9721 struct hclge_dev *hdev = vport->back;
9722 struct hclge_vlan_info vlan_info;
9726 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9729 vport = hclge_get_vf_vport(hdev, vfid);
9733 /* qos is a 3-bit value, so it cannot be bigger than 7 */
9734 if (vlan > VLAN_N_VID - 1 || qos > 7)
9736 if (proto != htons(ETH_P_8021Q))
9737 return -EPROTONOSUPPORT;
9739 state = hclge_get_port_base_vlan_state(vport,
9740 vport->port_base_vlan_cfg.state,
9742 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9745 vlan_info.vlan_tag = vlan;
9746 vlan_info.qos = qos;
9747 vlan_info.vlan_proto = ntohs(proto);
9749 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9751 dev_err(&hdev->pdev->dev,
9752 "failed to update port base vlan for vf %d, ret = %d\n",
9757 /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
9760 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9761 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9762 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9763 vport->vport_id, state,
9770 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9772 struct hclge_vlan_info *vlan_info;
9773 struct hclge_vport *vport;
9777 /* clear port-based vlan for all VFs */
9778 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9779 vport = &hdev->vport[vf];
9780 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9782 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9784 vlan_info->vlan_tag, true);
9786 dev_err(&hdev->pdev->dev,
9787 "failed to clear vf vlan for vf%d, ret = %d\n",
9788 vf - HCLGE_VF_VPORT_START_NUM, ret);
9792 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9793 u16 vlan_id, bool is_kill)
9795 struct hclge_vport *vport = hclge_get_vport(handle);
9796 struct hclge_dev *hdev = vport->back;
9797 bool writen_to_tbl = false;
9800 /* When the device is resetting or reset has failed, firmware cannot
9801 * handle mailbox messages; just record the vlan id and remove it after the reset finishes.
9804 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9805 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9806 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9810 /* When port-based vlan is enabled, it is used as the vlan filter
9811 * entry. In that case the vlan filter table is not updated when the
9812 * user adds a new vlan or removes an existing one; only the vport
9813 * vlan list is updated. The vlan ids in the list are written to the
9814 * vlan filter table once port-based vlan is disabled.
9816 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9817 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9819 writen_to_tbl = true;
9824 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9826 hclge_add_vport_vlan_table(vport, vlan_id,
9828 } else if (is_kill) {
9829 /* When removing the hw vlan filter entry fails, record the vlan id
9830 * and retry the removal later, to stay consistent with the stack.
9833 set_bit(vlan_id, vport->vlan_del_fail_bmap);
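/* hclge_sync_vlan_filter(): retry hardware removal of vlan ids whose
 * earlier deletion failed (recorded in vlan_del_fail_bmap), walking
 * every vport and bounded by HCLGE_MAX_SYNC_COUNT attempts per pass.
 */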
9838 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9840 #define HCLGE_MAX_SYNC_COUNT 60
9842 int i, ret, sync_cnt = 0;
9845 /* start from vport 1, since the PF is always alive */
9846 for (i = 0; i < hdev->num_alloc_vport; i++) {
9847 struct hclge_vport *vport = &hdev->vport[i];
9849 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9851 while (vlan_id != VLAN_N_VID) {
9852 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9853 vport->vport_id, vlan_id,
9855 if (ret && ret != -EINVAL)
9858 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9859 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9862 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9865 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9871 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9873 struct hclge_config_max_frm_size_cmd *req;
9874 struct hclge_desc desc;
9876 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9878 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9879 req->max_frm_size = cpu_to_le16(new_mps);
9880 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9882 return hclge_cmd_send(&hdev->hw, &desc, 1);
9885 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9887 struct hclge_vport *vport = hclge_get_vport(handle);
9889 return hclge_set_vport_mtu(vport, new_mtu);
9892 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9894 struct hclge_dev *hdev = vport->back;
9895 int i, max_frm_size, ret;
9897 /* HW supports 2-layer vlan */
9898 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
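/* e.g. new_mtu 1500 gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
 * 2 * 4 (VLAN_HLEN) = 1526 bytes
 */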
9899 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9900 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
9903 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9904 mutex_lock(&hdev->vport_lock);
9905 /* VF's mps must fit within hdev->mps */
9906 if (vport->vport_id && max_frm_size > hdev->mps) {
9907 mutex_unlock(&hdev->vport_lock);
9909 } else if (vport->vport_id) {
9910 vport->mps = max_frm_size;
9911 mutex_unlock(&hdev->vport_lock);
9915 /* PF's mps must be greater than the VF's mps */
9916 for (i = 1; i < hdev->num_alloc_vport; i++)
9917 if (max_frm_size < hdev->vport[i].mps) {
9918 mutex_unlock(&hdev->vport_lock);
9922 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9924 ret = hclge_set_mac_mtu(hdev, max_frm_size);
9926 dev_err(&hdev->pdev->dev,
9927 "Change mtu fail, ret =%d\n", ret);
9931 hdev->mps = max_frm_size;
9932 vport->mps = max_frm_size;
9934 ret = hclge_buffer_alloc(hdev);
9936 dev_err(&hdev->pdev->dev,
9937 "Allocate buffer fail, ret =%d\n", ret);
9940 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9941 mutex_unlock(&hdev->vport_lock);
9945 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9948 struct hclge_reset_tqp_queue_cmd *req;
9949 struct hclge_desc desc;
9952 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9954 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9955 req->tqp_id = cpu_to_le16(queue_id);
9957 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9959 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9961 dev_err(&hdev->pdev->dev,
9962 "Send tqp reset cmd error, status =%d\n", ret);
9969 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9971 struct hclge_reset_tqp_queue_cmd *req;
9972 struct hclge_desc desc;
9975 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9977 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9978 req->tqp_id = cpu_to_le16(queue_id);
9980 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9982 dev_err(&hdev->pdev->dev,
9983 "Get reset status error, status =%d\n", ret);
9987 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
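/* Convert a queue id local to this handle into the global tqp index
 * that firmware commands operate on.
 */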
9990 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9992 struct hnae3_queue *queue;
9993 struct hclge_tqp *tqp;
9995 queue = handle->kinfo.tqp[queue_id];
9996 tqp = container_of(queue, struct hclge_tqp, q);
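/* TQP reset sequence: disable the queue, assert the soft reset via
 * firmware, poll the reset status up to HCLGE_TQP_RESET_TRY_TIMES
 * (sleeping about 1ms between polls), then deassert the reset.
 */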
10001 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
10003 struct hclge_vport *vport = hclge_get_vport(handle);
10004 struct hclge_dev *hdev = vport->back;
10005 int reset_try_times = 0;
10010 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
10012 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
10014 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
10018 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
10020 dev_err(&hdev->pdev->dev,
10021 "Send reset tqp cmd fail, ret = %d\n", ret);
10025 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10026 reset_status = hclge_get_reset_status(hdev, queue_gid);
10030 /* Wait for tqp hw reset */
10031 usleep_range(1000, 1200);
10034 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10035 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
10039 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
10041 dev_err(&hdev->pdev->dev,
10042 "Deassert the soft reset fail, ret = %d\n", ret);
10047 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
10049 struct hnae3_handle *handle = &vport->nic;
10050 struct hclge_dev *hdev = vport->back;
10051 int reset_try_times = 0;
10056 if (queue_id >= handle->kinfo.num_tqps) {
10057 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
10062 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
10064 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
10066 dev_warn(&hdev->pdev->dev,
10067 "Send reset tqp cmd fail, ret = %d\n", ret);
10071 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10072 reset_status = hclge_get_reset_status(hdev, queue_gid);
10076 /* Wait for tqp hw reset */
10077 usleep_range(1000, 1200);
10080 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10081 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
10085 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
10087 dev_warn(&hdev->pdev->dev,
10088 "Deassert the soft reset fail, ret = %d\n", ret);
10091 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10093 struct hclge_vport *vport = hclge_get_vport(handle);
10094 struct hclge_dev *hdev = vport->back;
10096 return hdev->fw_version;
10099 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10101 struct phy_device *phydev = hdev->hw.mac.phydev;
10106 phy_set_asym_pause(phydev, rx_en, tx_en);
10109 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10113 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10116 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10118 dev_err(&hdev->pdev->dev,
10119 "configure pauseparam error, ret = %d.\n", ret);
10124 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10126 struct phy_device *phydev = hdev->hw.mac.phydev;
10127 u16 remote_advertising = 0;
10128 u16 local_advertising;
10129 u32 rx_pause, tx_pause;
10132 if (!phydev->link || !phydev->autoneg)
10135 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10138 remote_advertising = LPA_PAUSE_CAP;
10140 if (phydev->asym_pause)
10141 remote_advertising |= LPA_PAUSE_ASYM;
10143 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10144 remote_advertising);
10145 tx_pause = flowctl & FLOW_CTRL_TX;
10146 rx_pause = flowctl & FLOW_CTRL_RX;
10148 if (phydev->duplex == HCLGE_MAC_HALF) {
10153 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10156 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10157 u32 *rx_en, u32 *tx_en)
10159 struct hclge_vport *vport = hclge_get_vport(handle);
10160 struct hclge_dev *hdev = vport->back;
10161 struct phy_device *phydev = hdev->hw.mac.phydev;
10163 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
10165 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10171 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10174 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10177 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10186 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10187 u32 rx_en, u32 tx_en)
10189 if (rx_en && tx_en)
10190 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10191 else if (rx_en && !tx_en)
10192 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10193 else if (!rx_en && tx_en)
10194 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10196 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10198 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10201 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10202 u32 rx_en, u32 tx_en)
10204 struct hclge_vport *vport = hclge_get_vport(handle);
10205 struct hclge_dev *hdev = vport->back;
10206 struct phy_device *phydev = hdev->hw.mac.phydev;
10210 fc_autoneg = hclge_get_autoneg(handle);
10211 if (auto_neg != fc_autoneg) {
10212 dev_info(&hdev->pdev->dev,
10213 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10214 return -EOPNOTSUPP;
10218 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10219 dev_info(&hdev->pdev->dev,
10220 "Priority flow control enabled. Cannot set link flow control.\n");
10221 return -EOPNOTSUPP;
10224 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10226 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10229 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10232 return phy_start_aneg(phydev);
10234 return -EOPNOTSUPP;
10237 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10238 u8 *auto_neg, u32 *speed, u8 *duplex)
10240 struct hclge_vport *vport = hclge_get_vport(handle);
10241 struct hclge_dev *hdev = vport->back;
10244 *speed = hdev->hw.mac.speed;
10246 *duplex = hdev->hw.mac.duplex;
10248 *auto_neg = hdev->hw.mac.autoneg;
10251 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10254 struct hclge_vport *vport = hclge_get_vport(handle);
10255 struct hclge_dev *hdev = vport->back;
10257 /* When the nic is down, the service task is not running and does not
10258 * refresh the port information every second. Query the port info
10259 * before returning the media type, to ensure it is current.
10261 hclge_update_port_info(hdev);
10264 *media_type = hdev->hw.mac.media_type;
10267 *module_type = hdev->hw.mac.module_type;
10270 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10271 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10273 struct hclge_vport *vport = hclge_get_vport(handle);
10274 struct hclge_dev *hdev = vport->back;
10275 struct phy_device *phydev = hdev->hw.mac.phydev;
10276 int mdix_ctrl, mdix, is_resolved;
10277 unsigned int retval;
10280 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10281 *tp_mdix = ETH_TP_MDI_INVALID;
10285 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10287 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10288 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10289 HCLGE_PHY_MDIX_CTRL_S);
10291 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10292 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10293 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10295 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10297 switch (mdix_ctrl) {
10299 *tp_mdix_ctrl = ETH_TP_MDI;
10302 *tp_mdix_ctrl = ETH_TP_MDI_X;
10305 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10308 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10313 *tp_mdix = ETH_TP_MDI_INVALID;
10315 *tp_mdix = ETH_TP_MDI_X;
10317 *tp_mdix = ETH_TP_MDI;
10320 static void hclge_info_show(struct hclge_dev *hdev)
10322 struct device *dev = &hdev->pdev->dev;
10324 dev_info(dev, "PF info begin:\n");
10326 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10327 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10328 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10329 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10330 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
10331 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10332 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10333 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10334 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10335 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10336 dev_info(dev, "This is %s PF\n",
10337 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10338 dev_info(dev, "DCB %s\n",
10339 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10340 dev_info(dev, "MQPRIO %s\n",
10341 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10343 dev_info(dev, "PF info end.\n");
10346 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10347 struct hclge_vport *vport)
10349 struct hnae3_client *client = vport->nic.client;
10350 struct hclge_dev *hdev = ae_dev->priv;
10351 int rst_cnt = hdev->rst_stats.reset_cnt;
10354 ret = client->ops->init_instance(&vport->nic);
10358 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10359 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10360 rst_cnt != hdev->rst_stats.reset_cnt) {
10365 /* Enable nic hw error interrupts */
10366 ret = hclge_config_nic_hw_error(hdev, true);
10368 dev_err(&ae_dev->pdev->dev,
10369 "fail(%d) to enable hw error interrupts\n", ret);
10373 hnae3_set_client_init_flag(client, ae_dev, 1);
10375 if (netif_msg_drv(&hdev->vport->nic))
10376 hclge_info_show(hdev);
10381 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10382 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10383 msleep(HCLGE_WAIT_RESET_DONE);
10385 client->ops->uninit_instance(&vport->nic, 0);
10390 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10391 struct hclge_vport *vport)
10393 struct hclge_dev *hdev = ae_dev->priv;
10394 struct hnae3_client *client;
10398 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10402 client = hdev->roce_client;
10403 ret = hclge_init_roce_base_info(vport);
10407 rst_cnt = hdev->rst_stats.reset_cnt;
10408 ret = client->ops->init_instance(&vport->roce);
10412 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10413 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10414 rst_cnt != hdev->rst_stats.reset_cnt) {
10416 goto init_roce_err;
10419 /* Enable roce ras interrupts */
10420 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10422 dev_err(&ae_dev->pdev->dev,
10423 "fail(%d) to enable roce ras interrupts\n", ret);
10424 goto init_roce_err;
10427 hnae3_set_client_init_flag(client, ae_dev, 1);
10432 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10433 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10434 msleep(HCLGE_WAIT_RESET_DONE);
10436 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10441 static int hclge_init_client_instance(struct hnae3_client *client,
10442 struct hnae3_ae_dev *ae_dev)
10444 struct hclge_dev *hdev = ae_dev->priv;
10445 struct hclge_vport *vport;
10448 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10449 vport = &hdev->vport[i];
10451 switch (client->type) {
10452 case HNAE3_CLIENT_KNIC:
10453 hdev->nic_client = client;
10454 vport->nic.client = client;
10455 ret = hclge_init_nic_client_instance(ae_dev, vport);
10459 ret = hclge_init_roce_client_instance(ae_dev, vport);
10464 case HNAE3_CLIENT_ROCE:
10465 if (hnae3_dev_roce_supported(hdev)) {
10466 hdev->roce_client = client;
10467 vport->roce.client = client;
10470 ret = hclge_init_roce_client_instance(ae_dev, vport);
10483 hdev->nic_client = NULL;
10484 vport->nic.client = NULL;
10487 hdev->roce_client = NULL;
10488 vport->roce.client = NULL;
10492 static void hclge_uninit_client_instance(struct hnae3_client *client,
10493 struct hnae3_ae_dev *ae_dev)
10495 struct hclge_dev *hdev = ae_dev->priv;
10496 struct hclge_vport *vport;
10499 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10500 vport = &hdev->vport[i];
10501 if (hdev->roce_client) {
10502 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10503 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10504 msleep(HCLGE_WAIT_RESET_DONE);
10506 hdev->roce_client->ops->uninit_instance(&vport->roce,
10508 hdev->roce_client = NULL;
10509 vport->roce.client = NULL;
10511 if (client->type == HNAE3_CLIENT_ROCE)
10513 if (hdev->nic_client && client->ops->uninit_instance) {
10514 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10515 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10516 msleep(HCLGE_WAIT_RESET_DONE);
10518 client->ops->uninit_instance(&vport->nic, 0);
10519 hdev->nic_client = NULL;
10520 vport->nic.client = NULL;
10525 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10527 #define HCLGE_MEM_BAR 4
10529 struct pci_dev *pdev = hdev->pdev;
10530 struct hclge_hw *hw = &hdev->hw;
10532 /* if the device does not have device memory, return directly */
10533 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10536 hw->mem_base = devm_ioremap_wc(&pdev->dev,
10537 pci_resource_start(pdev, HCLGE_MEM_BAR),
10538 pci_resource_len(pdev, HCLGE_MEM_BAR));
10539 if (!hw->mem_base) {
10540 dev_err(&pdev->dev, "failed to map device memory\n");
10547 static int hclge_pci_init(struct hclge_dev *hdev)
10549 struct pci_dev *pdev = hdev->pdev;
10550 struct hclge_hw *hw;
10553 ret = pci_enable_device(pdev);
10555 dev_err(&pdev->dev, "failed to enable PCI device\n");
10559 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10561 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10563 dev_err(&pdev->dev,
10564 "can't set consistent PCI DMA");
10565 goto err_disable_device;
10567 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10570 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10572 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10573 goto err_disable_device;
10576 pci_set_master(pdev);
10578 hw->io_base = pcim_iomap(pdev, 2, 0);
10579 if (!hw->io_base) {
10580 dev_err(&pdev->dev, "Can't map configuration register space\n");
10582 goto err_clr_master;
10585 ret = hclge_dev_mem_map(hdev);
10587 goto err_unmap_io_base;
10589 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10594 pcim_iounmap(pdev, hdev->hw.io_base);
10596 pci_clear_master(pdev);
10597 pci_release_regions(pdev);
10598 err_disable_device:
10599 pci_disable_device(pdev);
10604 static void hclge_pci_uninit(struct hclge_dev *hdev)
10606 struct pci_dev *pdev = hdev->pdev;
10608 if (hdev->hw.mem_base)
10609 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10611 pcim_iounmap(pdev, hdev->hw.io_base);
10612 pci_free_irq_vectors(pdev);
10613 pci_clear_master(pdev);
10614 pci_release_mem_regions(pdev);
10615 pci_disable_device(pdev);
10618 static void hclge_state_init(struct hclge_dev *hdev)
10620 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10621 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10622 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10623 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10624 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10625 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10626 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10629 static void hclge_state_uninit(struct hclge_dev *hdev)
10631 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10632 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10634 if (hdev->reset_timer.function)
10635 del_timer_sync(&hdev->reset_timer);
10636 if (hdev->service_task.work.func)
10637 cancel_delayed_work_sync(&hdev->service_task);
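/* FLR preparation: take the reset semaphore and run the reset-prepare
 * path, retrying up to HCLGE_FLR_RETRY_CNT times (with an
 * HCLGE_FLR_RETRY_WAIT_MS delay) while preparation fails or another
 * reset is pending; then mask the misc vector until the FLR is done.
 */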
10640 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10642 #define HCLGE_FLR_RETRY_WAIT_MS 500
10643 #define HCLGE_FLR_RETRY_CNT 5
10645 struct hclge_dev *hdev = ae_dev->priv;
10650 down(&hdev->reset_sem);
10651 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10652 hdev->reset_type = HNAE3_FLR_RESET;
10653 ret = hclge_reset_prepare(hdev);
10654 if (ret || hdev->reset_pending) {
10655 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10657 if (hdev->reset_pending ||
10658 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10659 dev_err(&hdev->pdev->dev,
10660 "reset_pending:0x%lx, retry_cnt:%d\n",
10661 hdev->reset_pending, retry_cnt);
10662 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10663 up(&hdev->reset_sem);
10664 msleep(HCLGE_FLR_RETRY_WAIT_MS);
10669 /* disable the misc vector until FLR is done */
10670 hclge_enable_vector(&hdev->misc_vector, false);
10671 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10672 hdev->rst_stats.flr_rst_cnt++;
10675 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10677 struct hclge_dev *hdev = ae_dev->priv;
10680 hclge_enable_vector(&hdev->misc_vector, true);
10682 ret = hclge_reset_rebuild(hdev);
10684 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10686 hdev->reset_type = HNAE3_NONE_RESET;
10687 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10688 up(&hdev->reset_sem);
10691 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10695 for (i = 0; i < hdev->num_alloc_vport; i++) {
10696 struct hclge_vport *vport = &hdev->vport[i];
10699 /* Send cmd to clear VF's FUNC_RST_ING */
10700 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10702 dev_warn(&hdev->pdev->dev,
10703 "clear vf(%u) rst failed %d!\n",
10704 vport->vport_id, ret);
10708 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10710 struct pci_dev *pdev = ae_dev->pdev;
10711 struct hclge_dev *hdev;
10714 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10719 hdev->ae_dev = ae_dev;
10720 hdev->reset_type = HNAE3_NONE_RESET;
10721 hdev->reset_level = HNAE3_FUNC_RESET;
10722 ae_dev->priv = hdev;
10724 /* HW supports 2-layer vlan */
10725 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10727 mutex_init(&hdev->vport_lock);
10728 spin_lock_init(&hdev->fd_rule_lock);
10729 sema_init(&hdev->reset_sem, 1);
10731 ret = hclge_pci_init(hdev);
10735 /* Initialize the firmware command queue */
10736 ret = hclge_cmd_queue_init(hdev);
10738 goto err_pci_uninit;
10740 /* Initialize the firmware command interface */
10741 ret = hclge_cmd_init(hdev);
10743 goto err_cmd_uninit;
10745 ret = hclge_get_cap(hdev);
10747 goto err_cmd_uninit;
10749 ret = hclge_query_dev_specs(hdev);
10751 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10753 goto err_cmd_uninit;
10756 ret = hclge_configure(hdev);
10758 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10759 goto err_cmd_uninit;
10762 ret = hclge_init_msi(hdev);
10764 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10765 goto err_cmd_uninit;
10768 ret = hclge_misc_irq_init(hdev);
10770 goto err_msi_uninit;
10772 ret = hclge_alloc_tqps(hdev);
10774 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10775 goto err_msi_irq_uninit;
10778 ret = hclge_alloc_vport(hdev);
10780 goto err_msi_irq_uninit;
10782 ret = hclge_map_tqp(hdev);
10784 goto err_msi_irq_uninit;
10786 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
10787 !hnae3_dev_phy_imp_supported(hdev)) {
10788 ret = hclge_mac_mdio_config(hdev);
10790 goto err_msi_irq_uninit;
10793 ret = hclge_init_umv_space(hdev);
10795 goto err_mdiobus_unreg;
10797 ret = hclge_mac_init(hdev);
10799 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10800 goto err_mdiobus_unreg;
10803 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10805 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10806 goto err_mdiobus_unreg;
10809 ret = hclge_config_gro(hdev, true);
10811 goto err_mdiobus_unreg;
10813 ret = hclge_init_vlan_config(hdev);
10815 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10816 goto err_mdiobus_unreg;
10819 ret = hclge_tm_schd_init(hdev);
10821 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10822 goto err_mdiobus_unreg;
10825 ret = hclge_rss_init_cfg(hdev);
10827 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
10828 goto err_mdiobus_unreg;
10831 ret = hclge_rss_init_hw(hdev);
10833 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10834 goto err_mdiobus_unreg;
10837 ret = init_mgr_tbl(hdev);
10839 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10840 goto err_mdiobus_unreg;
10843 ret = hclge_init_fd_config(hdev);
10845 dev_err(&pdev->dev,
10846 "fd table init fail, ret=%d\n", ret);
10847 goto err_mdiobus_unreg;
10850 INIT_KFIFO(hdev->mac_tnl_log);
10852 hclge_dcb_ops_set(hdev);
10854 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10855 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10857 /* Set up affinity after the service timer setup, because add_timer_on
10858 * is called from the affinity notify callback.
10860 hclge_misc_affinity_setup(hdev);
10862 hclge_clear_all_event_cause(hdev);
10863 hclge_clear_resetting_state(hdev);
10865 /* Log and clear the hw errors that have already occurred */
10866 hclge_handle_all_hns_hw_errors(ae_dev);
10868 /* request a delayed reset for error recovery, because an immediate
10869 * global reset on a PF may affect the pending initialization of other PFs
10871 if (ae_dev->hw_err_reset_req) {
10872 enum hnae3_reset_type reset_level;
10874 reset_level = hclge_get_reset_level(ae_dev,
10875 &ae_dev->hw_err_reset_req);
10876 hclge_set_def_reset_request(ae_dev, reset_level);
10877 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10880 /* Enable MISC vector(vector0) */
10881 hclge_enable_vector(&hdev->misc_vector, true);
10883 hclge_state_init(hdev);
10884 hdev->last_reset_time = jiffies;
10886 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10887 HCLGE_DRIVER_NAME);
10889 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10894 if (hdev->hw.mac.phydev)
10895 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10896 err_msi_irq_uninit:
10897 hclge_misc_irq_uninit(hdev);
10899 pci_free_irq_vectors(pdev);
10901 hclge_cmd_uninit(hdev);
10903 pcim_iounmap(pdev, hdev->hw.io_base);
10904 pci_clear_master(pdev);
10905 pci_release_regions(pdev);
10906 pci_disable_device(pdev);
10908 mutex_destroy(&hdev->vport_lock);
10912 static void hclge_stats_clear(struct hclge_dev *hdev)
10914 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10917 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10919 return hclge_config_switch_param(hdev, vf, enable,
10920 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10923 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10925 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10926 HCLGE_FILTER_FE_NIC_INGRESS_B,
10930 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10934 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10936 dev_err(&hdev->pdev->dev,
10937 "Set vf %d mac spoof check %s failed, ret=%d\n",
10938 vf, enable ? "on" : "off", ret);
10942 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10944 dev_err(&hdev->pdev->dev,
10945 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10946 vf, enable ? "on" : "off", ret);
10951 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10954 struct hclge_vport *vport = hclge_get_vport(handle);
10955 struct hclge_dev *hdev = vport->back;
10956 u32 new_spoofchk = enable ? 1 : 0;
10959 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10960 return -EOPNOTSUPP;
10962 vport = hclge_get_vf_vport(hdev, vf);
10966 if (vport->vf_info.spoofchk == new_spoofchk)
10969 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10970 dev_warn(&hdev->pdev->dev,
10971 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10973 else if (enable && hclge_is_umv_space_full(vport, true))
10974 dev_warn(&hdev->pdev->dev,
10975 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10978 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10982 vport->vf_info.spoofchk = new_spoofchk;
10986 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10988 struct hclge_vport *vport = hdev->vport;
10992 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10995 /* resume the vf spoof check state after reset */
10996 for (i = 0; i < hdev->num_alloc_vport; i++) {
10997 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10998 vport->vf_info.spoofchk);
11008 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11010 struct hclge_vport *vport = hclge_get_vport(handle);
11011 struct hclge_dev *hdev = vport->back;
11012 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
11013 u32 new_trusted = enable ? 1 : 0;
11017 vport = hclge_get_vf_vport(hdev, vf);
11021 if (vport->vf_info.trusted == new_trusted)
11024 /* Disable promisc mode for VF if it is not trusted any more. */
11025 if (!enable && vport->vf_info.promisc_enable) {
11026 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
11027 ret = hclge_set_vport_promisc_mode(vport, false, false,
11031 vport->vf_info.promisc_enable = 0;
11032 hclge_inform_vf_promisc_info(vport);
11035 vport->vf_info.trusted = new_trusted;
11040 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11045 /* reset vf rate to default value */
11046 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11047 struct hclge_vport *vport = &hdev->vport[vf];
11049 vport->vf_info.max_tx_rate = 0;
11050 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11052 dev_err(&hdev->pdev->dev,
11053 "vf%d failed to reset to default, ret=%d\n",
11054 vf - HCLGE_VF_VPORT_START_NUM, ret);
11058 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11059 int min_tx_rate, int max_tx_rate)
11061 if (min_tx_rate != 0 ||
11062 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11063 dev_err(&hdev->pdev->dev,
11064 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11065 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11072 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11073 int min_tx_rate, int max_tx_rate, bool force)
11075 struct hclge_vport *vport = hclge_get_vport(handle);
11076 struct hclge_dev *hdev = vport->back;
11079 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11083 vport = hclge_get_vf_vport(hdev, vf);
11087 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11090 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11094 vport->vf_info.max_tx_rate = max_tx_rate;
11099 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11101 struct hnae3_handle *handle = &hdev->vport->nic;
11102 struct hclge_vport *vport;
11106 /* resume the vf max_tx_rate after reset */
11107 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11108 vport = hclge_get_vf_vport(hdev, vf);
11112 /* Zero means max rate; after reset, firmware has already set it to
11113 * max rate, so just continue.
11115 if (!vport->vf_info.max_tx_rate)
11118 ret = hclge_set_vf_rate(handle, vf, 0,
11119 vport->vf_info.max_tx_rate, true);
11121 dev_err(&hdev->pdev->dev,
11122 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11123 vf, vport->vf_info.max_tx_rate, ret);
11131 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11133 struct hclge_vport *vport = hdev->vport;
11136 for (i = 0; i < hdev->num_alloc_vport; i++) {
11137 hclge_vport_stop(vport);
11142 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11144 struct hclge_dev *hdev = ae_dev->priv;
11145 struct pci_dev *pdev = ae_dev->pdev;
11148 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11150 hclge_stats_clear(hdev);
11151 /* NOTE: a pf reset does not need to clear or restore the pf and vf
11152 * table entries, so the in-memory tables should not be cleaned here.
11154 if (hdev->reset_type == HNAE3_IMP_RESET ||
11155 hdev->reset_type == HNAE3_GLOBAL_RESET) {
11156 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11157 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11158 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11159 hclge_reset_umv_space(hdev);
11162 ret = hclge_cmd_init(hdev);
11164 dev_err(&pdev->dev, "Cmd queue init failed\n");
11168 ret = hclge_map_tqp(hdev);
11170 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11174 ret = hclge_mac_init(hdev);
11176 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11180 ret = hclge_tp_port_init(hdev);
11182 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11187 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11189 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11193 ret = hclge_config_gro(hdev, true);
11197 ret = hclge_init_vlan_config(hdev);
11199 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11203 ret = hclge_tm_init_hw(hdev, true);
11205 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11209 ret = hclge_rss_init_hw(hdev);
11211 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11215 ret = init_mgr_tbl(hdev);
11217 dev_err(&pdev->dev,
11218 "failed to reinit manager table, ret = %d\n", ret);
11222 ret = hclge_init_fd_config(hdev);
11224 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11228 /* Log and clear the hw errors those already occurred */
11229 hclge_handle_all_hns_hw_errors(ae_dev);
11231 /* Re-enable the hw error interrupts because
11232 * the interrupts get disabled on global reset.
11234 ret = hclge_config_nic_hw_error(hdev, true);
11236 dev_err(&pdev->dev,
11237 "fail(%d) to re-enable NIC hw error interrupts\n",
11242 if (hdev->roce_client) {
11243 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11245 dev_err(&pdev->dev,
11246 "fail(%d) to re-enable roce ras interrupts\n",
11252 hclge_reset_vport_state(hdev);
11253 ret = hclge_reset_vport_spoofchk(hdev);
11257 ret = hclge_resume_vf_rate(hdev);
11261 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11262 HCLGE_DRIVER_NAME);
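
/* Tear down the device in roughly the reverse order of initialization:
 * quiesce VF rate/VLAN config, disable the misc vector and all hardware
 * error interrupts, then release command queue, IRQ and PCI resources.
 */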
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_reset_vf_rate(hdev);
	hclge_clear_vf_vlan(hdev);
	hclge_misc_affinity_teardown(hdev);
	hclge_state_uninit(hdev);
	hclge_uninit_mac_table(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_vlan_table(hdev);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->pf_rss_size_max;
}
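
/* Change the number of channels: update the TQP/vport mapping, rewrite
 * the RSS TC mode for the new rss_size, and (unless the user configured
 * it explicitly) rebuild the RSS indirection table.
 */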
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
			    GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);

	return ret;
}
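
/* Ask the firmware how many 32-bit and 64-bit registers a register dump
 * will contain; both counts are returned by one query descriptor.
 */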
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
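
/* Same scheme as the 32-bit query: the first BD loses nodata_len entries
 * to the command header, while each following BD is treated entirely as
 * data.
 */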
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3
#define BD_LIST_MAX_NUM		30
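
/* DFX (debug) register dump: first query how many BDs each register
 * block needs, then read each block with a multi-BD command.
 */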
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	int i;

	/* initialize command BD except the last one */
	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	/* initialize the last command BD */
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
}

static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list,
				    u32 type_num)
{
	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}

static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	desc = desc_src;
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}

static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg_num + separator_num;
}
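
/* Compute the dump buffer length for all DFX blocks, padding each block
 * up to a whole REG_LEN_PER_LINE line.
 */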
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int data_len_per_desc, bd_num, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	u32 data_len;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	data_len_per_desc = sizeof_field(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		data_len = data_len_per_desc * bd_num;
		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
	}

	return ret;
}

static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	struct hclge_desc *desc_src;
	u32 *reg = data;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src)
		return -ENOMEM;

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			break;
		}

		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	kfree(desc_src);
	return ret;
}
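
/* Copy the directly readable PF registers (cmdq, common, per-ring and
 * per-vector interrupt registers) into the dump buffer, padding each
 * group with separator words; returns the number of u32 words written.
 */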
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num, separator_num;
	int data_num_sum;
	u32 *reg = data;

	/* fetching per-PF registers values from PF PCIe register space */
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum = reg_num + separator_num;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum += reg_num + separator_num;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

	return data_num_sum;
}
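
/* Total ethtool register-dump length: one padded line group per register
 * block, plus the separately computed DFX length.
 */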
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
	int regs_lines_32_bit, regs_lines_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, reg_num, separator_num, ret;
	u32 *reg = data;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_32_bit;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_64_bit * 2;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
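
/* Reconcile promiscuous mode with the latest netdev flags and any
 * overflow-driven promisc bits; on success the CHANGED flag is cleared
 * and the VLAN filter is updated to match.
 */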
static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;
	u8 tmp_flags;
	int ret;

	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
		vport->last_promisc_flags = vport->overflow_promisc_flags;
	}

	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
					     tmp_flags & HNAE3_MPE);
		if (!ret) {
			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
			hclge_enable_vlan_filter(handle,
						 tmp_flags & HNAE3_VLAN_FLTR);
		}
	}
}
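
/* Query the firmware whether an SFP module is plugged in; any command
 * failure is treated as "not present".
 */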
static bool hclge_module_existed(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u32 existed;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP exist state, ret = %d\n", ret);
		return false;
	}

	existed = le32_to_cpu(desc.data[0]);

	return existed != 0;
}

/* need 6 bds(total 140 bytes) in one reading
 * return the number of bytes actually read, 0 means read failed.
 */
static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
				     u32 len, u8 *data)
{
	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
	u16 read_len;
	u16 copy_len;
	int ret;
	int i;

	/* setup all 6 bds to read module eeprom info. */
	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
					   true);

		/* bd0~bd4 need next flag */
		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	/* setup bd0, this bd contains offset and read length. */
	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = cpu_to_le16(read_len);

	ret = hclge_cmd_send(&hdev->hw, desc, i);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP eeprom info, ret = %d\n", ret);
		return 0;
	}

	/* copy sfp info from bd0 to out buffer. */
	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	/* copy sfp info from bd1~bd5 to out buffer if needed. */
	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			return read_len;

		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return read_len;
}
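
/* Read module EEPROM content chunk by chunk (at most
 * HCLGE_SFP_INFO_MAX_LEN bytes per firmware command) until the requested
 * length is filled; only fiber media is supported.
 */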
static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}
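
/* Operations exported to the hnae3 framework; the hns3 enet layer calls
 * into the PF driver through this table.
 */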
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.dbg_read_cmd = hclge_dbg_read_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
	.add_cls_flower = hclge_add_cls_flower,
	.del_cls_flower = hclge_del_cls_flower,
	.cls_flower_active = hclge_is_cls_flower_active,
	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}

module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);