// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
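/* The two macros above implement a table-driven statistics scheme:
 * HCLGE_MAC_STATS_FIELD_OFF() records where a counter lives inside
 * struct hclge_mac_stats, and HCLGE_STATS_READ() later fetches the u64 at
 * that byte offset. Illustrative (hypothetical) use:
 *
 *	u64 v = HCLGE_STATS_READ(&hdev->mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * which is equivalent to reading hdev->mac_stats.mac_tx_mac_pause_num, but
 * lets the g_mac_stats_string table below drive the ethtool dump generically.
 */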
#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12
#define HCLGE_LINK_STATUS_MS	10

#define HCLGE_VF_VPORT_START_NUM	1

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_EN_0_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
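/* Taken together, the four address tables above define the register sets and
 * dump order used when the driver exports registers (e.g. for an ethtool
 * register dump): command-queue, common/misc, per-ring and per-TQP-interrupt
 * registers.
 */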
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
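/* Default 40-byte RSS hash key. This is the well-known Toeplitz example key
 * from Microsoft's RSS verification documentation, also used as the default
 * key by several other NIC drivers.
 */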
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};
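/* hclge_dfx_bd_offset_list and hclge_dfx_reg_opcode_list are parallel arrays:
 * entry i of the offset list locates the BD count for the register bank that
 * entry i of the opcode list reads, so the two must stay in the same order.
 */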
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);
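	/* The first descriptor carries 3 stats values and each following
	 * descriptor carries 4, so the descriptor count is
	 * 1 + ceil((reg_num - 3) / 4). For example (illustrative numbers),
	 * reg_num = 12 gives 1 + (9 >> 2) + 1 = 4 descriptors.
	 */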
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has a TX queue and a RX queue */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return (u8 *)buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all MAC modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (u8 *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* the NIC's MSI-X vector count always equals the RoCE's */
		hdev->num_nic_msi = hdev->num_roce_msi;

		/* PF should have NIC vectors and RoCE vectors,
		 * NIC vectors are queued before RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		hdev->num_nic_msi = hdev->num_msi;
	}

	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);
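	/* The MAC address is split across two config words: param[2] holds
	 * the low 32 bits and param[3] holds the high 16 bits. The high part
	 * is shifted up in two steps ((x << 31) << 1) rather than one
	 * "<< 32", so the expression stays well-defined even if the operand
	 * were only 32 bits wide.
	 */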
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the read length is sent to hardware in units of 4 bytes */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure one to one mapping between irq and queue at default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
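	/* The main (PF) vport additionally takes the division remainder.
	 * Illustrative numbers: 40 TQPs across 3 vports gives 13 per vport,
	 * with the main vport getting 13 + 1 = 14.
	 */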
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);
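	/* Illustrative sizing (assuming mps = 1500 and 4 enabled TCs):
	 * aligned_mps = roundup(1500, 256) = 1536 and
	 * shared_buf_tc = 4 * 1536 + 1536 = 7680, so shared_std is at least
	 * 7680 bytes (more if shared_buf_min is larger) before the private
	 * buffers are even considered.
	 */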
	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				      struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
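		/* The 8 TCs are programmed through 2 descriptors, with
		 * HCLGE_TC_NUM_ONE_DESC (4) watermark pairs per descriptor,
		 * so descriptor i carries TCs 4*i .. 4*i+3.
		 */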
2141 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2142 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2144 priv = &buf_alloc->priv_buf[idx];
2145 req->tc_wl[j].high =
2146 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2147 req->tc_wl[j].high |=
2148 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2150 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2151 req->tc_wl[j].low |=
2152 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2156 /* Send 2 descriptor at one time */
2157 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2159 dev_err(&hdev->pdev->dev,
2160 "rx private waterline config cmd failed %d\n",
2165 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2166 struct hclge_pkt_buf_alloc *buf_alloc)
2168 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2169 struct hclge_rx_com_thrd *req;
2170 struct hclge_desc desc[2];
2171 struct hclge_tc_thrd *tc;
2175 for (i = 0; i < 2; i++) {
2176 hclge_cmd_setup_basic_desc(&desc[i],
2177 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2178 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2180 /* The first descriptor sets the NEXT bit to 1 */
2182 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2184 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2186 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2187 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2189 req->com_thrd[j].high =
2190 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2191 req->com_thrd[j].high |=
2192 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2193 req->com_thrd[j].low =
2194 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2195 req->com_thrd[j].low |=
2196 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2200 /* Send 2 descriptors at one time */
2201 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2203 dev_err(&hdev->pdev->dev,
2204 "common threshold config cmd failed %d\n", ret);
2208 static int hclge_common_wl_config(struct hclge_dev *hdev,
2209 struct hclge_pkt_buf_alloc *buf_alloc)
2211 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2212 struct hclge_rx_com_wl *req;
2213 struct hclge_desc desc;
2216 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2218 req = (struct hclge_rx_com_wl *)desc.data;
2219 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2220 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2222 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2223 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2225 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2227 dev_err(&hdev->pdev->dev,
2228 "common waterline config cmd failed %d\n", ret);
2233 int hclge_buffer_alloc(struct hclge_dev *hdev)
2235 struct hclge_pkt_buf_alloc *pkt_buf;
2238 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2242 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2244 dev_err(&hdev->pdev->dev,
2245 "could not calc tx buffer size for all TCs %d\n", ret);
2249 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2251 dev_err(&hdev->pdev->dev,
2252 "could not alloc tx buffers %d\n", ret);
2256 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2258 dev_err(&hdev->pdev->dev,
2259 "could not calc rx priv buffer size for all TCs %d\n",
2264 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2266 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2271 if (hnae3_dev_dcb_supported(hdev)) {
2272 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2274 dev_err(&hdev->pdev->dev,
2275 "could not configure rx private waterline %d\n",
2280 ret = hclge_common_thrd_config(hdev, pkt_buf);
2282 dev_err(&hdev->pdev->dev,
2283 "could not configure common threshold %d\n",
2289 ret = hclge_common_wl_config(hdev, pkt_buf);
2291 dev_err(&hdev->pdev->dev,
2292 "could not configure common waterline %d\n", ret);
2299 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2301 struct hnae3_handle *roce = &vport->roce;
2302 struct hnae3_handle *nic = &vport->nic;
2304 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2306 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2307 vport->back->num_msi_left == 0)
2310 roce->rinfo.base_vector = vport->back->roce_base_vector;
2312 roce->rinfo.netdev = nic->kinfo.netdev;
2313 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2315 roce->pdev = nic->pdev;
2316 roce->ae_algo = nic->ae_algo;
2317 roce->numa_node_mask = nic->numa_node_mask;
2322 static int hclge_init_msi(struct hclge_dev *hdev)
2324 struct pci_dev *pdev = hdev->pdev;
2328 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2330 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2333 "failed(%d) to allocate MSI/MSI-X vectors\n",
2337 if (vectors < hdev->num_msi)
2338 dev_warn(&hdev->pdev->dev,
2339 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2340 hdev->num_msi, vectors);
2342 hdev->num_msi = vectors;
2343 hdev->num_msi_left = vectors;
2345 hdev->base_msi_vector = pdev->irq;
2346 hdev->roce_base_vector = hdev->base_msi_vector +
2347 hdev->roce_base_msix_offset;
2349 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2350 sizeof(u16), GFP_KERNEL);
2351 if (!hdev->vector_status) {
2352 pci_free_irq_vectors(pdev);
2356 for (i = 0; i < hdev->num_msi; i++)
2357 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2359 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2360 sizeof(int), GFP_KERNEL);
2361 if (!hdev->vector_irq) {
2362 pci_free_irq_vectors(pdev);
2369 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2371 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2372 duplex = HCLGE_MAC_FULL;
2377 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2380 struct hclge_config_mac_speed_dup_cmd *req;
2381 struct hclge_desc desc;
2384 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2386 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2389 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2392 case HCLGE_MAC_SPEED_10M:
2393 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2394 HCLGE_CFG_SPEED_S, 6);
2396 case HCLGE_MAC_SPEED_100M:
2397 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2398 HCLGE_CFG_SPEED_S, 7);
2400 case HCLGE_MAC_SPEED_1G:
2401 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2402 HCLGE_CFG_SPEED_S, 0);
2404 case HCLGE_MAC_SPEED_10G:
2405 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2406 HCLGE_CFG_SPEED_S, 1);
2408 case HCLGE_MAC_SPEED_25G:
2409 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2410 HCLGE_CFG_SPEED_S, 2);
2412 case HCLGE_MAC_SPEED_40G:
2413 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2414 HCLGE_CFG_SPEED_S, 3);
2416 case HCLGE_MAC_SPEED_50G:
2417 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2418 HCLGE_CFG_SPEED_S, 4);
2420 case HCLGE_MAC_SPEED_100G:
2421 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2422 HCLGE_CFG_SPEED_S, 5);
2425 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2429 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2432 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2434 dev_err(&hdev->pdev->dev,
2435 "mac speed/duplex config cmd failed %d.\n", ret);
2442 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2444 struct hclge_mac *mac = &hdev->hw.mac;
2447 duplex = hclge_check_speed_dup(duplex, speed);
2448 if (!mac->support_autoneg && mac->speed == speed &&
2449 mac->duplex == duplex)
2452 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2456 hdev->hw.mac.speed = speed;
2457 hdev->hw.mac.duplex = duplex;
2462 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2465 struct hclge_vport *vport = hclge_get_vport(handle);
2466 struct hclge_dev *hdev = vport->back;
2468 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2471 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2473 struct hclge_config_auto_neg_cmd *req;
2474 struct hclge_desc desc;
2478 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2480 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2482 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2483 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2485 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2487 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2493 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2495 struct hclge_vport *vport = hclge_get_vport(handle);
2496 struct hclge_dev *hdev = vport->back;
2498 if (!hdev->hw.mac.support_autoneg) {
2500 dev_err(&hdev->pdev->dev,
2501 "autoneg is not supported by current port\n");
2508 return hclge_set_autoneg_en(hdev, enable);
2511 static int hclge_get_autoneg(struct hnae3_handle *handle)
2513 struct hclge_vport *vport = hclge_get_vport(handle);
2514 struct hclge_dev *hdev = vport->back;
2515 struct phy_device *phydev = hdev->hw.mac.phydev;
2518 return phydev->autoneg;
2520 return hdev->hw.mac.autoneg;
2523 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2525 struct hclge_vport *vport = hclge_get_vport(handle);
2526 struct hclge_dev *hdev = vport->back;
2529 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2531 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2534 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2537 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2539 struct hclge_vport *vport = hclge_get_vport(handle);
2540 struct hclge_dev *hdev = vport->back;
2542 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2543 return hclge_set_autoneg_en(hdev, !halt);
2548 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2550 struct hclge_config_fec_cmd *req;
2551 struct hclge_desc desc;
2554 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2556 req = (struct hclge_config_fec_cmd *)desc.data;
2557 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2558 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2559 if (fec_mode & BIT(HNAE3_FEC_RS))
2560 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2561 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2562 if (fec_mode & BIT(HNAE3_FEC_BASER))
2563 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2564 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2566 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2568 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2573 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2575 struct hclge_vport *vport = hclge_get_vport(handle);
2576 struct hclge_dev *hdev = vport->back;
2577 struct hclge_mac *mac = &hdev->hw.mac;
2580 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2581 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2585 ret = hclge_set_fec_hw(hdev, fec_mode);
2589 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2593 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2596 struct hclge_vport *vport = hclge_get_vport(handle);
2597 struct hclge_dev *hdev = vport->back;
2598 struct hclge_mac *mac = &hdev->hw.mac;
2601 *fec_ability = mac->fec_ability;
2603 *fec_mode = mac->fec_mode;
2606 static int hclge_mac_init(struct hclge_dev *hdev)
2608 struct hclge_mac *mac = &hdev->hw.mac;
2611 hdev->support_sfp_query = true;
2612 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2613 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2614 hdev->hw.mac.duplex);
2618 if (hdev->hw.mac.support_autoneg) {
2619 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2626 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2627 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2632 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2634 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2638 ret = hclge_set_default_loopback(hdev);
2642 ret = hclge_buffer_alloc(hdev);
2644 dev_err(&hdev->pdev->dev,
2645 "allocate buffer fail, ret=%d\n", ret);
2650 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2652 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2653 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2654 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2655 hclge_wq, &hdev->service_task, 0);
2658 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2660 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2661 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2662 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2663 hclge_wq, &hdev->service_task, 0);
2666 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2668 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2669 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2670 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2671 hclge_wq, &hdev->service_task,
2675 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2677 struct hclge_link_status_cmd *req;
2678 struct hclge_desc desc;
2682 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2683 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2685 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2690 req = (struct hclge_link_status_cmd *)desc.data;
2691 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2693 return !!link_status;
2696 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2698 unsigned int mac_state;
2701 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2704 mac_state = hclge_get_mac_link_status(hdev);
2706 if (hdev->hw.mac.phydev) {
2707 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2708 link_stat = mac_state &
2709 hdev->hw.mac.phydev->link;
2714 link_stat = mac_state;
2720 static void hclge_update_link_status(struct hclge_dev *hdev)
2722 struct hnae3_client *rclient = hdev->roce_client;
2723 struct hnae3_client *client = hdev->nic_client;
2724 struct hnae3_handle *rhandle;
2725 struct hnae3_handle *handle;
2732 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2735 state = hclge_get_mac_phy_link(hdev);
2736 if (state != hdev->hw.mac.link) {
2737 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2738 handle = &hdev->vport[i].nic;
2739 client->ops->link_status_change(handle, state);
2740 hclge_config_mac_tnl_int(hdev, state);
2741 rhandle = &hdev->vport[i].roce;
2742 if (rclient && rclient->ops->link_status_change)
2743 rclient->ops->link_status_change(rhandle,
2746 hdev->hw.mac.link = state;
2749 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2752 static void hclge_update_port_capability(struct hclge_mac *mac)
2754 /* update fec ability by speed */
2755 hclge_convert_setting_fec(mac);
2757 /* firmware can not identify the backplane type; the media type
2758 * read from the configuration helps to deal with it
2760 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2761 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2762 mac->module_type = HNAE3_MODULE_TYPE_KR;
2763 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2764 mac->module_type = HNAE3_MODULE_TYPE_TP;
2766 if (mac->support_autoneg) {
2767 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2768 linkmode_copy(mac->advertising, mac->supported);
2770 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2772 linkmode_zero(mac->advertising);
2776 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2778 struct hclge_sfp_info_cmd *resp;
2779 struct hclge_desc desc;
2782 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2783 resp = (struct hclge_sfp_info_cmd *)desc.data;
2784 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2785 if (ret == -EOPNOTSUPP) {
2786 dev_warn(&hdev->pdev->dev,
2787 "IMP do not support get SFP speed %d\n", ret);
2790 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2794 *speed = le32_to_cpu(resp->speed);
2799 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2801 struct hclge_sfp_info_cmd *resp;
2802 struct hclge_desc desc;
2805 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2806 resp = (struct hclge_sfp_info_cmd *)desc.data;
2808 resp->query_type = QUERY_ACTIVE_SPEED;
2810 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2811 if (ret == -EOPNOTSUPP) {
2812 dev_warn(&hdev->pdev->dev,
2813 "IMP does not support get SFP info %d\n", ret);
2816 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2820 /* In some cases, the mac speed queried from IMP may be 0; it should
2821 * not be written to mac->speed.
2823 if (!le32_to_cpu(resp->speed))
2826 mac->speed = le32_to_cpu(resp->speed);
2827 /* if resp->speed_ability is 0, it means the firmware is an old
2828 * version, so do not update these params
2830 if (resp->speed_ability) {
2831 mac->module_type = le32_to_cpu(resp->module_type);
2832 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2833 mac->autoneg = resp->autoneg;
2834 mac->support_autoneg = resp->autoneg_ability;
2835 mac->speed_type = QUERY_ACTIVE_SPEED;
2836 if (!resp->active_fec)
2839 mac->fec_mode = BIT(resp->active_fec);
2841 mac->speed_type = QUERY_SFP_SPEED;
2847 static int hclge_update_port_info(struct hclge_dev *hdev)
2849 struct hclge_mac *mac = &hdev->hw.mac;
2850 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2853 /* get the port info from the SFP cmd if not a copper port */
2854 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2857 /* if IMP does not support getting SFP/qSFP info, return directly */
2858 if (!hdev->support_sfp_query)
2861 if (hdev->pdev->revision >= 0x21)
2862 ret = hclge_get_sfp_info(hdev, mac);
2864 ret = hclge_get_sfp_speed(hdev, &speed);
2866 if (ret == -EOPNOTSUPP) {
2867 hdev->support_sfp_query = false;
2873 if (hdev->pdev->revision >= 0x21) {
2874 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2875 hclge_update_port_capability(mac);
2878 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2881 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2882 return 0; /* do nothing if no SFP */
2884 /* must config full duplex for SFP */
2885 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2889 static int hclge_get_status(struct hnae3_handle *handle)
2891 struct hclge_vport *vport = hclge_get_vport(handle);
2892 struct hclge_dev *hdev = vport->back;
2894 hclge_update_link_status(hdev);
2896 return hdev->hw.mac.link;
2899 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2901 if (!pci_num_vf(hdev->pdev)) {
2902 dev_err(&hdev->pdev->dev,
2903 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2907 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2908 dev_err(&hdev->pdev->dev,
2909 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2910 vf, pci_num_vf(hdev->pdev));
2914 /* VFs start from 1 in vport */
2915 vf += HCLGE_VF_VPORT_START_NUM;
2916 return &hdev->vport[vf];
2919 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2920 struct ifla_vf_info *ivf)
2922 struct hclge_vport *vport = hclge_get_vport(handle);
2923 struct hclge_dev *hdev = vport->back;
2925 vport = hclge_get_vf_vport(hdev, vf);
2930 ivf->linkstate = vport->vf_info.link_state;
2931 ivf->spoofchk = vport->vf_info.spoofchk;
2932 ivf->trusted = vport->vf_info.trusted;
2933 ivf->min_tx_rate = 0;
2934 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2935 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2936 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2937 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2938 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2943 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2946 struct hclge_vport *vport = hclge_get_vport(handle);
2947 struct hclge_dev *hdev = vport->back;
2949 vport = hclge_get_vf_vport(hdev, vf);
2953 vport->vf_info.link_state = link_state;
2958 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2960 u32 cmdq_src_reg, msix_src_reg;
2962 /* fetch the events from their corresponding regs */
2963 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2964 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2966 /* Assumption: if by any chance reset and mailbox events are reported
2967 * together, we will only process the reset event in this pass and
2968 * defer the processing of the mailbox events. Since we have not
2969 * cleared the RX CMDQ event this time, we will receive another
2970 * interrupt from H/W just for the mailbox.
2972 * check for vector0 reset event sources
2974 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
2975 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2976 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2977 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2978 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2979 hdev->rst_stats.imp_rst_cnt++;
2980 return HCLGE_VECTOR0_EVENT_RST;
2983 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
2984 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2985 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2986 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2987 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2988 hdev->rst_stats.global_rst_cnt++;
2989 return HCLGE_VECTOR0_EVENT_RST;
2992 /* check for vector0 msix event source */
2993 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2994 *clearval = msix_src_reg;
2995 return HCLGE_VECTOR0_EVENT_ERR;
2998 /* check for vector0 mailbox(=CMDQ RX) event source */
2999 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3000 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3001 *clearval = cmdq_src_reg;
3002 return HCLGE_VECTOR0_EVENT_MBX;
3005 /* print other vector0 event source */
3006 dev_info(&hdev->pdev->dev,
3007 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3008 cmdq_src_reg, msix_src_reg);
3009 *clearval = msix_src_reg;
3011 return HCLGE_VECTOR0_EVENT_OTHER;
3014 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3017 switch (event_type) {
3018 case HCLGE_VECTOR0_EVENT_RST:
3019 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3021 case HCLGE_VECTOR0_EVENT_MBX:
3022 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3029 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3031 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3032 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3033 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3034 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3035 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3038 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3040 writel(enable ? 1 : 0, vector->addr);
3043 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3045 struct hclge_dev *hdev = data;
3049 hclge_enable_vector(&hdev->misc_vector, false);
3050 event_cause = hclge_check_event_cause(hdev, &clearval);
3052 /* vector 0 interrupt is shared with reset and mailbox source events. */
3053 switch (event_cause) {
3054 case HCLGE_VECTOR0_EVENT_ERR:
3055 /* we do not know what type of reset is required now. This could
3056 * only be decided after we fetch the type of errors which
3057 * caused this event. Therefore, we will do the below for now:
3058 * 1. Assert the HNAE3_UNKNOWN_RESET type of reset. This means we
3059 * have deferred the type of reset to be used.
3060 * 2. Schedule the reset service task.
3061 * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
3062 * will fetch the correct type of reset. This is done by
3063 * first decoding the types of errors.
3065 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3067 case HCLGE_VECTOR0_EVENT_RST:
3068 hclge_reset_task_schedule(hdev);
3070 case HCLGE_VECTOR0_EVENT_MBX:
3071 /* If we are here then,
3072 * 1. Either we are not handling any mbx task and we are not
3075 * 2. We could be handling a mbx task but nothing more is
3077 * In both cases, we should schedule the mbx task as there are
3078 * more mbx messages reported by this interrupt.
3080 hclge_mbx_task_schedule(hdev);
3083 dev_warn(&hdev->pdev->dev,
3084 "received unknown or unhandled event of vector0\n");
3088 hclge_clear_event_cause(hdev, event_cause, clearval);
3090 /* Enable the interrupt if it is not caused by reset. When
3091 * clearval equals 0, the interrupt status may have been
3092 * cleared by hardware before the driver read the status register;
3093 * in this case, the vector0 interrupt should also be enabled.
3096 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3097 hclge_enable_vector(&hdev->misc_vector, true);
3103 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3105 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3106 dev_warn(&hdev->pdev->dev,
3107 "vector(vector_id %d) has been freed.\n", vector_id);
3111 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3112 hdev->num_msi_left += 1;
3113 hdev->num_msi_used -= 1;
3116 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3118 struct hclge_misc_vector *vector = &hdev->misc_vector;
3120 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3122 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3123 hdev->vector_status[0] = 0;
3125 hdev->num_msi_left -= 1;
3126 hdev->num_msi_used += 1;
3129 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3130 const cpumask_t *mask)
3132 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3135 cpumask_copy(&hdev->affinity_mask, mask);
3138 static void hclge_irq_affinity_release(struct kref *ref)
3142 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3144 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3145 &hdev->affinity_mask);
3147 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3148 hdev->affinity_notify.release = hclge_irq_affinity_release;
3149 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3150 &hdev->affinity_notify);
3153 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3155 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3156 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3159 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3163 hclge_get_misc_vector(hdev);
3165 /* this would be explicitly freed in the end */
3166 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3167 HCLGE_NAME, pci_name(hdev->pdev));
3168 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3169 0, hdev->misc_vector.name, hdev);
3171 hclge_free_vector(hdev, 0);
3172 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3173 hdev->misc_vector.vector_irq);
3179 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3181 free_irq(hdev->misc_vector.vector_irq, hdev);
3182 hclge_free_vector(hdev, 0);
3185 int hclge_notify_client(struct hclge_dev *hdev,
3186 enum hnae3_reset_notify_type type)
3188 struct hnae3_client *client = hdev->nic_client;
3191 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3194 if (!client->ops->reset_notify)
3197 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3198 struct hnae3_handle *handle = &hdev->vport[i].nic;
3201 ret = client->ops->reset_notify(handle, type);
3203 dev_err(&hdev->pdev->dev,
3204 "notify nic client failed %d(%d)\n", type, ret);
3212 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3213 enum hnae3_reset_notify_type type)
3215 struct hnae3_client *client = hdev->roce_client;
3219 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3222 if (!client->ops->reset_notify)
3225 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3226 struct hnae3_handle *handle = &hdev->vport[i].roce;
3228 ret = client->ops->reset_notify(handle, type);
3230 dev_err(&hdev->pdev->dev,
3231 "notify roce client failed %d(%d)",
3240 static int hclge_reset_wait(struct hclge_dev *hdev)
3242 #define HCLGE_RESET_WAIT_MS 100
3243 #define HCLGE_RESET_WAIT_CNT 350
3245 u32 val, reg, reg_bit;
3248 switch (hdev->reset_type) {
3249 case HNAE3_IMP_RESET:
3250 reg = HCLGE_GLOBAL_RESET_REG;
3251 reg_bit = HCLGE_IMP_RESET_BIT;
3253 case HNAE3_GLOBAL_RESET:
3254 reg = HCLGE_GLOBAL_RESET_REG;
3255 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3257 case HNAE3_FUNC_RESET:
3258 reg = HCLGE_FUN_RST_ING;
3259 reg_bit = HCLGE_FUN_RST_ING_B;
3262 dev_err(&hdev->pdev->dev,
3263 "Wait for unsupported reset type: %d\n",
3268 val = hclge_read_dev(&hdev->hw, reg);
3269 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3270 msleep(HCLGE_RESET_WAIT_MS);
3271 val = hclge_read_dev(&hdev->hw, reg);
3275 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3276 dev_warn(&hdev->pdev->dev,
3277 "Wait for reset timeout: %d\n", hdev->reset_type);
3284 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3286 struct hclge_vf_rst_cmd *req;
3287 struct hclge_desc desc;
3289 req = (struct hclge_vf_rst_cmd *)desc.data;
3290 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3291 req->dest_vfid = func_id;
3296 return hclge_cmd_send(&hdev->hw, &desc, 1);
3299 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3303 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3304 struct hclge_vport *vport = &hdev->vport[i];
3307 /* Send cmd to set/clear VF's FUNC_RST_ING */
3308 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3310 dev_err(&hdev->pdev->dev,
3311 "set vf(%u) rst failed %d!\n",
3312 vport->vport_id, ret);
3316 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3319 /* Inform VF to process the reset.
3320 * hclge_inform_reset_assert_to_vf may fail if VF
3321 * driver is not loaded.
3323 ret = hclge_inform_reset_assert_to_vf(vport);
3325 dev_warn(&hdev->pdev->dev,
3326 "inform reset to vf(%u) failed %d!\n",
3327 vport->vport_id, ret);
3333 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3335 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3336 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3337 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3340 hclge_mbx_handler(hdev);
3342 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3345 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3347 struct hclge_pf_rst_sync_cmd *req;
3348 struct hclge_desc desc;
3352 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3353 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3356 /* the VF needs to down its netdev via mbx during PF or FLR reset */
3357 hclge_mailbox_service_task(hdev);
3359 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3360 /* for compatibility with old firmware, wait
3361 * 100 ms for the VF to stop IO
3363 if (ret == -EOPNOTSUPP) {
3364 msleep(HCLGE_RESET_SYNC_TIME);
3367 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3370 } else if (req->all_vf_ready) {
3373 msleep(HCLGE_PF_RESET_SYNC_TIME);
3374 hclge_cmd_reuse_desc(&desc, true);
3375 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3377 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3380 void hclge_report_hw_error(struct hclge_dev *hdev,
3381 enum hnae3_hw_error_type type)
3383 struct hnae3_client *client = hdev->nic_client;
3386 if (!client || !client->ops->process_hw_error ||
3387 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3390 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3391 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3394 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3398 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3399 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3400 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3401 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3402 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3405 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3406 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3407 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3408 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3412 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3414 struct hclge_desc desc;
3415 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3418 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3419 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3420 req->fun_reset_vfid = func_id;
3422 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3424 dev_err(&hdev->pdev->dev,
3425 "send function reset cmd fail, status =%d\n", ret);
3430 static void hclge_do_reset(struct hclge_dev *hdev)
3432 struct hnae3_handle *handle = &hdev->vport[0].nic;
3433 struct pci_dev *pdev = hdev->pdev;
3436 if (hclge_get_hw_reset_stat(handle)) {
3437 dev_info(&pdev->dev, "hardware reset not finish\n");
3438 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3439 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3440 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3444 switch (hdev->reset_type) {
3445 case HNAE3_GLOBAL_RESET:
3446 dev_info(&pdev->dev, "global reset requested\n");
3447 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3448 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3449 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3451 case HNAE3_FUNC_RESET:
3452 dev_info(&pdev->dev, "PF reset requested\n");
3453 /* schedule again to check later */
3454 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3455 hclge_reset_task_schedule(hdev);
3458 dev_warn(&pdev->dev,
3459 "unsupported reset type: %d\n", hdev->reset_type);
3464 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3465 unsigned long *addr)
3467 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3468 struct hclge_dev *hdev = ae_dev->priv;
3470 /* first, resolve any unknown reset type to the known type(s) */
3471 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3472 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3473 HCLGE_MISC_VECTOR_INT_STS);
3474 /* we will intentionally ignore any errors from this function
3475 * as we will end up in *some* reset request in any case
3477 if (hclge_handle_hw_msix_error(hdev, addr))
3478 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3481 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3482 /* We deferred the clearing of the error event which caused
3483 * the interrupt, since it was not possible to do that in
3484 * interrupt context (and this is the reason we introduced the
3485 * new UNKNOWN reset type). Now that the errors have been
3486 * handled and cleared in hardware, we can safely enable
3487 * interrupts. This is an exception to the norm.
3489 hclge_enable_vector(&hdev->misc_vector, true);
3492 /* return the highest priority reset level amongst all */
3493 if (test_bit(HNAE3_IMP_RESET, addr)) {
3494 rst_level = HNAE3_IMP_RESET;
3495 clear_bit(HNAE3_IMP_RESET, addr);
3496 clear_bit(HNAE3_GLOBAL_RESET, addr);
3497 clear_bit(HNAE3_FUNC_RESET, addr);
3498 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3499 rst_level = HNAE3_GLOBAL_RESET;
3500 clear_bit(HNAE3_GLOBAL_RESET, addr);
3501 clear_bit(HNAE3_FUNC_RESET, addr);
3502 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3503 rst_level = HNAE3_FUNC_RESET;
3504 clear_bit(HNAE3_FUNC_RESET, addr);
3505 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3506 rst_level = HNAE3_FLR_RESET;
3507 clear_bit(HNAE3_FLR_RESET, addr);
3510 if (hdev->reset_type != HNAE3_NONE_RESET &&
3511 rst_level < hdev->reset_type)
3512 return HNAE3_NONE_RESET;
3517 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3521 switch (hdev->reset_type) {
3522 case HNAE3_IMP_RESET:
3523 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3525 case HNAE3_GLOBAL_RESET:
3526 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3535 /* For revision 0x20, the reset interrupt source
3536 * can only be cleared after the hardware reset is done
3538 if (hdev->pdev->revision == 0x20)
3539 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3542 hclge_enable_vector(&hdev->misc_vector, true);
3545 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3549 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3551 reg_val |= HCLGE_NIC_SW_RST_RDY;
3553 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3555 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3558 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3562 ret = hclge_set_all_vf_rst(hdev, true);
3566 hclge_func_reset_sync_vf(hdev);
3571 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3576 switch (hdev->reset_type) {
3577 case HNAE3_FUNC_RESET:
3578 ret = hclge_func_reset_notify_vf(hdev);
3582 ret = hclge_func_reset_cmd(hdev, 0);
3584 dev_err(&hdev->pdev->dev,
3585 "asserting function reset fail %d!\n", ret);
3589 /* After performing PF reset, it is not necessary to do the
3590 * mailbox handling or send any command to firmware, because
3591 * any mailbox handling or command to firmware is only valid
3592 * after hclge_cmd_init is called.
3594 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3595 hdev->rst_stats.pf_rst_cnt++;
3597 case HNAE3_FLR_RESET:
3598 ret = hclge_func_reset_notify_vf(hdev);
3602 case HNAE3_IMP_RESET:
3603 hclge_handle_imp_error(hdev);
3604 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3605 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3606 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3612 /* inform hardware that preparatory work is done */
3613 msleep(HCLGE_RESET_SYNC_TIME);
3614 hclge_reset_handshake(hdev, true);
3615 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3620 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3622 #define MAX_RESET_FAIL_CNT 5
3624 if (hdev->reset_pending) {
3625 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3626 hdev->reset_pending);
3628 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3629 HCLGE_RESET_INT_M) {
3630 dev_info(&hdev->pdev->dev,
3631 "reset failed because new reset interrupt\n");
3632 hclge_clear_reset_cause(hdev);
3634 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3635 hdev->rst_stats.reset_fail_cnt++;
3636 set_bit(hdev->reset_type, &hdev->reset_pending);
3637 dev_info(&hdev->pdev->dev,
3638 "re-schedule reset task(%u)\n",
3639 hdev->rst_stats.reset_fail_cnt);
3643 hclge_clear_reset_cause(hdev);
3645 /* recover the handshake status when the reset fails */
3646 hclge_reset_handshake(hdev, true);
3648 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3650 hclge_dbg_dump_rst_info(hdev);
3652 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3657 static int hclge_set_rst_done(struct hclge_dev *hdev)
3659 struct hclge_pf_rst_done_cmd *req;
3660 struct hclge_desc desc;
3663 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3664 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3665 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3667 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3668 /* To be compatible with the old firmware, which does not support
3669 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3672 if (ret == -EOPNOTSUPP) {
3673 dev_warn(&hdev->pdev->dev,
3674 "current firmware does not support command(0x%x)!\n",
3675 HCLGE_OPC_PF_RST_DONE);
3678 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3685 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3689 switch (hdev->reset_type) {
3690 case HNAE3_FUNC_RESET:
3692 case HNAE3_FLR_RESET:
3693 ret = hclge_set_all_vf_rst(hdev, false);
3695 case HNAE3_GLOBAL_RESET:
3697 case HNAE3_IMP_RESET:
3698 ret = hclge_set_rst_done(hdev);
3704 /* clear the handshake status after re-initialization is done */
3705 hclge_reset_handshake(hdev, false);
3710 static int hclge_reset_stack(struct hclge_dev *hdev)
3714 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3718 ret = hclge_reset_ae_dev(hdev->ae_dev);
3722 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3725 static int hclge_reset_prepare(struct hclge_dev *hdev)
3729 hdev->rst_stats.reset_cnt++;
3730 /* perform reset of the stack & ae device for a client */
3731 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3736 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3741 return hclge_reset_prepare_wait(hdev);
3744 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3746 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3747 enum hnae3_reset_type reset_level;
3750 hdev->rst_stats.hw_reset_done_cnt++;
3752 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3757 ret = hclge_reset_stack(hdev);
3762 hclge_clear_reset_cause(hdev);
3764 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3765 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3769 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3772 ret = hclge_reset_prepare_up(hdev);
3777 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3782 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3786 hdev->last_reset_time = jiffies;
3787 hdev->rst_stats.reset_fail_cnt = 0;
3788 hdev->rst_stats.reset_done_cnt++;
3789 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3791 /* if default_reset_request has a higher-level reset request,
3792 * it should be handled as soon as possible, since some errors
3793 * need this kind of reset to be fixed.
3795 reset_level = hclge_get_reset_level(ae_dev,
3796 &hdev->default_reset_request);
3797 if (reset_level != HNAE3_NONE_RESET)
3798 set_bit(reset_level, &hdev->reset_request);
3803 static void hclge_reset(struct hclge_dev *hdev)
3805 if (hclge_reset_prepare(hdev))
3808 if (hclge_reset_wait(hdev))
3811 if (hclge_reset_rebuild(hdev))
3817 if (hclge_reset_err_handle(hdev))
3818 hclge_reset_task_schedule(hdev);
3821 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3823 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3824 struct hclge_dev *hdev = ae_dev->priv;
3826 /* We might end up getting called broadly because of the 2 cases below:
3827 * 1. A recoverable error was conveyed through APEI and the only way
3828 * to bring back normalcy is to reset.
3829 * 2. A new reset request from the stack due to timeout
3831 * For the first case, the error event might not have an ae handle
3832 * available. Check if this is a new reset request and we are not here
3833 * just because the last reset attempt did not succeed and the watchdog
3834 * hit us again. We will know this if the last reset request did not
3835 * occur very recently (watchdog timer = 5*HZ; check after a
3836 * sufficiently large time, say 4*5*HZ). For a new request we reset
3837 * the "reset level" to PF reset. And if it is a repeat of the most
3838 * recent reset request, we want to make sure we throttle it; we will
3839 * not allow it again before 3*HZ has elapsed.
3842 handle = &hdev->vport[0].nic;
3844 if (time_before(jiffies, (hdev->last_reset_time +
3845 HCLGE_RESET_INTERVAL))) {
3846 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3848 } else if (hdev->default_reset_request) {
3850 hclge_get_reset_level(ae_dev,
3851 &hdev->default_reset_request);
3852 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3853 hdev->reset_level = HNAE3_FUNC_RESET;
3856 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3859 /* request reset & schedule reset task */
3860 set_bit(hdev->reset_level, &hdev->reset_request);
3861 hclge_reset_task_schedule(hdev);
3863 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3864 hdev->reset_level++;
3867 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3868 enum hnae3_reset_type rst_type)
3870 struct hclge_dev *hdev = ae_dev->priv;
3872 set_bit(rst_type, &hdev->default_reset_request);
3875 static void hclge_reset_timer(struct timer_list *t)
3877 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3879 /* if default_reset_request has no value, it means that this reset
3880 * request has already been handled, so just return here
3882 if (!hdev->default_reset_request)
3885 dev_info(&hdev->pdev->dev,
3886 "triggering reset in reset timer\n");
3887 hclge_reset_event(hdev->pdev, NULL);
3890 static void hclge_reset_subtask(struct hclge_dev *hdev)
3892 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3894 /* check if there is any ongoing reset in the hardware. This status can
3895 * be checked from reset_pending. If there is, we need to wait for the
3896 * hardware to complete the reset.
3897 * a. If we are able to figure out in reasonable time that the hardware
3898 * has fully reset, then we can proceed with driver, client
3900 * b. else, we can come back later to check this status so re-sched
3903 hdev->last_reset_time = jiffies;
3904 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3905 if (hdev->reset_type != HNAE3_NONE_RESET)
3908 /* check if we got any *new* reset requests to be honored */
3909 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3910 if (hdev->reset_type != HNAE3_NONE_RESET)
3911 hclge_do_reset(hdev);
3913 hdev->reset_type = HNAE3_NONE_RESET;
3916 static void hclge_reset_service_task(struct hclge_dev *hdev)
3918 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3921 down(&hdev->reset_sem);
3922 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3924 hclge_reset_subtask(hdev);
3926 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3927 up(&hdev->reset_sem);
3930 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3934 /* start from vport 1, since the PF (vport 0) is always alive */
3935 for (i = 1; i < hdev->num_alloc_vport; i++) {
3936 struct hclge_vport *vport = &hdev->vport[i];
3938 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3939 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3941 /* If the VF is not alive, reset its mps to the default value */
3942 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3943 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3947 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3949 unsigned long delta = round_jiffies_relative(HZ);
3951 /* Always handle the link updating to make sure link state is
3952 * updated when it is triggered by mbx.
3954 hclge_update_link_status(hdev);
3955 hclge_sync_mac_table(hdev);
3956 hclge_sync_promisc_mode(hdev);
3958 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3959 delta = jiffies - hdev->last_serv_processed;
3961 if (delta < round_jiffies_relative(HZ)) {
3962 delta = round_jiffies_relative(HZ) - delta;
3967 hdev->serv_processed_cnt++;
3968 hclge_update_vport_alive(hdev);
3970 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3971 hdev->last_serv_processed = jiffies;
3975 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3976 hclge_update_stats_for_all(hdev);
3978 hclge_update_port_info(hdev);
3979 hclge_sync_vlan_filter(hdev);
3981 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3982 hclge_rfs_filter_expire(hdev);
3984 hdev->last_serv_processed = jiffies;
3987 hclge_task_schedule(hdev, delta);
3990 static void hclge_service_task(struct work_struct *work)
3992 struct hclge_dev *hdev =
3993 container_of(work, struct hclge_dev, service_task.work);
3995 hclge_reset_service_task(hdev);
3996 hclge_mailbox_service_task(hdev);
3997 hclge_periodic_service_task(hdev);
3999 /* Handle reset and mbx again in case the periodic task delays the
4000 * handling by calling hclge_task_schedule() in
4001 * hclge_periodic_service_task().
4003 hclge_reset_service_task(hdev);
4004 hclge_mailbox_service_task(hdev);
4007 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4009 /* VF handle has no client */
4010 if (!handle->client)
4011 return container_of(handle, struct hclge_vport, nic);
4012 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4013 return container_of(handle, struct hclge_vport, roce);
4015 return container_of(handle, struct hclge_vport, nic);
4018 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4019 struct hnae3_vector_info *vector_info)
4021 struct hclge_vport *vport = hclge_get_vport(handle);
4022 struct hnae3_vector_info *vector = vector_info;
4023 struct hclge_dev *hdev = vport->back;
4027 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4028 vector_num = min(hdev->num_msi_left, vector_num);
4030 for (j = 0; j < vector_num; j++) {
4031 for (i = 1; i < hdev->num_msi; i++) {
4032 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4033 vector->vector = pci_irq_vector(hdev->pdev, i);
4034 vector->io_addr = hdev->hw.io_base +
4035 HCLGE_VECTOR_REG_BASE +
4036 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4038 HCLGE_VECTOR_VF_OFFSET;
4039 hdev->vector_status[i] = vport->vport_id;
4040 hdev->vector_irq[i] = vector->vector;
4049 hdev->num_msi_left -= alloc;
4050 hdev->num_msi_used += alloc;
4055 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4059 for (i = 0; i < hdev->num_msi; i++)
4060 if (vector == hdev->vector_irq[i])
4066 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4068 struct hclge_vport *vport = hclge_get_vport(handle);
4069 struct hclge_dev *hdev = vport->back;
4072 vector_id = hclge_get_vector_index(hdev, vector);
4073 if (vector_id < 0) {
4074 dev_err(&hdev->pdev->dev,
4075 "Get vector index fail. vector = %d\n", vector);
4079 hclge_free_vector(hdev, vector_id);
4084 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4086 return HCLGE_RSS_KEY_SIZE;
4089 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4091 return HCLGE_RSS_IND_TBL_SIZE;
4094 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4095 const u8 hfunc, const u8 *key)
4097 struct hclge_rss_config_cmd *req;
4098 unsigned int key_offset = 0;
4099 struct hclge_desc desc;
4104 key_counts = HCLGE_RSS_KEY_SIZE;
4105 req = (struct hclge_rss_config_cmd *)desc.data;
4107 while (key_counts) {
4108 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4111 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4112 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4114 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4115 memcpy(req->hash_key,
4116 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4118 key_counts -= key_size;
4120 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4122 dev_err(&hdev->pdev->dev,
4123 "Configure RSS config fail, status = %d\n",
4131 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4133 struct hclge_rss_indirection_table_cmd *req;
4134 struct hclge_desc desc;
4138 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4140 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4141 hclge_cmd_setup_basic_desc
4142 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4144 req->start_table_index =
4145 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4146 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4148 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4149 req->rss_result[j] =
4150 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4152 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4154 dev_err(&hdev->pdev->dev,
4155 "Configure rss indir table fail,status = %d\n",
4163 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4164 u16 *tc_size, u16 *tc_offset)
4166 struct hclge_rss_tc_mode_cmd *req;
4167 struct hclge_desc desc;
4171 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4172 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4174 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4177 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4178 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4179 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4180 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4181 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4183 req->rss_tc_mode[i] = cpu_to_le16(mode);
4186 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4188 dev_err(&hdev->pdev->dev,
4189 "Configure rss tc mode fail, status = %d\n", ret);
4194 static void hclge_get_rss_type(struct hclge_vport *vport)
4196 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4197 vport->rss_tuple_sets.ipv4_udp_en ||
4198 vport->rss_tuple_sets.ipv4_sctp_en ||
4199 vport->rss_tuple_sets.ipv6_tcp_en ||
4200 vport->rss_tuple_sets.ipv6_udp_en ||
4201 vport->rss_tuple_sets.ipv6_sctp_en)
4202 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4203 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4204 vport->rss_tuple_sets.ipv6_fragment_en)
4205 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4207 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4210 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4212 struct hclge_rss_input_tuple_cmd *req;
4213 struct hclge_desc desc;
4216 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4218 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4220 /* Get the tuple cfg from the PF */
4221 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4222 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4223 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4224 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4225 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4226 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4227 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4228 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4229 hclge_get_rss_type(&hdev->vport[0]);
4230 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4232 dev_err(&hdev->pdev->dev,
4233 "Configure rss input fail, status = %d\n", ret);
4237 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4240 struct hclge_vport *vport = hclge_get_vport(handle);
4243 /* Get hash algorithm */
4245 switch (vport->rss_algo) {
4246 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4247 *hfunc = ETH_RSS_HASH_TOP;
4249 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4250 *hfunc = ETH_RSS_HASH_XOR;
4253 *hfunc = ETH_RSS_HASH_UNKNOWN;
4258 /* Get the RSS Key required by the user */
4260 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4262 /* Get indirect table */
4264 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4265 indir[i] = vport->rss_indirection_tbl[i];
4270 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4271 const u8 *key, const u8 hfunc)
4273 struct hclge_vport *vport = hclge_get_vport(handle);
4274 struct hclge_dev *hdev = vport->back;
4278 /* Set the RSS Hash Key if specified by the user */
4281 case ETH_RSS_HASH_TOP:
4282 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4284 case ETH_RSS_HASH_XOR:
4285 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4287 case ETH_RSS_HASH_NO_CHANGE:
4288 hash_algo = vport->rss_algo;
4294 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4298 /* Update the shadow RSS key with the user specified key */
4299 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4300 vport->rss_algo = hash_algo;
4303 /* Update the shadow RSS table with user specified qids */
4304 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4305 vport->rss_indirection_tbl[i] = indir[i];
4307 /* Update the hardware */
4308 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4311 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4313 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4315 if (nfc->data & RXH_L4_B_2_3)
4316 hash_sets |= HCLGE_D_PORT_BIT;
4318 hash_sets &= ~HCLGE_D_PORT_BIT;
4320 if (nfc->data & RXH_IP_SRC)
4321 hash_sets |= HCLGE_S_IP_BIT;
4323 hash_sets &= ~HCLGE_S_IP_BIT;
4325 if (nfc->data & RXH_IP_DST)
4326 hash_sets |= HCLGE_D_IP_BIT;
4328 hash_sets &= ~HCLGE_D_IP_BIT;
4330 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4331 hash_sets |= HCLGE_V_TAG_BIT;
4336 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4337 struct ethtool_rxnfc *nfc)
4339 struct hclge_vport *vport = hclge_get_vport(handle);
4340 struct hclge_dev *hdev = vport->back;
4341 struct hclge_rss_input_tuple_cmd *req;
4342 struct hclge_desc desc;
4346 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4347 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4350 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4351 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4353 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4354 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4355 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4356 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4357 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4358 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4359 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4360 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4362 tuple_sets = hclge_get_rss_hash_bits(nfc);
4363 switch (nfc->flow_type) {
4365 req->ipv4_tcp_en = tuple_sets;
4368 req->ipv6_tcp_en = tuple_sets;
4371 req->ipv4_udp_en = tuple_sets;
4374 req->ipv6_udp_en = tuple_sets;
4377 req->ipv4_sctp_en = tuple_sets;
4380 if ((nfc->data & RXH_L4_B_0_1) ||
4381 (nfc->data & RXH_L4_B_2_3))
4384 req->ipv6_sctp_en = tuple_sets;
4387 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4390 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4396 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4398 dev_err(&hdev->pdev->dev,
4399 "Set rss tuple fail, status = %d\n", ret);
4403 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4404 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4405 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4406 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4407 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4408 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4409 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4410 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4411 hclge_get_rss_type(vport);
4415 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4416 struct ethtool_rxnfc *nfc)
4418 struct hclge_vport *vport = hclge_get_vport(handle);
4423 switch (nfc->flow_type) {
4425 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4428 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4431 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4434 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4437 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4440 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4444 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4453 if (tuple_sets & HCLGE_D_PORT_BIT)
4454 nfc->data |= RXH_L4_B_2_3;
4455 if (tuple_sets & HCLGE_S_PORT_BIT)
4456 nfc->data |= RXH_L4_B_0_1;
4457 if (tuple_sets & HCLGE_D_IP_BIT)
4458 nfc->data |= RXH_IP_DST;
4459 if (tuple_sets & HCLGE_S_IP_BIT)
4460 nfc->data |= RXH_IP_SRC;
4465 static int hclge_get_tc_size(struct hnae3_handle *handle)
4467 struct hclge_vport *vport = hclge_get_vport(handle);
4468 struct hclge_dev *hdev = vport->back;
4470 return hdev->rss_size_max;
4473 int hclge_rss_init_hw(struct hclge_dev *hdev)
4475 struct hclge_vport *vport = hdev->vport;
4476 u8 *rss_indir = vport[0].rss_indirection_tbl;
4477 u16 rss_size = vport[0].alloc_rss_size;
4478 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4479 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4480 u8 *key = vport[0].rss_hash_key;
4481 u8 hfunc = vport[0].rss_algo;
4482 u16 tc_valid[HCLGE_MAX_TC_NUM];
4487 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4491 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4495 ret = hclge_set_rss_input_tuple(hdev);
4499 /* Each TC has the same queue size, and the tc_size set to hardware is
4500 * the log2 of the roundup power of two of rss_size; the actual queue
4501 * size is limited by the indirection table.
4503 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4504 dev_err(&hdev->pdev->dev,
4505 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4510 roundup_size = roundup_pow_of_two(rss_size);
4511 roundup_size = ilog2(roundup_size);
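/* e.g. rss_size = 24: roundup_pow_of_two() gives 32 and ilog2() gives 5,
 * so hardware is told tc_size = 5, i.e. 2^5 = 32 queue slots per TC
 */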
4513 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4516 if (!(hdev->hw_tc_map & BIT(i)))
4520 tc_size[i] = roundup_size;
4521 tc_offset[i] = rss_size * i;
4524 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4527 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4529 struct hclge_vport *vport = hdev->vport;
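/* spread the indirection table entries round-robin across each vport's
 * allocated RSS queues
 */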
4532 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4533 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4534 vport[j].rss_indirection_tbl[i] =
4535 i % vport[j].alloc_rss_size;
4539 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4541 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4542 struct hclge_vport *vport = hdev->vport;
4544 if (hdev->pdev->revision >= 0x21)
4545 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4547 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4548 vport[i].rss_tuple_sets.ipv4_tcp_en =
4549 HCLGE_RSS_INPUT_TUPLE_OTHER;
4550 vport[i].rss_tuple_sets.ipv4_udp_en =
4551 HCLGE_RSS_INPUT_TUPLE_OTHER;
4552 vport[i].rss_tuple_sets.ipv4_sctp_en =
4553 HCLGE_RSS_INPUT_TUPLE_SCTP;
4554 vport[i].rss_tuple_sets.ipv4_fragment_en =
4555 HCLGE_RSS_INPUT_TUPLE_OTHER;
4556 vport[i].rss_tuple_sets.ipv6_tcp_en =
4557 HCLGE_RSS_INPUT_TUPLE_OTHER;
4558 vport[i].rss_tuple_sets.ipv6_udp_en =
4559 HCLGE_RSS_INPUT_TUPLE_OTHER;
4560 vport[i].rss_tuple_sets.ipv6_sctp_en =
4561 HCLGE_RSS_INPUT_TUPLE_SCTP;
4562 vport[i].rss_tuple_sets.ipv6_fragment_en =
4563 HCLGE_RSS_INPUT_TUPLE_OTHER;
4565 vport[i].rss_algo = rss_algo;
4567 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4568 HCLGE_RSS_KEY_SIZE);
4571 hclge_rss_indir_init_cfg(hdev);
4574 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4575 int vector_id, bool en,
4576 struct hnae3_ring_chain_node *ring_chain)
4578 struct hclge_dev *hdev = vport->back;
4579 struct hnae3_ring_chain_node *node;
4580 struct hclge_desc desc;
4581 struct hclge_ctrl_vector_chain_cmd *req =
4582 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4583 enum hclge_cmd_status status;
4584 enum hclge_opcode_type op;
4585 u16 tqp_type_and_id;
4588 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4589 hclge_cmd_setup_basic_desc(&desc, op, false);
4590 req->int_vector_id = vector_id;
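/* walk the ring chain, packing one TQP entry per descriptor slot; a
 * descriptor holds at most HCLGE_VECTOR_ELEMENTS_PER_CMD entries, so it
 * is sent and re-initialized whenever it fills up
 */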
4593 for (node = ring_chain; node; node = node->next) {
4594 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4595 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4597 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4598 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4599 HCLGE_TQP_ID_S, node->tqp_index);
4600 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4602 hnae3_get_field(node->int_gl_idx,
4603 HNAE3_RING_GL_IDX_M,
4604 HNAE3_RING_GL_IDX_S));
4605 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4606 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4607 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4608 req->vfid = vport->vport_id;
4610 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4612 dev_err(&hdev->pdev->dev,
4613 "Map TQP fail, status is %d.\n",
4619 hclge_cmd_setup_basic_desc(&desc,
4622 req->int_vector_id = vector_id;
4627 req->int_cause_num = i;
4628 req->vfid = vport->vport_id;
4629 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4631 dev_err(&hdev->pdev->dev,
4632 "Map TQP fail, status is %d.\n", status);
4640 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4641 struct hnae3_ring_chain_node *ring_chain)
4643 struct hclge_vport *vport = hclge_get_vport(handle);
4644 struct hclge_dev *hdev = vport->back;
4647 vector_id = hclge_get_vector_index(hdev, vector);
4648 if (vector_id < 0) {
4649 dev_err(&hdev->pdev->dev,
4650 "failed to get vector index. vector=%d\n", vector);
4654 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4657 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4658 struct hnae3_ring_chain_node *ring_chain)
4660 struct hclge_vport *vport = hclge_get_vport(handle);
4661 struct hclge_dev *hdev = vport->back;
4664 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4667 vector_id = hclge_get_vector_index(hdev, vector);
4668 if (vector_id < 0) {
4669 dev_err(&handle->pdev->dev,
4670 "Get vector index fail. ret =%d\n", vector_id);
4674 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4676 dev_err(&handle->pdev->dev,
4677 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4683 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4684 struct hclge_promisc_param *param)
4686 struct hclge_promisc_cfg_cmd *req;
4687 struct hclge_desc desc;
4690 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4692 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4693 req->vf_id = param->vf_id;
4695 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4696 * pdev revision 0x20; newer revisions support them. Setting these
4697 * two fields does not cause an error when the driver
4698 * sends the command to firmware on revision 0x20.
4700 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4701 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4703 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4705 dev_err(&hdev->pdev->dev,
4706 "failed to set vport %d promisc mode, ret = %d.\n",
4712 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4713 bool en_uc, bool en_mc, bool en_bc,
4719 memset(param, 0, sizeof(struct hclge_promisc_param));
4721 param->enable = HCLGE_PROMISC_EN_UC;
4723 param->enable |= HCLGE_PROMISC_EN_MC;
4725 param->enable |= HCLGE_PROMISC_EN_BC;
4726 param->vf_id = vport_id;
4729 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4730 bool en_mc_pmc, bool en_bc_pmc)
4732 struct hclge_dev *hdev = vport->back;
4733 struct hclge_promisc_param param;
4735 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4737 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4740 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4743 struct hclge_vport *vport = hclge_get_vport(handle);
4744 bool en_bc_pmc = true;
4746 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4747 * is always bypassed. So broadcast promisc should stay disabled until
4748 * the user enables promisc mode
4750 if (handle->pdev->revision == 0x20)
4751 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4753 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4757 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4759 struct hclge_vport *vport = hclge_get_vport(handle);
4760 struct hclge_dev *hdev = vport->back;
4762 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
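/* the periodic service task observes this flag and re-applies the
 * promisc configuration asynchronously
 */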
4765 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4767 struct hclge_get_fd_mode_cmd *req;
4768 struct hclge_desc desc;
4771 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4773 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4775 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4777 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4781 *fd_mode = req->mode;
4786 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4787 u32 *stage1_entry_num,
4788 u32 *stage2_entry_num,
4789 u16 *stage1_counter_num,
4790 u16 *stage2_counter_num)
4792 struct hclge_get_fd_allocation_cmd *req;
4793 struct hclge_desc desc;
4796 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4798 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4800 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4802 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4807 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4808 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4809 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4810 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4815 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4816 enum HCLGE_FD_STAGE stage_num)
4818 struct hclge_set_fd_key_config_cmd *req;
4819 struct hclge_fd_key_cfg *stage;
4820 struct hclge_desc desc;
4823 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4825 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4826 stage = &hdev->fd_cfg.key_cfg[stage_num];
4827 req->stage = stage_num;
4828 req->key_select = stage->key_sel;
4829 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4830 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4831 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4832 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
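/* the command carries the complement: bits set in tuple_mask and
 * meta_data_mask mark tuples that are NOT active
 */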
4833 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4834 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4836 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4838 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4843 static int hclge_init_fd_config(struct hclge_dev *hdev)
4845 #define LOW_2_WORDS 0x03
4846 struct hclge_fd_key_cfg *key_cfg;
4849 if (!hnae3_dev_fd_supported(hdev))
4852 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4856 switch (hdev->fd_cfg.fd_mode) {
4857 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4858 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4860 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4861 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4864 dev_err(&hdev->pdev->dev,
4865 "Unsupported flow director mode %u\n",
4866 hdev->fd_cfg.fd_mode);
4870 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4871 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4872 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4873 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4874 key_cfg->outer_sipv6_word_en = 0;
4875 key_cfg->outer_dipv6_word_en = 0;
4877 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4878 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4879 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4880 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4882 /* If using the max 400bit key, we can support tuples for ether type */
4883 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
4884 key_cfg->tuple_active |=
4885 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4887 /* roce_type is used to filter roce frames
4888 * dst_vport is used to specify the rule
4890 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4892 ret = hclge_get_fd_allocation(hdev,
4893 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4894 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4895 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4896 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4900 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4903 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4904 int loc, u8 *key, bool is_add)
4906 struct hclge_fd_tcam_config_1_cmd *req1;
4907 struct hclge_fd_tcam_config_2_cmd *req2;
4908 struct hclge_fd_tcam_config_3_cmd *req3;
4909 struct hclge_desc desc[3];
4912 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4913 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4914 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4915 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4916 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4918 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4919 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4920 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4922 req1->stage = stage;
4923 req1->xy_sel = sel_x ? 1 : 0;
4924 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4925 req1->index = cpu_to_le32(loc);
4926 req1->entry_vld = sel_x ? is_add : 0;
4929 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4930 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4931 sizeof(req2->tcam_data));
4932 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4933 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4936 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4938 dev_err(&hdev->pdev->dev,
4939 "config tcam key fail, ret=%d\n",
4945 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4946 struct hclge_fd_ad_data *action)
4948 struct hclge_fd_ad_config_cmd *req;
4949 struct hclge_desc desc;
4953 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4955 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4956 req->index = cpu_to_le32(loc);
4959 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4960 action->write_rule_id_to_bd);
4961 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4964 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4965 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4966 action->forward_to_direct_queue);
4967 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4969 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4970 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4971 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4972 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4973 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4974 action->counter_id);
4976 req->ad_data = cpu_to_le64(ad_data);
4977 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4979 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
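/* Convert one tuple of the rule into the TCAM x/y key pair. The
 * calc_x()/calc_y() helper macros fold the tuple value and its mask into
 * the two key halves; MAC addresses are written byte-reversed to match
 * the hardware key layout.
 */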
4984 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4985 struct hclge_fd_rule *rule)
4987 u16 tmp_x_s, tmp_y_s;
4988 u32 tmp_x_l, tmp_y_l;
4991 if (rule->unused_tuple & tuple_bit)
4994 switch (tuple_bit) {
4995 case BIT(INNER_DST_MAC):
4996 for (i = 0; i < ETH_ALEN; i++) {
4997 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4998 rule->tuples_mask.dst_mac[i]);
4999 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5000 rule->tuples_mask.dst_mac[i]);
5004 case BIT(INNER_SRC_MAC):
5005 for (i = 0; i < ETH_ALEN; i++) {
5006 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5007 rule->tuples_mask.src_mac[i]);
5008 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5009 rule->tuples_mask.src_mac[i]);
5013 case BIT(INNER_VLAN_TAG_FST):
5014 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5015 rule->tuples_mask.vlan_tag1);
5016 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5017 rule->tuples_mask.vlan_tag1);
5018 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5019 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5022 case BIT(INNER_ETH_TYPE):
5023 calc_x(tmp_x_s, rule->tuples.ether_proto,
5024 rule->tuples_mask.ether_proto);
5025 calc_y(tmp_y_s, rule->tuples.ether_proto,
5026 rule->tuples_mask.ether_proto);
5027 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5028 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5031 case BIT(INNER_IP_TOS):
5032 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5033 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5036 case BIT(INNER_IP_PROTO):
5037 calc_x(*key_x, rule->tuples.ip_proto,
5038 rule->tuples_mask.ip_proto);
5039 calc_y(*key_y, rule->tuples.ip_proto,
5040 rule->tuples_mask.ip_proto);
5043 case BIT(INNER_SRC_IP):
5044 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5045 rule->tuples_mask.src_ip[IPV4_INDEX]);
5046 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5047 rule->tuples_mask.src_ip[IPV4_INDEX]);
5048 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5049 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5052 case BIT(INNER_DST_IP):
5053 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5054 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5055 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5056 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5057 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5058 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5061 case BIT(INNER_SRC_PORT):
5062 calc_x(tmp_x_s, rule->tuples.src_port,
5063 rule->tuples_mask.src_port);
5064 calc_y(tmp_y_s, rule->tuples.src_port,
5065 rule->tuples_mask.src_port);
5066 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5067 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5070 case BIT(INNER_DST_PORT):
5071 calc_x(tmp_x_s, rule->tuples.dst_port,
5072 rule->tuples_mask.dst_port);
5073 calc_y(tmp_y_s, rule->tuples.dst_port,
5074 rule->tuples_mask.dst_port);
5075 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5076 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5084 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5085 u8 vf_id, u8 network_port_id)
5087 u32 port_number = 0;
5089 if (port_type == HOST_PORT) {
5090 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5092 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5094 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5096 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5097 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5098 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5104 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5105 __le32 *key_x, __le32 *key_y,
5106 struct hclge_fd_rule *rule)
5108 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5109 u8 cur_pos = 0, tuple_size, shift_bits;
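/* pack the active meta data fields into a single 32-bit word;
 * cur_pos tracks how many bits have been consumed so far
 */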
5112 for (i = 0; i < MAX_META_DATA; i++) {
5113 tuple_size = meta_data_key_info[i].key_length;
5114 tuple_bit = key_cfg->meta_data_active & BIT(i);
5116 switch (tuple_bit) {
5117 case BIT(ROCE_TYPE):
5118 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5119 cur_pos += tuple_size;
5121 case BIT(DST_VPORT):
5122 port_number = hclge_get_port_number(HOST_PORT, 0,
5124 hnae3_set_field(meta_data,
5125 GENMASK(cur_pos + tuple_size, cur_pos),
5126 cur_pos, port_number);
5127 cur_pos += tuple_size;
5134 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5135 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5136 shift_bits = sizeof(meta_data) * 8 - cur_pos;
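/* left-align the packed bits so the meta data lands in the MSB region
 * of the key word
 */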
5138 *key_x = cpu_to_le32(tmp_x << shift_bits);
5139 *key_y = cpu_to_le32(tmp_y << shift_bits);
5142 /* A complete key is made up of the meta data key and the tuple key.
5143 * The meta data key is stored at the MSB region, and the tuple key is
5144 * stored at the LSB region; unused bits will be filled with 0.
5146 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5147 struct hclge_fd_rule *rule)
5149 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5150 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5151 u8 *cur_key_x, *cur_key_y;
5152 u8 meta_data_region;
5157 memset(key_x, 0, sizeof(key_x));
5158 memset(key_y, 0, sizeof(key_y));
5162 for (i = 0 ; i < MAX_TUPLE; i++) {
5166 tuple_size = tuple_key_info[i].key_length / 8;
5167 check_tuple = key_cfg->tuple_active & BIT(i);
5169 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5172 cur_key_x += tuple_size;
5173 cur_key_y += tuple_size;
5177 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5178 MAX_META_DATA_LENGTH / 8;
5180 hclge_fd_convert_meta_data(key_cfg,
5181 (__le32 *)(key_x + meta_data_region),
5182 (__le32 *)(key_y + meta_data_region),
5185 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5188 dev_err(&hdev->pdev->dev,
5189 "fd key_y config fail, loc=%u, ret=%d\n",
5190 rule->location, ret);
5194 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5197 dev_err(&hdev->pdev->dev,
5198 "fd key_x config fail, loc=%u, ret=%d\n",
5199 rule->location, ret);
5203 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5204 struct hclge_fd_rule *rule)
5206 struct hclge_fd_ad_data ad_data;
5208 ad_data.ad_id = rule->location;
5210 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5211 ad_data.drop_packet = true;
5212 ad_data.forward_to_direct_queue = false;
5213 ad_data.queue_id = 0;
5215 ad_data.drop_packet = false;
5216 ad_data.forward_to_direct_queue = true;
5217 ad_data.queue_id = rule->queue_id;
5220 ad_data.use_counter = false;
5221 ad_data.counter_id = 0;
5223 ad_data.use_next_stage = false;
5224 ad_data.next_input_key = 0;
5226 ad_data.write_rule_id_to_bd = true;
5227 ad_data.rule_id = rule->location;
5229 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5232 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5235 if (!spec || !unused_tuple)
5238 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5241 *unused_tuple |= BIT(INNER_SRC_IP);
5244 *unused_tuple |= BIT(INNER_DST_IP);
5247 *unused_tuple |= BIT(INNER_SRC_PORT);
5250 *unused_tuple |= BIT(INNER_DST_PORT);
5253 *unused_tuple |= BIT(INNER_IP_TOS);
5258 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5261 if (!spec || !unused_tuple)
5264 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5265 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5268 *unused_tuple |= BIT(INNER_SRC_IP);
5271 *unused_tuple |= BIT(INNER_DST_IP);
5274 *unused_tuple |= BIT(INNER_IP_TOS);
5277 *unused_tuple |= BIT(INNER_IP_PROTO);
5279 if (spec->l4_4_bytes)
5282 if (spec->ip_ver != ETH_RX_NFC_IP4)
5288 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5291 if (!spec || !unused_tuple)
5294 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5297 /* check whether the src/dst ip address is used */
5298 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5299 !spec->ip6src[2] && !spec->ip6src[3])
5300 *unused_tuple |= BIT(INNER_SRC_IP);
5302 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5303 !spec->ip6dst[2] && !spec->ip6dst[3])
5304 *unused_tuple |= BIT(INNER_DST_IP);
5307 *unused_tuple |= BIT(INNER_SRC_PORT);
5310 *unused_tuple |= BIT(INNER_DST_PORT);
5318 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5321 if (!spec || !unused_tuple)
5324 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5325 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5327 /* check whether the src/dst ip address is used */
5328 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5329 !spec->ip6src[2] && !spec->ip6src[3])
5330 *unused_tuple |= BIT(INNER_SRC_IP);
5332 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5333 !spec->ip6dst[2] && !spec->ip6dst[3])
5334 *unused_tuple |= BIT(INNER_DST_IP);
5336 if (!spec->l4_proto)
5337 *unused_tuple |= BIT(INNER_IP_PROTO);
5342 if (spec->l4_4_bytes)
5348 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5350 if (!spec || !unused_tuple)
5353 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5354 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5355 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5357 if (is_zero_ether_addr(spec->h_source))
5358 *unused_tuple |= BIT(INNER_SRC_MAC);
5360 if (is_zero_ether_addr(spec->h_dest))
5361 *unused_tuple |= BIT(INNER_DST_MAC);
5364 *unused_tuple |= BIT(INNER_ETH_TYPE);
5369 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5370 struct ethtool_rx_flow_spec *fs,
5373 if (fs->flow_type & FLOW_EXT) {
5374 if (fs->h_ext.vlan_etype) {
5375 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5379 if (!fs->h_ext.vlan_tci)
5380 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5382 if (fs->m_ext.vlan_tci &&
5383 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5384 dev_err(&hdev->pdev->dev,
5385 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5386 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5390 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5393 if (fs->flow_type & FLOW_MAC_EXT) {
5394 if (hdev->fd_cfg.fd_mode !=
5395 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5396 dev_err(&hdev->pdev->dev,
5397 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5401 if (is_zero_ether_addr(fs->h_ext.h_dest))
5402 *unused_tuple |= BIT(INNER_DST_MAC);
5404 *unused_tuple &= ~BIT(INNER_DST_MAC);
5410 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5411 struct ethtool_rx_flow_spec *fs,
5417 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5418 dev_err(&hdev->pdev->dev,
5419 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5421 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5425 if ((fs->flow_type & FLOW_EXT) &&
5426 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5427 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5431 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5432 switch (flow_type) {
5436 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5440 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5446 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5449 case IPV6_USER_FLOW:
5450 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5454 if (hdev->fd_cfg.fd_mode !=
5455 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5456 dev_err(&hdev->pdev->dev,
5457 "ETHER_FLOW is not supported in current fd mode!\n");
5461 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5465 dev_err(&hdev->pdev->dev,
5466 "unsupported protocol type, protocol type = %#x\n",
5472 dev_err(&hdev->pdev->dev,
5473 "failed to check flow union tuple, ret = %d\n",
5478 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5481 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5483 struct hclge_fd_rule *rule = NULL;
5484 struct hlist_node *node2;
5486 spin_lock_bh(&hdev->fd_rule_lock);
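/* the rule list is sorted by location in ascending order, so the walk
 * can stop at the first entry at or beyond the requested location
 */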
5487 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5488 if (rule->location >= location)
5492 spin_unlock_bh(&hdev->fd_rule_lock);
5494 return rule && rule->location == location;
5497 /* must be called with fd_rule_lock held */
5498 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5499 struct hclge_fd_rule *new_rule,
5503 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5504 struct hlist_node *node2;
5506 if (is_add && !new_rule)
5509 hlist_for_each_entry_safe(rule, node2,
5510 &hdev->fd_rule_list, rule_node) {
5511 if (rule->location >= location)
5516 if (rule && rule->location == location) {
5517 hlist_del(&rule->rule_node);
5519 hdev->hclge_fd_rule_num--;
5522 if (!hdev->hclge_fd_rule_num)
5523 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5524 clear_bit(location, hdev->fd_bmap);
5528 } else if (!is_add) {
5529 dev_err(&hdev->pdev->dev,
5530 "delete fail, rule %u is inexistent\n",
5535 INIT_HLIST_NODE(&new_rule->rule_node);
5538 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5540 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5542 set_bit(location, hdev->fd_bmap);
5543 hdev->hclge_fd_rule_num++;
5544 hdev->fd_active_type = new_rule->rule_type;
5549 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5550 struct ethtool_rx_flow_spec *fs,
5551 struct hclge_fd_rule *rule)
5553 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5555 switch (flow_type) {
5559 rule->tuples.src_ip[IPV4_INDEX] =
5560 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5561 rule->tuples_mask.src_ip[IPV4_INDEX] =
5562 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5564 rule->tuples.dst_ip[IPV4_INDEX] =
5565 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5566 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5567 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5569 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5570 rule->tuples_mask.src_port =
5571 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5573 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5574 rule->tuples_mask.dst_port =
5575 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5577 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5578 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5580 rule->tuples.ether_proto = ETH_P_IP;
5581 rule->tuples_mask.ether_proto = 0xFFFF;
5585 rule->tuples.src_ip[IPV4_INDEX] =
5586 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5587 rule->tuples_mask.src_ip[IPV4_INDEX] =
5588 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5590 rule->tuples.dst_ip[IPV4_INDEX] =
5591 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5592 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5593 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5595 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5596 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5598 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5599 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5601 rule->tuples.ether_proto = ETH_P_IP;
5602 rule->tuples_mask.ether_proto = 0xFFFF;
5608 be32_to_cpu_array(rule->tuples.src_ip,
5609 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5610 be32_to_cpu_array(rule->tuples_mask.src_ip,
5611 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5613 be32_to_cpu_array(rule->tuples.dst_ip,
5614 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5615 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5616 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5618 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5619 rule->tuples_mask.src_port =
5620 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5622 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5623 rule->tuples_mask.dst_port =
5624 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5626 rule->tuples.ether_proto = ETH_P_IPV6;
5627 rule->tuples_mask.ether_proto = 0xFFFF;
5630 case IPV6_USER_FLOW:
5631 be32_to_cpu_array(rule->tuples.src_ip,
5632 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5633 be32_to_cpu_array(rule->tuples_mask.src_ip,
5634 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5636 be32_to_cpu_array(rule->tuples.dst_ip,
5637 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5638 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5639 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5641 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5642 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5644 rule->tuples.ether_proto = ETH_P_IPV6;
5645 rule->tuples_mask.ether_proto = 0xFFFF;
5649 ether_addr_copy(rule->tuples.src_mac,
5650 fs->h_u.ether_spec.h_source);
5651 ether_addr_copy(rule->tuples_mask.src_mac,
5652 fs->m_u.ether_spec.h_source);
5654 ether_addr_copy(rule->tuples.dst_mac,
5655 fs->h_u.ether_spec.h_dest);
5656 ether_addr_copy(rule->tuples_mask.dst_mac,
5657 fs->m_u.ether_spec.h_dest);
5659 rule->tuples.ether_proto =
5660 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5661 rule->tuples_mask.ether_proto =
5662 be16_to_cpu(fs->m_u.ether_spec.h_proto);
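/* for L4 flow types the IP protocol is implied by the flow type itself,
 * so force ip_proto and fully mask it
 */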
5669 switch (flow_type) {
5672 rule->tuples.ip_proto = IPPROTO_SCTP;
5673 rule->tuples_mask.ip_proto = 0xFF;
5677 rule->tuples.ip_proto = IPPROTO_TCP;
5678 rule->tuples_mask.ip_proto = 0xFF;
5682 rule->tuples.ip_proto = IPPROTO_UDP;
5683 rule->tuples_mask.ip_proto = 0xFF;
5689 if (fs->flow_type & FLOW_EXT) {
5690 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5691 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5694 if (fs->flow_type & FLOW_MAC_EXT) {
5695 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5696 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5702 /* must be called with fd_rule_lock held */
5703 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5704 struct hclge_fd_rule *rule)
5709 dev_err(&hdev->pdev->dev,
5710 "The flow director rule is NULL\n");
5714 /* it never fails here, so there is no need to check the return value */
5715 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5717 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5721 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5728 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5732 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5733 struct ethtool_rxnfc *cmd)
5735 struct hclge_vport *vport = hclge_get_vport(handle);
5736 struct hclge_dev *hdev = vport->back;
5737 u16 dst_vport_id = 0, q_index = 0;
5738 struct ethtool_rx_flow_spec *fs;
5739 struct hclge_fd_rule *rule;
5744 if (!hnae3_dev_fd_supported(hdev)) {
5745 dev_err(&hdev->pdev->dev,
5746 "flow table director is not supported\n");
5751 dev_err(&hdev->pdev->dev,
5752 "please enable flow director first\n");
5756 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5758 ret = hclge_fd_check_spec(hdev, fs, &unused);
5762 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5763 action = HCLGE_FD_ACTION_DROP_PACKET;
5765 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5766 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5769 if (vf > hdev->num_req_vfs) {
5770 dev_err(&hdev->pdev->dev,
5771 "Error: vf id (%u) > max vf num (%u)\n",
5772 vf, hdev->num_req_vfs);
5776 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5777 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5780 dev_err(&hdev->pdev->dev,
5781 "Error: queue id (%u) > max tqp num (%u)\n",
5786 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5790 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5794 ret = hclge_fd_get_tuple(hdev, fs, rule);
5800 rule->flow_type = fs->flow_type;
5801 rule->location = fs->location;
5802 rule->unused_tuple = unused;
5803 rule->vf_id = dst_vport_id;
5804 rule->queue_id = q_index;
5805 rule->action = action;
5806 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5808 /* to avoid rule conflicts, when the user configures a rule via ethtool,
5809 * we need to clear all arfs rules first
5811 hclge_clear_arfs_rules(handle);
5813 spin_lock_bh(&hdev->fd_rule_lock);
5814 ret = hclge_fd_config_rule(hdev, rule);
5816 spin_unlock_bh(&hdev->fd_rule_lock);
5821 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5822 struct ethtool_rxnfc *cmd)
5824 struct hclge_vport *vport = hclge_get_vport(handle);
5825 struct hclge_dev *hdev = vport->back;
5826 struct ethtool_rx_flow_spec *fs;
5829 if (!hnae3_dev_fd_supported(hdev))
5832 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5834 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5837 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5838 dev_err(&hdev->pdev->dev,
5839 "Delete fail, rule %u is inexistent\n", fs->location);
5843 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5848 spin_lock_bh(&hdev->fd_rule_lock);
5849 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5851 spin_unlock_bh(&hdev->fd_rule_lock);
5856 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5859 struct hclge_vport *vport = hclge_get_vport(handle);
5860 struct hclge_dev *hdev = vport->back;
5861 struct hclge_fd_rule *rule;
5862 struct hlist_node *node;
5865 if (!hnae3_dev_fd_supported(hdev))
5868 spin_lock_bh(&hdev->fd_rule_lock);
5869 for_each_set_bit(location, hdev->fd_bmap,
5870 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5871 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5875 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5877 hlist_del(&rule->rule_node);
5880 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5881 hdev->hclge_fd_rule_num = 0;
5882 bitmap_zero(hdev->fd_bmap,
5883 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5886 spin_unlock_bh(&hdev->fd_rule_lock);
5889 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5891 struct hclge_vport *vport = hclge_get_vport(handle);
5892 struct hclge_dev *hdev = vport->back;
5893 struct hclge_fd_rule *rule;
5894 struct hlist_node *node;
5897 /* Return ok here, because reset error handling will check this
5898 * return value. If an error is returned here, the reset process will fail.
5901 if (!hnae3_dev_fd_supported(hdev))
5904 /* if fd is disabled, it should not be restored on reset */
5908 spin_lock_bh(&hdev->fd_rule_lock);
5909 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5910 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5912 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5915 dev_warn(&hdev->pdev->dev,
5916 "Restore rule %u failed, remove it\n",
5918 clear_bit(rule->location, hdev->fd_bmap);
5919 hlist_del(&rule->rule_node);
5921 hdev->hclge_fd_rule_num--;
5925 if (hdev->hclge_fd_rule_num)
5926 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5928 spin_unlock_bh(&hdev->fd_rule_lock);
5933 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5934 struct ethtool_rxnfc *cmd)
5936 struct hclge_vport *vport = hclge_get_vport(handle);
5937 struct hclge_dev *hdev = vport->back;
5939 if (!hnae3_dev_fd_supported(hdev))
5942 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5943 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5948 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
5949 struct ethtool_tcpip4_spec *spec,
5950 struct ethtool_tcpip4_spec *spec_mask)
5952 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5953 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5954 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5956 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5957 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5958 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5960 spec->psrc = cpu_to_be16(rule->tuples.src_port);
5961 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5962 0 : cpu_to_be16(rule->tuples_mask.src_port);
5964 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
5965 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
5966 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5968 spec->tos = rule->tuples.ip_tos;
5969 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5970 0 : rule->tuples_mask.ip_tos;
5973 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
5974 struct ethtool_usrip4_spec *spec,
5975 struct ethtool_usrip4_spec *spec_mask)
5977 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5978 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5979 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5981 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5982 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5983 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5985 spec->tos = rule->tuples.ip_tos;
5986 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5987 0 : rule->tuples_mask.ip_tos;
5989 spec->proto = rule->tuples.ip_proto;
5990 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5991 0 : rule->tuples_mask.ip_proto;
5993 spec->ip_ver = ETH_RX_NFC_IP4;
5996 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
5997 struct ethtool_tcpip6_spec *spec,
5998 struct ethtool_tcpip6_spec *spec_mask)
6000 cpu_to_be32_array(spec->ip6src,
6001 rule->tuples.src_ip, IPV6_SIZE);
6002 cpu_to_be32_array(spec->ip6dst,
6003 rule->tuples.dst_ip, IPV6_SIZE);
6004 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6005 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6007 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6010 if (rule->unused_tuple & BIT(INNER_DST_IP))
6011 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6013 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6016 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6017 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6018 0 : cpu_to_be16(rule->tuples_mask.src_port);
6020 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6021 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6022 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6025 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6026 struct ethtool_usrip6_spec *spec,
6027 struct ethtool_usrip6_spec *spec_mask)
6029 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6030 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6031 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6032 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6034 cpu_to_be32_array(spec_mask->ip6src,
6035 rule->tuples_mask.src_ip, IPV6_SIZE);
6037 if (rule->unused_tuple & BIT(INNER_DST_IP))
6038 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6040 cpu_to_be32_array(spec_mask->ip6dst,
6041 rule->tuples_mask.dst_ip, IPV6_SIZE);
6043 spec->l4_proto = rule->tuples.ip_proto;
6044 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6045 0 : rule->tuples_mask.ip_proto;
6048 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6049 struct ethhdr *spec,
6050 struct ethhdr *spec_mask)
6052 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6053 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6055 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6056 eth_zero_addr(spec_mask->h_source);
6058 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6060 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6061 eth_zero_addr(spec_mask->h_dest);
6063 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6065 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6066 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6067 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6070 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6071 struct hclge_fd_rule *rule)
6073 if (fs->flow_type & FLOW_EXT) {
6074 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6075 fs->m_ext.vlan_tci =
6076 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6077 cpu_to_be16(VLAN_VID_MASK) :
6078 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6081 if (fs->flow_type & FLOW_MAC_EXT) {
6082 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6083 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6084 eth_zero_addr(fs->m_ext.h_dest);
6086 ether_addr_copy(fs->m_ext.h_dest,
6087 rule->tuples_mask.dst_mac);
6091 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6092 struct ethtool_rxnfc *cmd)
6094 struct hclge_vport *vport = hclge_get_vport(handle);
6095 struct hclge_fd_rule *rule = NULL;
6096 struct hclge_dev *hdev = vport->back;
6097 struct ethtool_rx_flow_spec *fs;
6098 struct hlist_node *node2;
6100 if (!hnae3_dev_fd_supported(hdev))
6103 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6105 spin_lock_bh(&hdev->fd_rule_lock);
6107 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6108 if (rule->location >= fs->location)
6112 if (!rule || fs->location != rule->location) {
6113 spin_unlock_bh(&hdev->fd_rule_lock);
6118 fs->flow_type = rule->flow_type;
6119 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6123 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6124 &fs->m_u.tcp_ip4_spec);
6127 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6128 &fs->m_u.usr_ip4_spec);
6133 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6134 &fs->m_u.tcp_ip6_spec);
6136 case IPV6_USER_FLOW:
6137 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6138 &fs->m_u.usr_ip6_spec);
6140 /* The flow type of the fd rule has been checked before it was added to
6141 * the rule list. As all other flow types have been handled above, the
6142 * default case must be ETHER_FLOW
6145 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6146 &fs->m_u.ether_spec);
6150 hclge_fd_get_ext_info(fs, rule);
6152 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6153 fs->ring_cookie = RX_CLS_FLOW_DISC;
6157 fs->ring_cookie = rule->queue_id;
6158 vf_id = rule->vf_id;
6159 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6160 fs->ring_cookie |= vf_id;
6163 spin_unlock_bh(&hdev->fd_rule_lock);
6168 static int hclge_get_all_rules(struct hnae3_handle *handle,
6169 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6171 struct hclge_vport *vport = hclge_get_vport(handle);
6172 struct hclge_dev *hdev = vport->back;
6173 struct hclge_fd_rule *rule;
6174 struct hlist_node *node2;
6177 if (!hnae3_dev_fd_supported(hdev))
6180 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6182 spin_lock_bh(&hdev->fd_rule_lock);
6183 hlist_for_each_entry_safe(rule, node2,
6184 &hdev->fd_rule_list, rule_node) {
6185 if (cnt == cmd->rule_cnt) {
6186 spin_unlock_bh(&hdev->fd_rule_lock);
6190 rule_locs[cnt] = rule->location;
6194 spin_unlock_bh(&hdev->fd_rule_lock);
6196 cmd->rule_cnt = cnt;
6201 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6202 struct hclge_fd_rule_tuples *tuples)
6204 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6205 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6207 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6208 tuples->ip_proto = fkeys->basic.ip_proto;
6209 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6211 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6212 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6213 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6217 for (i = 0; i < IPV6_SIZE; i++) {
6218 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6219 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6224 /* traverse all rules, check whether an existing rule has the same tuples */
6225 static struct hclge_fd_rule *
6226 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6227 const struct hclge_fd_rule_tuples *tuples)
6229 struct hclge_fd_rule *rule = NULL;
6230 struct hlist_node *node;
6232 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6233 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6240 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6241 struct hclge_fd_rule *rule)
6243 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6244 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6245 BIT(INNER_SRC_PORT);
6248 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6249 if (tuples->ether_proto == ETH_P_IP) {
6250 if (tuples->ip_proto == IPPROTO_TCP)
6251 rule->flow_type = TCP_V4_FLOW;
6253 rule->flow_type = UDP_V4_FLOW;
6255 if (tuples->ip_proto == IPPROTO_TCP)
6256 rule->flow_type = TCP_V6_FLOW;
6258 rule->flow_type = UDP_V6_FLOW;
6260 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6261 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6264 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6265 u16 flow_id, struct flow_keys *fkeys)
6267 struct hclge_vport *vport = hclge_get_vport(handle);
6268 struct hclge_fd_rule_tuples new_tuples;
6269 struct hclge_dev *hdev = vport->back;
6270 struct hclge_fd_rule *rule;
6275 if (!hnae3_dev_fd_supported(hdev))
6278 memset(&new_tuples, 0, sizeof(new_tuples));
6279 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6281 spin_lock_bh(&hdev->fd_rule_lock);
6283 /* when there is already an fd rule added by the user,
6284 * arfs should not work
6286 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6287 spin_unlock_bh(&hdev->fd_rule_lock);
6291 /* check whether a flow director filter exists for this flow;
6292 * if not, create a new filter for it;
6293 * if a filter exists with a different queue id, modify the filter;
6294 * if a filter exists with the same queue id, do nothing
6296 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6298 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6299 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6300 spin_unlock_bh(&hdev->fd_rule_lock);
6304 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6306 spin_unlock_bh(&hdev->fd_rule_lock);
6310 set_bit(bit_id, hdev->fd_bmap);
6311 rule->location = bit_id;
6312 rule->flow_id = flow_id;
6313 rule->queue_id = queue_id;
6314 hclge_fd_build_arfs_rule(&new_tuples, rule);
6315 ret = hclge_fd_config_rule(hdev, rule);
6317 spin_unlock_bh(&hdev->fd_rule_lock);
6322 return rule->location;
6325 spin_unlock_bh(&hdev->fd_rule_lock);
6327 if (rule->queue_id == queue_id)
6328 return rule->location;
6330 tmp_queue_id = rule->queue_id;
6331 rule->queue_id = queue_id;
6332 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6334 rule->queue_id = tmp_queue_id;
6338 return rule->location;
6341 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6343 #ifdef CONFIG_RFS_ACCEL
6344 struct hnae3_handle *handle = &hdev->vport[0].nic;
6345 struct hclge_fd_rule *rule;
6346 struct hlist_node *node;
6347 HLIST_HEAD(del_list);
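/* expired rules are collected on a private del_list under the lock;
 * their TCAM entries are invalidated after the lock is dropped
 */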
6349 spin_lock_bh(&hdev->fd_rule_lock);
6350 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6351 spin_unlock_bh(&hdev->fd_rule_lock);
6354 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6355 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6356 rule->flow_id, rule->location)) {
6357 hlist_del_init(&rule->rule_node);
6358 hlist_add_head(&rule->rule_node, &del_list);
6359 hdev->hclge_fd_rule_num--;
6360 clear_bit(rule->location, hdev->fd_bmap);
6363 spin_unlock_bh(&hdev->fd_rule_lock);
6365 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6366 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6367 rule->location, NULL, false);
6373 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6375 #ifdef CONFIG_RFS_ACCEL
6376 struct hclge_vport *vport = hclge_get_vport(handle);
6377 struct hclge_dev *hdev = vport->back;
6379 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6380 hclge_del_all_fd_entries(handle, true);
6384 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6386 struct hclge_vport *vport = hclge_get_vport(handle);
6387 struct hclge_dev *hdev = vport->back;
6389 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6390 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6393 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6395 struct hclge_vport *vport = hclge_get_vport(handle);
6396 struct hclge_dev *hdev = vport->back;
6398 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6401 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6403 struct hclge_vport *vport = hclge_get_vport(handle);
6404 struct hclge_dev *hdev = vport->back;
6406 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6409 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6411 struct hclge_vport *vport = hclge_get_vport(handle);
6412 struct hclge_dev *hdev = vport->back;
6414 return hdev->rst_stats.hw_reset_done_cnt;
6417 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6419 struct hclge_vport *vport = hclge_get_vport(handle);
6420 struct hclge_dev *hdev = vport->back;
6423 hdev->fd_en = enable;
6424 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6426 hclge_del_all_fd_entries(handle, clear);
6428 hclge_restore_fd_entries(handle);
6431 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6433 struct hclge_desc desc;
6434 struct hclge_config_mac_mode_cmd *req =
6435 (struct hclge_config_mac_mode_cmd *)desc.data;
6439 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6442 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6443 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6444 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6445 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6446 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6447 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6448 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6449 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6450 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6451 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6454 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6456 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6458 dev_err(&hdev->pdev->dev,
6459 "mac enable fail, ret =%d.\n", ret);
6462 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6463 u8 switch_param, u8 param_mask)
6465 struct hclge_mac_vlan_switch_cmd *req;
6466 struct hclge_desc desc;
6470 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6471 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6473 /* read current config parameter */
6474 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6476 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6477 req->func_id = cpu_to_le32(func_id);
6479 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6481 dev_err(&hdev->pdev->dev,
6482 "read mac vlan switch parameter fail, ret = %d\n", ret);
6486 /* modify and write new config parameter */
6487 hclge_cmd_reuse_desc(&desc, false);
6488 req->switch_param = (req->switch_param & param_mask) | switch_param;
6489 req->param_mask = param_mask;
6491 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6493 dev_err(&hdev->pdev->dev,
6494 "set mac vlan switch parameter fail, ret = %d\n", ret);
6498 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6501 #define HCLGE_PHY_LINK_STATUS_NUM 200
6503 struct phy_device *phydev = hdev->hw.mac.phydev;
6508 ret = phy_read_status(phydev);
6510 dev_err(&hdev->pdev->dev,
6511 "phy update link status fail, ret = %d\n", ret);
6515 if (phydev->link == link_ret)
6518 msleep(HCLGE_LINK_STATUS_MS);
6519 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6522 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6524 #define HCLGE_MAC_LINK_STATUS_NUM 100
6530 ret = hclge_get_mac_link_status(hdev);
6533 else if (ret == link_ret)
6536 msleep(HCLGE_LINK_STATUS_MS);
6537 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6541 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6544 #define HCLGE_LINK_STATUS_DOWN 0
6545 #define HCLGE_LINK_STATUS_UP 1
6549 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6552 hclge_phy_link_status_wait(hdev, link_ret);
6554 return hclge_mac_link_status_wait(hdev, link_ret);
6557 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6559 struct hclge_config_mac_mode_cmd *req;
6560 struct hclge_desc desc;
6564 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6565 /* 1 Read out the MAC mode config at first */
6566 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6567 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6569 dev_err(&hdev->pdev->dev,
6570 "mac loopback get fail, ret =%d.\n", ret);
6574 /* 2 Then setup the loopback flag */
6575 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6576 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6578 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6580 /* 3 Config mac work mode with the loopback flag
6581 * and its original configuration parameters
6583 hclge_cmd_reuse_desc(&desc, false);
6584 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6586 dev_err(&hdev->pdev->dev,
6587 "mac loopback set fail, ret =%d.\n", ret);
6591 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6592 enum hnae3_loop loop_mode)
6594 #define HCLGE_SERDES_RETRY_MS 10
6595 #define HCLGE_SERDES_RETRY_NUM 100
6597 struct hclge_serdes_lb_cmd *req;
6598 struct hclge_desc desc;
6602 req = (struct hclge_serdes_lb_cmd *)desc.data;
6603 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6605 switch (loop_mode) {
6606 case HNAE3_LOOP_SERIAL_SERDES:
6607 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6609 case HNAE3_LOOP_PARALLEL_SERDES:
6610 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6613 dev_err(&hdev->pdev->dev,
6614 "unsupported serdes loopback mode %d\n", loop_mode);
6619 req->enable = loop_mode_b;
6620 req->mask = loop_mode_b;
6622 req->mask = loop_mode_b;
6625 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6627 dev_err(&hdev->pdev->dev,
6628 "serdes loopback set fail, ret = %d\n", ret);
6633 msleep(HCLGE_SERDES_RETRY_MS);
6634 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6636 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6638 dev_err(&hdev->pdev->dev,
6639 "serdes loopback get, ret = %d\n", ret);
6642 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6643 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6645 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6646 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6648 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6649 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6655 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6656 enum hnae3_loop loop_mode)
6660 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6664 hclge_cfg_mac_mode(hdev, en);
6666 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6668 dev_err(&hdev->pdev->dev,
6669 "serdes loopback config mac mode timeout\n");
6674 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6675 struct phy_device *phydev)
6679 if (!phydev->suspended) {
6680 ret = phy_suspend(phydev);
6685 ret = phy_resume(phydev);
6689 return phy_loopback(phydev, true);
6692 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6693 struct phy_device *phydev)
6697 ret = phy_loopback(phydev, false);
6701 return phy_suspend(phydev);
6704 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6706 struct phy_device *phydev = hdev->hw.mac.phydev;
6713 ret = hclge_enable_phy_loopback(hdev, phydev);
6715 ret = hclge_disable_phy_loopback(hdev, phydev);
6717 dev_err(&hdev->pdev->dev,
6718 "set phy loopback fail, ret = %d\n", ret);
6722 hclge_cfg_mac_mode(hdev, en);
6724 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6726 dev_err(&hdev->pdev->dev,
6727 "phy loopback config mac mode timeout\n");
6732 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6733 int stream_id, bool enable)
6735 struct hclge_desc desc;
6736 struct hclge_cfg_com_tqp_queue_cmd *req =
6737 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6740 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6741 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6742 req->stream_id = cpu_to_le16(stream_id);
6744 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6746 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6748 dev_err(&hdev->pdev->dev,
6749 "Tqp enable fail, status =%d.\n", ret);
6753 static int hclge_set_loopback(struct hnae3_handle *handle,
6754 enum hnae3_loop loop_mode, bool en)
6756 struct hclge_vport *vport = hclge_get_vport(handle);
6757 struct hnae3_knic_private_info *kinfo;
6758 struct hclge_dev *hdev = vport->back;
6761 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6762 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6763 * the same, the packets are looped back in the SSU. If SSU loopback
6764 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6766 if (hdev->pdev->revision >= 0x21) {
6767 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6769 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6770 HCLGE_SWITCH_ALW_LPBK_MASK);
6775 switch (loop_mode) {
6776 case HNAE3_LOOP_APP:
6777 ret = hclge_set_app_loopback(hdev, en);
6779 case HNAE3_LOOP_SERIAL_SERDES:
6780 case HNAE3_LOOP_PARALLEL_SERDES:
6781 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6783 case HNAE3_LOOP_PHY:
6784 ret = hclge_set_phy_loopback(hdev, en);
6788 dev_err(&hdev->pdev->dev,
6789 "loop_mode %d is not supported\n", loop_mode);
6796 kinfo = &vport->nic.kinfo;
6797 for (i = 0; i < kinfo->num_tqps; i++) {
6798 ret = hclge_tqp_enable(hdev, i, 0, en);
6806 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6810 ret = hclge_set_app_loopback(hdev, false);
6814 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6818 return hclge_cfg_serdes_loopback(hdev, false,
6819 HNAE3_LOOP_PARALLEL_SERDES);
6822 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6824 struct hclge_vport *vport = hclge_get_vport(handle);
6825 struct hnae3_knic_private_info *kinfo;
6826 struct hnae3_queue *queue;
6827 struct hclge_tqp *tqp;
6830 kinfo = &vport->nic.kinfo;
6831 for (i = 0; i < kinfo->num_tqps; i++) {
6832 queue = handle->kinfo.tqp[i];
6833 tqp = container_of(queue, struct hclge_tqp, q);
6834 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
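/* Wait for any in-flight link update in the service task to finish,
 * bounded by HCLGE_FLUSH_LINK_TIMEOUT iterations; the wait also ends
 * early once the service task completes another round
 * (serv_processed_cnt changes).
 */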
6838 static void hclge_flush_link_update(struct hclge_dev *hdev)
6840 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
6842 unsigned long last = hdev->serv_processed_cnt;
6845 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6846 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6847 last == hdev->serv_processed_cnt)
6851 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6853 struct hclge_vport *vport = hclge_get_vport(handle);
6854 struct hclge_dev *hdev = vport->back;
6857 hclge_task_schedule(hdev, 0);
6859 /* Set the DOWN flag here to disable link updating */
6860 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6862 /* flush memory to make sure DOWN is seen by service task */
6863 smp_mb__before_atomic();
6864 hclge_flush_link_update(hdev);
6868 static int hclge_ae_start(struct hnae3_handle *handle)
6870 struct hclge_vport *vport = hclge_get_vport(handle);
6871 struct hclge_dev *hdev = vport->back;
6874 hclge_cfg_mac_mode(hdev, true);
6875 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6876 hdev->hw.mac.link = 0;
6878 /* reset tqp stats */
6879 hclge_reset_tqp_stats(handle);
6881 hclge_mac_start_phy(hdev);
6886 static void hclge_ae_stop(struct hnae3_handle *handle)
6888 struct hclge_vport *vport = hclge_get_vport(handle);
6889 struct hclge_dev *hdev = vport->back;
6892 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6894 hclge_clear_arfs_rules(handle);
6896 /* If it is not a PF reset, the firmware will disable the MAC,
6897 * so it only needs to stop the PHY here.
6898 */
6899 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6900 hdev->reset_type != HNAE3_FUNC_RESET) {
6901 hclge_mac_stop_phy(hdev);
6902 hclge_update_link_status(hdev);
6903 return;
6904 }
6906 for (i = 0; i < handle->kinfo.num_tqps; i++)
6907 hclge_reset_tqp(handle, i);
6909 hclge_config_mac_tnl_int(hdev, false);
6912 hclge_cfg_mac_mode(hdev, false);
6914 hclge_mac_stop_phy(hdev);
6916 /* reset tqp stats */
6917 hclge_reset_tqp_stats(handle);
6918 hclge_update_link_status(hdev);
6921 int hclge_vport_start(struct hclge_vport *vport)
6923 struct hclge_dev *hdev = vport->back;
6925 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6926 vport->last_active_jiffies = jiffies;
6928 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
6929 if (vport->vport_id) {
6930 hclge_restore_mac_table_common(vport);
6931 hclge_restore_vport_vlan_table(vport);
6932 } else {
6933 hclge_restore_hw_table(hdev);
6934 }
6935 }
6937 clear_bit(vport->vport_id, hdev->vport_config_block);
6942 void hclge_vport_stop(struct hclge_vport *vport)
6944 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6947 static int hclge_client_start(struct hnae3_handle *handle)
6949 struct hclge_vport *vport = hclge_get_vport(handle);
6951 return hclge_vport_start(vport);
6954 static void hclge_client_stop(struct hnae3_handle *handle)
6956 struct hclge_vport *vport = hclge_get_vport(handle);
6958 hclge_vport_stop(vport);
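/* Translate the firmware cmdq status and the per-opcode response code
 * of a MAC-VLAN table ADD/REMOVE/LKUP command into a standard errno:
 * table overflow becomes -ENOSPC, a lookup/remove miss becomes -ENOENT.
 */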
6961 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6962 u16 cmdq_resp, u8 resp_code,
6963 enum hclge_mac_vlan_tbl_opcode op)
6965 struct hclge_dev *hdev = vport->back;
6968 dev_err(&hdev->pdev->dev,
6969 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6974 if (op == HCLGE_MAC_VLAN_ADD) {
6975 if (!resp_code || resp_code == 1)
6976 return 0;
6977 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
6978 resp_code == HCLGE_ADD_MC_OVERFLOW)
6979 return -ENOSPC;
6981 dev_err(&hdev->pdev->dev,
6982 "add mac addr failed for undefined, code=%u.\n",
6985 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6986 if (!resp_code) {
6987 return 0;
6988 } else if (resp_code == 1) {
6989 dev_dbg(&hdev->pdev->dev,
6990 "remove mac addr failed for miss.\n");
6994 dev_err(&hdev->pdev->dev,
6995 "remove mac addr failed for undefined, code=%u.\n",
6998 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6999 if (!resp_code) {
7000 return 0;
7001 } else if (resp_code == 1) {
7002 dev_dbg(&hdev->pdev->dev,
7003 "lookup mac addr failed for miss.\n");
7007 dev_err(&hdev->pdev->dev,
7008 "lookup mac addr failed for undefined, code=%u.\n",
7013 dev_err(&hdev->pdev->dev,
7014 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7019 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7021 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7023 unsigned int word_num;
7024 unsigned int bit_num;
7026 if (vfid > 255 || vfid < 0)
7029 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7030 word_num = vfid / 32;
7031 bit_num = vfid % 32;
7032 if (clr)
7033 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7034 else
7035 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7036 } else {
7037 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7038 bit_num = vfid % 32;
7039 if (clr)
7040 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7041 else
7042 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7043 }
7048 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7050 #define HCLGE_DESC_NUMBER 3
7051 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7054 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7055 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7056 if (desc[i].data[j])
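/* Pack a 6-byte MAC address into a table entry: bytes 0-3 form
 * mac_addr_hi32 (byte 0 in the lowest bits), bytes 4-5 form
 * mac_addr_lo16; multicast entries additionally get the MC flag bits.
 */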
7062 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7063 const u8 *addr, bool is_mc)
7065 const unsigned char *mac_addr = addr;
7066 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7067 (mac_addr[0]) | (mac_addr[1] << 8);
7068 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
7070 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7072 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7073 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7076 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7077 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7080 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7081 struct hclge_mac_vlan_tbl_entry_cmd *req)
7083 struct hclge_dev *hdev = vport->back;
7084 struct hclge_desc desc;
7089 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7091 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7093 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7095 dev_err(&hdev->pdev->dev,
7096 "del mac addr failed for cmd_send, ret =%d.\n",
7100 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7101 retval = le16_to_cpu(desc.retval);
7103 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7104 HCLGE_MAC_VLAN_REMOVE);
7107 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7108 struct hclge_mac_vlan_tbl_entry_cmd *req,
7109 struct hclge_desc *desc,
7112 struct hclge_dev *hdev = vport->back;
7117 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7119 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7120 memcpy(desc[0].data,
7122 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7123 hclge_cmd_setup_basic_desc(&desc[1],
7124 HCLGE_OPC_MAC_VLAN_ADD,
7126 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7127 hclge_cmd_setup_basic_desc(&desc[2],
7128 HCLGE_OPC_MAC_VLAN_ADD,
7130 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7132 memcpy(desc[0].data,
7134 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7135 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7138 dev_err(&hdev->pdev->dev,
7139 "lookup mac addr failed for cmd_send, ret =%d.\n",
7143 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7144 retval = le16_to_cpu(desc[0].retval);
7146 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7147 HCLGE_MAC_VLAN_LKUP);
7150 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7151 struct hclge_mac_vlan_tbl_entry_cmd *req,
7152 struct hclge_desc *mc_desc)
7154 struct hclge_dev *hdev = vport->back;
7161 struct hclge_desc desc;
7163 hclge_cmd_setup_basic_desc(&desc,
7164 HCLGE_OPC_MAC_VLAN_ADD,
7166 memcpy(desc.data, req,
7167 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7168 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7169 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7170 retval = le16_to_cpu(desc.retval);
7172 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7174 HCLGE_MAC_VLAN_ADD);
7176 hclge_cmd_reuse_desc(&mc_desc[0], false);
7177 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7178 hclge_cmd_reuse_desc(&mc_desc[1], false);
7179 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7180 hclge_cmd_reuse_desc(&mc_desc[2], false);
7181 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7182 memcpy(mc_desc[0].data, req,
7183 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7184 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7185 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7186 retval = le16_to_cpu(mc_desc[0].retval);
7188 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7190 HCLGE_MAC_VLAN_ADD);
7194 dev_err(&hdev->pdev->dev,
7195 "add mac addr failed for cmd_send, ret =%d.\n",
7203 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7204 u16 *allocated_size)
7206 struct hclge_umv_spc_alc_cmd *req;
7207 struct hclge_desc desc;
7210 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7211 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7213 req->space_size = cpu_to_le32(space_size);
7215 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7217 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7222 *allocated_size = le32_to_cpu(desc.data[1]);
7227 static int hclge_init_umv_space(struct hclge_dev *hdev)
7229 u16 allocated_size = 0;
7232 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7236 if (allocated_size < hdev->wanted_umv_size)
7237 dev_warn(&hdev->pdev->dev,
7238 "failed to alloc umv space, want %u, get %u\n",
7239 hdev->wanted_umv_size, allocated_size);
7241 hdev->max_umv_size = allocated_size;
7242 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7243 hdev->share_umv_size = hdev->priv_umv_size +
7244 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7249 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7251 struct hclge_vport *vport;
7254 for (i = 0; i < hdev->num_alloc_vport; i++) {
7255 vport = &hdev->vport[i];
7256 vport->used_umv_num = 0;
7259 mutex_lock(&hdev->vport_lock);
7260 hdev->share_umv_size = hdev->priv_umv_size +
7261 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7262 mutex_unlock(&hdev->vport_lock);
7265 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7267 struct hclge_dev *hdev = vport->back;
7268 bool is_full;
7270 if (need_lock)
7271 mutex_lock(&hdev->vport_lock);
7273 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7274 hdev->share_umv_size == 0);
7276 if (need_lock)
7277 mutex_unlock(&hdev->vport_lock);
7279 return is_full;
7282 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7284 struct hclge_dev *hdev = vport->back;
7286 if (is_free) {
7287 if (vport->used_umv_num > hdev->priv_umv_size)
7288 hdev->share_umv_size++;
7290 if (vport->used_umv_num > 0)
7291 vport->used_umv_num--;
7292 } else {
7293 if (vport->used_umv_num >= hdev->priv_umv_size &&
7294 hdev->share_umv_size > 0)
7295 hdev->share_umv_size--;
7296 vport->used_umv_num++;
7297 }
7300 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7303 struct hclge_mac_node *mac_node, *tmp;
7305 list_for_each_entry_safe(mac_node, tmp, list, node)
7306 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7312 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7313 enum HCLGE_MAC_NODE_STATE state)
7316 /* from set_rx_mode or tmp_add_list */
7317 case HCLGE_MAC_TO_ADD:
7318 if (mac_node->state == HCLGE_MAC_TO_DEL)
7319 mac_node->state = HCLGE_MAC_ACTIVE;
7321 /* only from set_rx_mode */
7322 case HCLGE_MAC_TO_DEL:
7323 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7324 list_del(&mac_node->node);
7327 mac_node->state = HCLGE_MAC_TO_DEL;
7330 /* only from tmp_add_list, the mac_node->state won't be
7331 * ACTIVE.
7332 */
7333 case HCLGE_MAC_ACTIVE:
7334 if (mac_node->state == HCLGE_MAC_TO_ADD)
7335 mac_node->state = HCLGE_MAC_ACTIVE;
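/* Record a requested MAC table change in the vport's software list.
 * Each address walks a small state machine: TO_ADD (waiting to be
 * written to hardware), ACTIVE (written), TO_DEL (waiting to be
 * removed); the service task later syncs the list to hardware.
 */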
7341 int hclge_update_mac_list(struct hclge_vport *vport,
7342 enum HCLGE_MAC_NODE_STATE state,
7343 enum HCLGE_MAC_ADDR_TYPE mac_type,
7344 const unsigned char *addr)
7346 struct hclge_dev *hdev = vport->back;
7347 struct hclge_mac_node *mac_node;
7348 struct list_head *list;
7350 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7351 &vport->uc_mac_list : &vport->mc_mac_list;
7353 spin_lock_bh(&vport->mac_list_lock);
7355 /* if the mac addr is already in the mac list, no need to add a new
7356 * one into it; just check the mac addr state, convert it to a new
7357 * state, or remove it, or do nothing.
7358 */
7359 mac_node = hclge_find_mac_node(list, addr);
7361 hclge_update_mac_node(mac_node, state);
7362 spin_unlock_bh(&vport->mac_list_lock);
7363 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7367 /* if this address was never added, there is no need to delete it */
7368 if (state == HCLGE_MAC_TO_DEL) {
7369 spin_unlock_bh(&vport->mac_list_lock);
7370 dev_err(&hdev->pdev->dev,
7371 "failed to delete address %pM from mac list\n",
7376 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7378 spin_unlock_bh(&vport->mac_list_lock);
7382 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7384 mac_node->state = state;
7385 ether_addr_copy(mac_node->mac_addr, addr);
7386 list_add_tail(&mac_node->node, list);
7388 spin_unlock_bh(&vport->mac_list_lock);
7393 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7394 const unsigned char *addr)
7396 struct hclge_vport *vport = hclge_get_vport(handle);
7398 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
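/* Write one unicast address into the hardware MAC-VLAN table for this
 * vport, honouring the per-function UMV (unicast MAC-VLAN) space
 * quota; a full table is reported once via the overflow promiscuous
 * flag instead of on every retry.
 */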
7402 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7403 const unsigned char *addr)
7405 struct hclge_dev *hdev = vport->back;
7406 struct hclge_mac_vlan_tbl_entry_cmd req;
7407 struct hclge_desc desc;
7408 u16 egress_port = 0;
7411 /* mac addr check */
7412 if (is_zero_ether_addr(addr) ||
7413 is_broadcast_ether_addr(addr) ||
7414 is_multicast_ether_addr(addr)) {
7415 dev_err(&hdev->pdev->dev,
7416 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7417 addr, is_zero_ether_addr(addr),
7418 is_broadcast_ether_addr(addr),
7419 is_multicast_ether_addr(addr));
7423 memset(&req, 0, sizeof(req));
7425 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7426 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7428 req.egress_port = cpu_to_le16(egress_port);
7430 hclge_prepare_mac_addr(&req, addr, false);
7432 /* Lookup the mac address in the mac_vlan table, and add
7433 * it if the entry does not exist. Duplicate unicast entries
7434 * are not allowed in the mac_vlan table.
7435 */
7436 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7437 if (ret == -ENOENT) {
7438 mutex_lock(&hdev->vport_lock);
7439 if (!hclge_is_umv_space_full(vport, false)) {
7440 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7442 hclge_update_umv_space(vport, false);
7443 mutex_unlock(&hdev->vport_lock);
7446 mutex_unlock(&hdev->vport_lock);
7448 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7449 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7450 hdev->priv_umv_size);
7455 /* check if we just hit the duplicate */
7457 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7458 vport->vport_id, addr);
7462 dev_err(&hdev->pdev->dev,
7463 "PF failed to add unicast entry(%pM) in the MAC table\n",
7469 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7470 const unsigned char *addr)
7472 struct hclge_vport *vport = hclge_get_vport(handle);
7474 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7478 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7479 const unsigned char *addr)
7481 struct hclge_dev *hdev = vport->back;
7482 struct hclge_mac_vlan_tbl_entry_cmd req;
7485 /* mac addr check */
7486 if (is_zero_ether_addr(addr) ||
7487 is_broadcast_ether_addr(addr) ||
7488 is_multicast_ether_addr(addr)) {
7489 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7494 memset(&req, 0, sizeof(req));
7495 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7496 hclge_prepare_mac_addr(&req, addr, false);
7497 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7499 mutex_lock(&hdev->vport_lock);
7500 hclge_update_umv_space(vport, true);
7501 mutex_unlock(&hdev->vport_lock);
7502 } else if (ret == -ENOENT) {
7509 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7510 const unsigned char *addr)
7512 struct hclge_vport *vport = hclge_get_vport(handle);
7514 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7518 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7519 const unsigned char *addr)
7521 struct hclge_dev *hdev = vport->back;
7522 struct hclge_mac_vlan_tbl_entry_cmd req;
7523 struct hclge_desc desc[3];
7526 /* mac addr check */
7527 if (!is_multicast_ether_addr(addr)) {
7528 dev_err(&hdev->pdev->dev,
7529 "Add mc mac err! invalid mac:%pM.\n",
7533 memset(&req, 0, sizeof(req));
7534 hclge_prepare_mac_addr(&req, addr, true);
7535 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7537 /* This mac addr does not exist, add a new entry for it */
7538 memset(desc[0].data, 0, sizeof(desc[0].data));
7539 memset(desc[1].data, 0, sizeof(desc[0].data));
7540 memset(desc[2].data, 0, sizeof(desc[0].data));
7542 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7545 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7547 /* if already overflow, not to print each time */
7548 if (status == -ENOSPC &&
7549 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7550 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7555 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7556 const unsigned char *addr)
7558 struct hclge_vport *vport = hclge_get_vport(handle);
7560 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7564 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7565 const unsigned char *addr)
7567 struct hclge_dev *hdev = vport->back;
7568 struct hclge_mac_vlan_tbl_entry_cmd req;
7569 enum hclge_cmd_status status;
7570 struct hclge_desc desc[3];
7572 /* mac addr check */
7573 if (!is_multicast_ether_addr(addr)) {
7574 dev_dbg(&hdev->pdev->dev,
7575 "Remove mc mac err! invalid mac:%pM.\n",
7580 memset(&req, 0, sizeof(req));
7581 hclge_prepare_mac_addr(&req, addr, true);
7582 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7584 /* This mac addr exists, remove this handle's VFID from it */
7585 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7589 if (hclge_is_all_function_id_zero(desc))
7590 /* All the vfids are zero, so delete this entry */
7591 status = hclge_remove_mac_vlan_tbl(vport, &req);
7592 else
7593 /* Not all the vfids are zero, so just update the vfid bitmap */
7594 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7596 } else if (status == -ENOENT) {
7603 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7604 struct list_head *list,
7605 int (*sync)(struct hclge_vport *,
7606 const unsigned char *))
7608 struct hclge_mac_node *mac_node, *tmp;
7611 list_for_each_entry_safe(mac_node, tmp, list, node) {
7612 ret = sync(vport, mac_node->mac_addr);
7614 mac_node->state = HCLGE_MAC_ACTIVE;
7616 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7623 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7624 struct list_head *list,
7625 int (*unsync)(struct hclge_vport *,
7626 const unsigned char *))
7628 struct hclge_mac_node *mac_node, *tmp;
7631 list_for_each_entry_safe(mac_node, tmp, list, node) {
7632 ret = unsync(vport, mac_node->mac_addr);
7633 if (!ret || ret == -ENOENT) {
7634 list_del(&mac_node->node);
7637 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7644 static bool hclge_sync_from_add_list(struct list_head *add_list,
7645 struct list_head *mac_list)
7647 struct hclge_mac_node *mac_node, *tmp, *new_node;
7648 bool all_added = true;
7650 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7651 if (mac_node->state == HCLGE_MAC_TO_ADD)
7654 /* if the mac address from tmp_add_list is not in the
7655 * uc/mc_mac_list, it means a TO_DEL request was received
7656 * during the time window of adding the mac address into the
7657 * mac table. If the mac_node state is ACTIVE, change it to
7658 * TO_DEL, so it will be removed next time; otherwise it must
7659 * be TO_ADD, meaning this address has not been added into the
7660 * mac table yet, so just remove the mac node.
7661 */
7662 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7664 hclge_update_mac_node(new_node, mac_node->state);
7665 list_del(&mac_node->node);
7667 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7668 mac_node->state = HCLGE_MAC_TO_DEL;
7669 list_del(&mac_node->node);
7670 list_add_tail(&mac_node->node, mac_list);
7672 list_del(&mac_node->node);
7680 static void hclge_sync_from_del_list(struct list_head *del_list,
7681 struct list_head *mac_list)
7683 struct hclge_mac_node *mac_node, *tmp, *new_node;
7685 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7686 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7688 /* If the mac addr exists in the mac list, it means
7689 * a new TO_ADD request was received during the time window
7690 * of configuring the mac address. Since the mac node
7691 * state is TO_ADD and the address is already in
7692 * the hardware (because the delete failed), we just need
7693 * to change the mac node state to ACTIVE.
7694 */
7695 new_node->state = HCLGE_MAC_ACTIVE;
7696 list_del(&mac_node->node);
7699 list_del(&mac_node->node);
7700 list_add_tail(&mac_node->node, mac_list);
7705 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7706 enum HCLGE_MAC_ADDR_TYPE mac_type,
7709 if (mac_type == HCLGE_MAC_ADDR_UC) {
7711 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7713 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7716 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7718 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
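/* Sync one vport's software MAC list to hardware in two phases: move
 * pending entries onto temporary add/del lists under the spinlock,
 * issue the firmware commands outside the lock, then fold failures
 * back into the main list so the next service task pass retries them.
 */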
7722 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7723 enum HCLGE_MAC_ADDR_TYPE mac_type)
7725 struct hclge_mac_node *mac_node, *tmp, *new_node;
7726 struct list_head tmp_add_list, tmp_del_list;
7727 struct list_head *list;
7730 INIT_LIST_HEAD(&tmp_add_list);
7731 INIT_LIST_HEAD(&tmp_del_list);
7733 /* move the mac addrs to the tmp_add_list and tmp_del_list, so
7734 * we can add/delete these mac addrs outside the spin lock
7735 */
7736 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7737 &vport->uc_mac_list : &vport->mc_mac_list;
7739 spin_lock_bh(&vport->mac_list_lock);
7741 list_for_each_entry_safe(mac_node, tmp, list, node) {
7742 switch (mac_node->state) {
7743 case HCLGE_MAC_TO_DEL:
7744 list_del(&mac_node->node);
7745 list_add_tail(&mac_node->node, &tmp_del_list);
7747 case HCLGE_MAC_TO_ADD:
7748 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7751 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7752 new_node->state = mac_node->state;
7753 list_add_tail(&new_node->node, &tmp_add_list);
7761 spin_unlock_bh(&vport->mac_list_lock);
7763 /* delete first, in order to get max mac table space for adding */
7764 if (mac_type == HCLGE_MAC_ADDR_UC) {
7765 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7766 hclge_rm_uc_addr_common);
7767 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7768 hclge_add_uc_addr_common);
7770 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7771 hclge_rm_mc_addr_common);
7772 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7773 hclge_add_mc_addr_common);
7776 /* if adding/deleting some mac addresses failed, move them back
7777 * to the mac_list and retry at the next service task run.
7778 */
7779 spin_lock_bh(&vport->mac_list_lock);
7781 hclge_sync_from_del_list(&tmp_del_list, list);
7782 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7784 spin_unlock_bh(&vport->mac_list_lock);
7786 hclge_update_overflow_flags(vport, mac_type, all_added);
7789 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7791 struct hclge_dev *hdev = vport->back;
7793 if (test_bit(vport->vport_id, hdev->vport_config_block))
7796 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7802 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7806 for (i = 0; i < hdev->num_alloc_vport; i++) {
7807 struct hclge_vport *vport = &hdev->vport[i];
7809 if (!hclge_need_sync_mac_table(vport))
7812 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7813 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7817 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7818 enum HCLGE_MAC_ADDR_TYPE mac_type)
7820 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7821 struct hclge_mac_node *mac_cfg, *tmp;
7822 struct hclge_dev *hdev = vport->back;
7823 struct list_head tmp_del_list, *list;
7826 if (mac_type == HCLGE_MAC_ADDR_UC) {
7827 list = &vport->uc_mac_list;
7828 unsync = hclge_rm_uc_addr_common;
7830 list = &vport->mc_mac_list;
7831 unsync = hclge_rm_mc_addr_common;
7834 INIT_LIST_HEAD(&tmp_del_list);
7837 set_bit(vport->vport_id, hdev->vport_config_block);
7839 spin_lock_bh(&vport->mac_list_lock);
7841 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7842 switch (mac_cfg->state) {
7843 case HCLGE_MAC_TO_DEL:
7844 case HCLGE_MAC_ACTIVE:
7845 list_del(&mac_cfg->node);
7846 list_add_tail(&mac_cfg->node, &tmp_del_list);
7848 case HCLGE_MAC_TO_ADD:
7850 list_del(&mac_cfg->node);
7857 spin_unlock_bh(&vport->mac_list_lock);
7859 list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7860 ret = unsync(vport, mac_cfg->mac_addr);
7861 if (!ret || ret == -ENOENT) {
7862 /* clear all mac addrs from hardware, but keep these
7863 * mac addrs in the mac list, and restore them after
7864 * the vf reset is finished.
7865 */
7866 if (!is_del_list &&
7867 mac_cfg->state == HCLGE_MAC_ACTIVE) {
7868 mac_cfg->state = HCLGE_MAC_TO_ADD;
7870 list_del(&mac_cfg->node);
7873 } else if (is_del_list) {
7874 mac_cfg->state = HCLGE_MAC_TO_DEL;
7878 spin_lock_bh(&vport->mac_list_lock);
7880 hclge_sync_from_del_list(&tmp_del_list, list);
7882 spin_unlock_bh(&vport->mac_list_lock);
7885 /* remove all mac addresses when uninitializing */
7886 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
7887 enum HCLGE_MAC_ADDR_TYPE mac_type)
7889 struct hclge_mac_node *mac_node, *tmp;
7890 struct hclge_dev *hdev = vport->back;
7891 struct list_head tmp_del_list, *list;
7893 INIT_LIST_HEAD(&tmp_del_list);
7895 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7896 &vport->uc_mac_list : &vport->mc_mac_list;
7898 spin_lock_bh(&vport->mac_list_lock);
7900 list_for_each_entry_safe(mac_node, tmp, list, node) {
7901 switch (mac_node->state) {
7902 case HCLGE_MAC_TO_DEL:
7903 case HCLGE_MAC_ACTIVE:
7904 list_del(&mac_node->node);
7905 list_add_tail(&mac_node->node, &tmp_del_list);
7907 case HCLGE_MAC_TO_ADD:
7908 list_del(&mac_node->node);
7914 spin_unlock_bh(&vport->mac_list_lock);
7916 if (mac_type == HCLGE_MAC_ADDR_UC)
7917 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7918 hclge_rm_uc_addr_common);
7920 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7921 hclge_rm_mc_addr_common);
7923 if (!list_empty(&tmp_del_list))
7924 dev_warn(&hdev->pdev->dev,
7925 "failed to fully uninit %s mac list for vport %u\n",
7926 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
7929 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
7930 list_del(&mac_node->node);
7935 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
7937 struct hclge_vport *vport;
7940 for (i = 0; i < hdev->num_alloc_vport; i++) {
7941 vport = &hdev->vport[i];
7942 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
7943 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
7947 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7948 u16 cmdq_resp, u8 resp_code)
7950 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7951 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
7952 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7953 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7958 dev_err(&hdev->pdev->dev,
7959 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7964 switch (resp_code) {
7965 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7966 case HCLGE_ETHERTYPE_ALREADY_ADD:
7969 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7970 dev_err(&hdev->pdev->dev,
7971 "add mac ethertype failed for manager table overflow.\n");
7972 return_status = -EIO;
7974 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7975 dev_err(&hdev->pdev->dev,
7976 "add mac ethertype failed for key conflict.\n");
7977 return_status = -EIO;
7980 dev_err(&hdev->pdev->dev,
7981 "add mac ethertype failed for undefined, code=%u.\n",
7983 return_status = -EIO;
7986 return return_status;
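/* A VF MAC counts as "existing" if it is already programmed in the
 * hardware MAC-VLAN table or matches another VF's administratively
 * assigned address.
 */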
7989 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7992 struct hclge_mac_vlan_tbl_entry_cmd req;
7993 struct hclge_dev *hdev = vport->back;
7994 struct hclge_desc desc;
7995 u16 egress_port = 0;
7998 if (is_zero_ether_addr(mac_addr))
8001 memset(&req, 0, sizeof(req));
8002 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8003 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8004 req.egress_port = cpu_to_le16(egress_port);
8005 hclge_prepare_mac_addr(&req, mac_addr, false);
8007 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8010 vf_idx += HCLGE_VF_VPORT_START_NUM;
8011 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8013 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8019 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8022 struct hclge_vport *vport = hclge_get_vport(handle);
8023 struct hclge_dev *hdev = vport->back;
8025 vport = hclge_get_vf_vport(hdev, vf);
8029 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8030 dev_info(&hdev->pdev->dev,
8031 "Specified MAC(=%pM) is same as before, no change committed!\n",
8036 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8037 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8042 ether_addr_copy(vport->vf_info.mac, mac_addr);
8044 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8045 dev_info(&hdev->pdev->dev,
8046 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8048 return hclge_inform_reset_assert_to_vf(vport);
8051 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8056 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8057 const struct hclge_mac_mgr_tbl_entry_cmd *req)
8059 struct hclge_desc desc;
8064 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8065 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8067 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8069 dev_err(&hdev->pdev->dev,
8070 "add mac ethertype failed for cmd_send, ret =%d.\n",
8075 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8076 retval = le16_to_cpu(desc.retval);
8078 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8081 static int init_mgr_tbl(struct hclge_dev *hdev)
8086 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8087 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8089 dev_err(&hdev->pdev->dev,
8090 "add mac ethertype failed, ret =%d.\n",
8099 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8101 struct hclge_vport *vport = hclge_get_vport(handle);
8102 struct hclge_dev *hdev = vport->back;
8104 ether_addr_copy(p, hdev->hw.mac.mac_addr);
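/* Called with mac_list_lock held: queue the device's new primary MAC
 * for addition (placed at the list head so it wins UMV space after a
 * reset) and, if it changed, queue the old address for deletion.
 */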
8107 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8108 const u8 *old_addr, const u8 *new_addr)
8110 struct list_head *list = &vport->uc_mac_list;
8111 struct hclge_mac_node *old_node, *new_node;
8113 new_node = hclge_find_mac_node(list, new_addr);
8115 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8119 new_node->state = HCLGE_MAC_TO_ADD;
8120 ether_addr_copy(new_node->mac_addr, new_addr);
8121 list_add(&new_node->node, list);
8123 if (new_node->state == HCLGE_MAC_TO_DEL)
8124 new_node->state = HCLGE_MAC_ACTIVE;
8126 /* make sure the new addr is at the list head, so the dev
8127 * addr is not skipped when re-adding into the mac table due
8128 * to the umv space limitation after a global/imp reset, which
8129 * clears the hardware mac table.
8130 */
8131 list_move(&new_node->node, list);
8134 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8135 old_node = hclge_find_mac_node(list, old_addr);
8137 if (old_node->state == HCLGE_MAC_TO_ADD) {
8138 list_del(&old_node->node);
8141 old_node->state = HCLGE_MAC_TO_DEL;
8146 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8151 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8154 const unsigned char *new_addr = (const unsigned char *)p;
8155 struct hclge_vport *vport = hclge_get_vport(handle);
8156 struct hclge_dev *hdev = vport->back;
8157 unsigned char *old_addr = NULL;
8160 /* mac addr check */
8161 if (is_zero_ether_addr(new_addr) ||
8162 is_broadcast_ether_addr(new_addr) ||
8163 is_multicast_ether_addr(new_addr)) {
8164 dev_err(&hdev->pdev->dev,
8165 "change uc mac err! invalid mac: %pM.\n",
8170 ret = hclge_pause_addr_cfg(hdev, new_addr);
8172 dev_err(&hdev->pdev->dev,
8173 "failed to configure mac pause address, ret = %d\n",
8179 old_addr = hdev->hw.mac.mac_addr;
8181 spin_lock_bh(&vport->mac_list_lock);
8182 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8184 dev_err(&hdev->pdev->dev,
8185 "failed to change the mac addr:%pM, ret = %d\n",
8187 spin_unlock_bh(&vport->mac_list_lock);
8190 hclge_pause_addr_cfg(hdev, old_addr);
8194 /* we must update the dev addr under the spin lock's protection,
8195 * preventing the dev addr from being removed by the set_rx_mode path.
8196 */
8197 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8198 spin_unlock_bh(&vport->mac_list_lock);
8200 hclge_task_schedule(hdev, 0);
8205 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8208 struct hclge_vport *vport = hclge_get_vport(handle);
8209 struct hclge_dev *hdev = vport->back;
8211 if (!hdev->hw.mac.phydev)
8214 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
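/* Read-modify-write of the VLAN filter control: read the current
 * filter config from firmware, flip only the requested filter-enable
 * bits (fe_type), and write the result back.
 */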
8217 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8218 u8 fe_type, bool filter_en, u8 vf_id)
8220 struct hclge_vlan_filter_ctrl_cmd *req;
8221 struct hclge_desc desc;
8224 /* read current vlan filter parameter */
8225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8226 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8227 req->vlan_type = vlan_type;
8230 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8232 dev_err(&hdev->pdev->dev,
8233 "failed to get vlan filter config, ret = %d.\n", ret);
8237 /* modify and write new config parameter */
8238 hclge_cmd_reuse_desc(&desc, false);
8239 req->vlan_fe = filter_en ?
8240 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8242 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8244 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8250 #define HCLGE_FILTER_TYPE_VF 0
8251 #define HCLGE_FILTER_TYPE_PORT 1
8252 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8253 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8254 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8255 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8256 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8257 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8258 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8259 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8260 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8262 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8264 struct hclge_vport *vport = hclge_get_vport(handle);
8265 struct hclge_dev *hdev = vport->back;
8267 if (hdev->pdev->revision >= 0x21) {
8268 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8269 HCLGE_FILTER_FE_EGRESS, enable, 0);
8270 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8271 HCLGE_FILTER_FE_INGRESS, enable, 0);
8273 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8274 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8278 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8280 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8283 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8284 bool is_kill, u16 vlan,
8287 struct hclge_vport *vport = &hdev->vport[vfid];
8288 struct hclge_vlan_filter_vf_cfg_cmd *req0;
8289 struct hclge_vlan_filter_vf_cfg_cmd *req1;
8290 struct hclge_desc desc[2];
8295 /* if the vf vlan table is full, firmware will close the vf vlan
8296 * filter; it is then impossible and unnecessary to add new vlan ids.
8297 * If spoof check is enabled while the table is full, new vlans also
8298 * shouldn't be added, since tx packets carrying them would be dropped.
8299 */
8300 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8301 if (vport->vf_info.spoofchk && vlan) {
8302 dev_err(&hdev->pdev->dev,
8303 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8309 hclge_cmd_setup_basic_desc(&desc[0],
8310 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8311 hclge_cmd_setup_basic_desc(&desc[1],
8312 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8314 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8316 vf_byte_off = vfid / 8;
8317 vf_byte_val = 1 << (vfid % 8);
8319 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8320 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8322 req0->vlan_id = cpu_to_le16(vlan);
8323 req0->vlan_cfg = is_kill;
8325 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8326 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8328 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8330 ret = hclge_cmd_send(&hdev->hw, desc, 2);
8332 dev_err(&hdev->pdev->dev,
8333 "Send vf vlan command fail, ret =%d.\n",
8339 #define HCLGE_VF_VLAN_NO_ENTRY 2
8340 if (!req0->resp_code || req0->resp_code == 1)
8343 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8344 set_bit(vfid, hdev->vf_vlan_full);
8345 dev_warn(&hdev->pdev->dev,
8346 "vf vlan table is full, vf vlan filter is disabled\n");
8350 dev_err(&hdev->pdev->dev,
8351 "Add vf vlan filter fail, ret =%u.\n",
8354 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
8355 if (!req0->resp_code)
8358 /* vf vlan filter is disabled when the vf vlan table is full,
8359 * so new vlan ids are never added into the vf vlan table.
8360 * Just return 0 without a warning, to avoid massive verbose
8361 * logs when unloading.
8362 */
8363 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8366 dev_err(&hdev->pdev->dev,
8367 "Kill vf vlan filter fail, ret =%u.\n",
8374 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8375 u16 vlan_id, bool is_kill)
8377 struct hclge_vlan_filter_pf_cfg_cmd *req;
8378 struct hclge_desc desc;
8379 u8 vlan_offset_byte_val;
8380 u8 vlan_offset_byte;
8384 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8386 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8387 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8388 HCLGE_VLAN_BYTE_SIZE;
8389 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8391 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8392 req->vlan_offset = vlan_offset_160;
8393 req->vlan_cfg = is_kill;
8394 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8396 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8398 dev_err(&hdev->pdev->dev,
8399 "port vlan command, send fail, ret =%d.\n", ret);
8403 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8404 u16 vport_id, u16 vlan_id,
8407 u16 vport_idx, vport_num = 0;
8410 if (is_kill && !vlan_id)
8411 return 0;
8413 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8416 dev_err(&hdev->pdev->dev,
8417 "Set %u vport vlan filter config fail, ret =%d.\n",
8422 /* vlan 0 may be added twice when 8021q module is enabled */
8423 if (!is_kill && !vlan_id &&
8424 test_bit(vport_id, hdev->vlan_table[vlan_id]))
8425 return 0;
8427 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8428 dev_err(&hdev->pdev->dev,
8429 "Add port vlan failed, vport %u is already in vlan %u\n",
8435 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8436 dev_err(&hdev->pdev->dev,
8437 "Delete port vlan failed, vport %u is not in vlan %u\n",
8442 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8445 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8446 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8452 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8454 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8455 struct hclge_vport_vtag_tx_cfg_cmd *req;
8456 struct hclge_dev *hdev = vport->back;
8457 struct hclge_desc desc;
8461 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8463 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8464 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8465 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8466 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8467 vcfg->accept_tag1 ? 1 : 0);
8468 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8469 vcfg->accept_untag1 ? 1 : 0);
8470 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8471 vcfg->accept_tag2 ? 1 : 0);
8472 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8473 vcfg->accept_untag2 ? 1 : 0);
8474 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8475 vcfg->insert_tag1_en ? 1 : 0);
8476 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8477 vcfg->insert_tag2_en ? 1 : 0);
8478 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8480 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8481 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8482 HCLGE_VF_NUM_PER_BYTE;
8483 req->vf_bitmap[bmap_index] =
8484 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8486 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8488 dev_err(&hdev->pdev->dev,
8489 "Send port txvlan cfg command fail, ret =%d\n",
8495 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8497 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8498 struct hclge_vport_vtag_rx_cfg_cmd *req;
8499 struct hclge_dev *hdev = vport->back;
8500 struct hclge_desc desc;
8504 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8506 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8507 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8508 vcfg->strip_tag1_en ? 1 : 0);
8509 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8510 vcfg->strip_tag2_en ? 1 : 0);
8511 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8512 vcfg->vlan1_vlan_prionly ? 1 : 0);
8513 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8514 vcfg->vlan2_vlan_prionly ? 1 : 0);
8516 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8517 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8518 HCLGE_VF_NUM_PER_BYTE;
8519 req->vf_bitmap[bmap_index] =
8520 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8522 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8524 dev_err(&hdev->pdev->dev,
8525 "Send port rxvlan cfg command fail, ret =%d\n",
8531 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8532 u16 port_base_vlan_state,
8537 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8538 vport->txvlan_cfg.accept_tag1 = true;
8539 vport->txvlan_cfg.insert_tag1_en = false;
8540 vport->txvlan_cfg.default_tag1 = 0;
8542 vport->txvlan_cfg.accept_tag1 = false;
8543 vport->txvlan_cfg.insert_tag1_en = true;
8544 vport->txvlan_cfg.default_tag1 = vlan_tag;
8547 vport->txvlan_cfg.accept_untag1 = true;
8549 /* accept_tag2 and accept_untag2 are not supported on
8550 * pdev revision 0x20; newer revisions support them, but
8551 * these two fields cannot be configured by the user.
8552 */
8553 vport->txvlan_cfg.accept_tag2 = true;
8554 vport->txvlan_cfg.accept_untag2 = true;
8555 vport->txvlan_cfg.insert_tag2_en = false;
8556 vport->txvlan_cfg.default_tag2 = 0;
8558 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8559 vport->rxvlan_cfg.strip_tag1_en = false;
8560 vport->rxvlan_cfg.strip_tag2_en =
8561 vport->rxvlan_cfg.rx_vlan_offload_en;
8563 vport->rxvlan_cfg.strip_tag1_en =
8564 vport->rxvlan_cfg.rx_vlan_offload_en;
8565 vport->rxvlan_cfg.strip_tag2_en = true;
8567 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8568 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8570 ret = hclge_set_vlan_tx_offload_cfg(vport);
8574 return hclge_set_vlan_rx_offload_cfg(vport);
8577 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8579 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8580 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8581 struct hclge_desc desc;
8584 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8585 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8586 rx_req->ot_fst_vlan_type =
8587 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8588 rx_req->ot_sec_vlan_type =
8589 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8590 rx_req->in_fst_vlan_type =
8591 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8592 rx_req->in_sec_vlan_type =
8593 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8595 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8597 dev_err(&hdev->pdev->dev,
8598 "Send rxvlan protocol type command fail, ret =%d\n",
8603 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8605 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8606 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8607 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8609 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8611 dev_err(&hdev->pdev->dev,
8612 "Send txvlan protocol type command fail, ret =%d\n",
8618 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8620 #define HCLGE_DEF_VLAN_TYPE 0x8100
8622 struct hnae3_handle *handle = &hdev->vport[0].nic;
8623 struct hclge_vport *vport;
8627 if (hdev->pdev->revision >= 0x21) {
8628 /* for revision 0x21, vf vlan filter is per function */
8629 for (i = 0; i < hdev->num_alloc_vport; i++) {
8630 vport = &hdev->vport[i];
8631 ret = hclge_set_vlan_filter_ctrl(hdev,
8632 HCLGE_FILTER_TYPE_VF,
8633 HCLGE_FILTER_FE_EGRESS,
8640 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8641 HCLGE_FILTER_FE_INGRESS, true,
8646 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8647 HCLGE_FILTER_FE_EGRESS_V1_B,
8653 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8655 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8656 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8657 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8658 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8659 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8660 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8662 ret = hclge_set_vlan_protocol_type(hdev);
8666 for (i = 0; i < hdev->num_alloc_vport; i++) {
8669 vport = &hdev->vport[i];
8670 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8672 ret = hclge_vlan_offload_cfg(vport,
8673 vport->port_base_vlan_cfg.state,
8679 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8682 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8685 struct hclge_vport_vlan_cfg *vlan;
8687 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8691 vlan->hd_tbl_status = writen_to_tbl;
8692 vlan->vlan_id = vlan_id;
8694 list_add_tail(&vlan->node, &vport->vlan_list);
8697 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8699 struct hclge_vport_vlan_cfg *vlan, *tmp;
8700 struct hclge_dev *hdev = vport->back;
8703 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8704 if (!vlan->hd_tbl_status) {
8705 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8707 vlan->vlan_id, false);
8709 dev_err(&hdev->pdev->dev,
8710 "restore vport vlan list failed, ret=%d\n",
8715 vlan->hd_tbl_status = true;
8721 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8724 struct hclge_vport_vlan_cfg *vlan, *tmp;
8725 struct hclge_dev *hdev = vport->back;
8727 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8728 if (vlan->vlan_id == vlan_id) {
8729 if (is_write_tbl && vlan->hd_tbl_status)
8730 hclge_set_vlan_filter_hw(hdev,
8736 list_del(&vlan->node);
8743 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8745 struct hclge_vport_vlan_cfg *vlan, *tmp;
8746 struct hclge_dev *hdev = vport->back;
8748 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8749 if (vlan->hd_tbl_status)
8750 hclge_set_vlan_filter_hw(hdev,
8756 vlan->hd_tbl_status = false;
8758 list_del(&vlan->node);
8762 clear_bit(vport->vport_id, hdev->vf_vlan_full);
8765 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8767 struct hclge_vport_vlan_cfg *vlan, *tmp;
8768 struct hclge_vport *vport;
8771 for (i = 0; i < hdev->num_alloc_vport; i++) {
8772 vport = &hdev->vport[i];
8773 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8774 list_del(&vlan->node);
8780 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8782 struct hclge_vport_vlan_cfg *vlan, *tmp;
8783 struct hclge_dev *hdev = vport->back;
8789 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8790 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8791 state = vport->port_base_vlan_cfg.state;
8793 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8794 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8795 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8796 vport->vport_id, vlan_id,
8801 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8802 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8804 vlan->vlan_id, false);
8807 vlan->hd_tbl_status = true;
8811 /* For global reset and imp reset, hardware will clear the mac table,
8812 * so we change the mac address state from ACTIVE to TO_ADD so they
8813 * can be restored in the service task after the reset completes.
8814 * Furthermore, mac addresses in state TO_DEL or DEL_FAIL do not need
8815 * to be restored after reset, so just remove those nodes from mac_list.
8816 */
8817 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8819 struct hclge_mac_node *mac_node, *tmp;
8821 list_for_each_entry_safe(mac_node, tmp, list, node) {
8822 if (mac_node->state == HCLGE_MAC_ACTIVE) {
8823 mac_node->state = HCLGE_MAC_TO_ADD;
8824 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8825 list_del(&mac_node->node);
8831 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8833 spin_lock_bh(&vport->mac_list_lock);
8835 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8836 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8837 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8839 spin_unlock_bh(&vport->mac_list_lock);
8842 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8844 struct hclge_vport *vport = &hdev->vport[0];
8845 struct hnae3_handle *handle = &vport->nic;
8847 hclge_restore_mac_table_common(vport);
8848 hclge_restore_vport_vlan_table(vport);
8849 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8851 hclge_restore_fd_entries(handle);
8854 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8856 struct hclge_vport *vport = hclge_get_vport(handle);
8858 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8859 vport->rxvlan_cfg.strip_tag1_en = false;
8860 vport->rxvlan_cfg.strip_tag2_en = enable;
8862 vport->rxvlan_cfg.strip_tag1_en = enable;
8863 vport->rxvlan_cfg.strip_tag2_en = true;
8865 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8866 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8867 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8869 return hclge_set_vlan_rx_offload_cfg(vport);
8872 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8873 u16 port_base_vlan_state,
8874 struct hclge_vlan_info *new_info,
8875 struct hclge_vlan_info *old_info)
8877 struct hclge_dev *hdev = vport->back;
8880 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8881 hclge_rm_vport_all_vlan_table(vport, false);
8882 return hclge_set_vlan_filter_hw(hdev,
8883 htons(new_info->vlan_proto),
8889 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8890 vport->vport_id, old_info->vlan_tag,
8895 return hclge_add_vport_all_vlan_table(vport);
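/* Apply a new port-based vlan configuration: refresh the tag offload
 * settings, swap the old hardware filter entry for the new one, and
 * update the cached state and vlan_info only when everything succeeds.
 */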
8898 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8899 struct hclge_vlan_info *vlan_info)
8901 struct hnae3_handle *nic = &vport->nic;
8902 struct hclge_vlan_info *old_vlan_info;
8903 struct hclge_dev *hdev = vport->back;
8906 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8908 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8912 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8913 /* add new VLAN tag */
8914 ret = hclge_set_vlan_filter_hw(hdev,
8915 htons(vlan_info->vlan_proto),
8917 vlan_info->vlan_tag,
8922 /* remove old VLAN tag */
8923 ret = hclge_set_vlan_filter_hw(hdev,
8924 htons(old_vlan_info->vlan_proto),
8926 old_vlan_info->vlan_tag,
8934 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8939 /* update state only when disabling/enabling port based VLAN */
8940 vport->port_base_vlan_cfg.state = state;
8941 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8942 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8944 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8947 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8948 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8949 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
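/* Map a VF vlan request onto a port-based vlan state transition:
 * vlan 0 disables it (a no-op if already disabled), a new tag enables
 * or modifies it, and repeating the current tag is NOCHANGE.
 */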
8954 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8955 enum hnae3_port_base_vlan_state state,
8958 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8960 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8962 return HNAE3_PORT_BASE_VLAN_ENABLE;
8965 return HNAE3_PORT_BASE_VLAN_DISABLE;
8966 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8967 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8969 return HNAE3_PORT_BASE_VLAN_MODIFY;
8973 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8974 u16 vlan, u8 qos, __be16 proto)
8976 struct hclge_vport *vport = hclge_get_vport(handle);
8977 struct hclge_dev *hdev = vport->back;
8978 struct hclge_vlan_info vlan_info;
8982 if (hdev->pdev->revision == 0x20)
8985 vport = hclge_get_vf_vport(hdev, vfid);
8989 /* qos is a 3-bit value, so it cannot be bigger than 7 */
8990 if (vlan > VLAN_N_VID - 1 || qos > 7)
8992 if (proto != htons(ETH_P_8021Q))
8993 return -EPROTONOSUPPORT;
8995 state = hclge_get_port_base_vlan_state(vport,
8996 vport->port_base_vlan_cfg.state,
8998 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9001 vlan_info.vlan_tag = vlan;
9002 vlan_info.qos = qos;
9003 vlan_info.vlan_proto = ntohs(proto);
9005 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9006 return hclge_update_port_base_vlan_cfg(vport, state,
9009 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9010 vport->vport_id, state,
9017 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9019 struct hclge_vlan_info *vlan_info;
9020 struct hclge_vport *vport;
9024 /* clear port based VLAN for all VFs */
9025 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9026 vport = &hdev->vport[vf];
9027 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9029 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9031 vlan_info->vlan_tag, true);
9033 dev_err(&hdev->pdev->dev,
9034 "failed to clear vf vlan for vf%d, ret = %d\n",
9035 vf - HCLGE_VF_VPORT_START_NUM, ret);
9039 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9040 u16 vlan_id, bool is_kill)
9042 struct hclge_vport *vport = hclge_get_vport(handle);
9043 struct hclge_dev *hdev = vport->back;
9044 bool writen_to_tbl = false;
9047 /* When the device is resetting, firmware is unable to handle the
9048 * mailbox. Just record the vlan id, and remove it after
9049 * reset finished.
9050 */
9051 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
9052 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9056 /* When port based VLAN is enabled, we use the port based VLAN as the
9057 * VLAN filter entry. In this case, we don't update the VLAN filter table
9058 * when the user adds a new VLAN or removes an existing one; we just
9059 * update the vport VLAN list. The VLAN ids in the VLAN list are written
9060 * to the VLAN filter table only once port based VLAN is disabled.
9061 */
9062 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9063 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9065 writen_to_tbl = true;
9070 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9072 hclge_add_vport_vlan_table(vport, vlan_id,
9074 } else if (is_kill) {
9075 /* when removing the hw VLAN filter failed, record the vlan id,
9076 * and try to remove it from hw later, to stay consistent
9077 * with the stack
9078 */
9079 set_bit(vlan_id, vport->vlan_del_fail_bmap);
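/* hclge_sync_vlan_filter() below periodically replays VLAN deletions
 * that failed earlier (e.g. while a reset was in progress): it walks
 * each vport's vlan_del_fail_bmap and retries the hardware delete,
 * bounded by HCLGE_MAX_SYNC_COUNT entries per invocation.
 */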
9084 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9086 #define HCLGE_MAX_SYNC_COUNT 60
9088 int i, ret, sync_cnt = 0;
9091 /* sync the VLAN filter for all vports; the PF (vport 0) is always alive */
9092 for (i = 0; i < hdev->num_alloc_vport; i++) {
9093 struct hclge_vport *vport = &hdev->vport[i];
9095 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9097 while (vlan_id != VLAN_N_VID) {
9098 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9099 vport->vport_id, vlan_id,
9101 if (ret && ret != -EINVAL)
9102 return;
9104 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9105 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9107 sync_cnt++;
9108 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9109 return;
9111 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9117 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9119 struct hclge_config_max_frm_size_cmd *req;
9120 struct hclge_desc desc;
9122 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9124 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9125 req->max_frm_size = cpu_to_le16(new_mps);
9126 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9128 return hclge_cmd_send(&hdev->hw, &desc, 1);
9131 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9133 struct hclge_vport *vport = hclge_get_vport(handle);
9135 return hclge_set_vport_mtu(vport, new_mtu);
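/* The MTU is converted to a max frame size before being programmed:
 * max_frm_size = new_mtu + ETH_HLEN (14) + ETH_FCS_LEN (4) +
 * 2 * VLAN_HLEN (8), so e.g. an MTU of 1500 becomes a 1526 byte frame,
 * which must lie within [HCLGE_MAC_MIN_FRAME, HCLGE_MAC_MAX_FRAME].
 */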
9138 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9140 struct hclge_dev *hdev = vport->back;
9141 int i, max_frm_size, ret;
9143 /* HW supports 2 VLAN layers */
9144 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9145 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9146 max_frm_size > HCLGE_MAC_MAX_FRAME)
9149 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9150 mutex_lock(&hdev->vport_lock);
9151 /* VF's mps must fit within hdev->mps */
9152 if (vport->vport_id && max_frm_size > hdev->mps) {
9153 mutex_unlock(&hdev->vport_lock);
9155 } else if (vport->vport_id) {
9156 vport->mps = max_frm_size;
9157 mutex_unlock(&hdev->vport_lock);
9161 /* PF's mps must be greater than VF's mps */
9162 for (i = 1; i < hdev->num_alloc_vport; i++)
9163 if (max_frm_size < hdev->vport[i].mps) {
9164 mutex_unlock(&hdev->vport_lock);
9168 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9170 ret = hclge_set_mac_mtu(hdev, max_frm_size);
9172 dev_err(&hdev->pdev->dev,
9173 "Change mtu fail, ret =%d\n", ret);
9177 hdev->mps = max_frm_size;
9178 vport->mps = max_frm_size;
9180 ret = hclge_buffer_alloc(hdev);
9182 dev_err(&hdev->pdev->dev,
9183 "Allocate buffer fail, ret =%d\n", ret);
9186 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9187 mutex_unlock(&hdev->vport_lock);
9191 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9194 struct hclge_reset_tqp_queue_cmd *req;
9195 struct hclge_desc desc;
9198 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9200 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9201 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9203 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9205 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9207 dev_err(&hdev->pdev->dev,
9208 "Send tqp reset cmd error, status =%d\n", ret);
9215 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9217 struct hclge_reset_tqp_queue_cmd *req;
9218 struct hclge_desc desc;
9221 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9223 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9224 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9226 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9228 dev_err(&hdev->pdev->dev,
9229 "Get reset status error, status =%d\n", ret);
9233 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9236 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9238 struct hnae3_queue *queue;
9239 struct hclge_tqp *tqp;
9241 queue = handle->kinfo.tqp[queue_id];
9242 tqp = container_of(queue, struct hclge_tqp, q);
9244 return tqp->index;
9245 }
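/* TQP reset sequence used below: disable the queue, ask the firmware to
 * assert the soft reset, poll the ready-to-reset status (up to
 * HCLGE_TQP_RESET_TRY_TIMES tries with a ~1ms sleep between them), then
 * deassert the reset again.
 */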
9247 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9249 struct hclge_vport *vport = hclge_get_vport(handle);
9250 struct hclge_dev *hdev = vport->back;
9251 int reset_try_times = 0;
9256 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9258 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9260 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9264 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9266 dev_err(&hdev->pdev->dev,
9267 "Send reset tqp cmd fail, ret = %d\n", ret);
9271 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9272 reset_status = hclge_get_reset_status(hdev, queue_gid);
9276 /* Wait for tqp hw reset */
9277 usleep_range(1000, 1200);
9280 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9281 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9285 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9287 dev_err(&hdev->pdev->dev,
9288 "Deassert the soft reset fail, ret = %d\n", ret);
9293 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9295 struct hclge_dev *hdev = vport->back;
9296 int reset_try_times = 0;
9301 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9303 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9305 dev_warn(&hdev->pdev->dev,
9306 "Send reset tqp cmd fail, ret = %d\n", ret);
9310 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9311 reset_status = hclge_get_reset_status(hdev, queue_gid);
9315 /* Wait for tqp hw reset */
9316 usleep_range(1000, 1200);
9319 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9320 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9324 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9326 dev_warn(&hdev->pdev->dev,
9327 "Deassert the soft reset fail, ret = %d\n", ret);
9330 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9332 struct hclge_vport *vport = hclge_get_vport(handle);
9333 struct hclge_dev *hdev = vport->back;
9335 return hdev->fw_version;
9338 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9340 struct phy_device *phydev = hdev->hw.mac.phydev;
9345 phy_set_asym_pause(phydev, rx_en, tx_en);
9348 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9352 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9355 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9357 dev_err(&hdev->pdev->dev,
9358 "configure pauseparam error, ret = %d.\n", ret);
9363 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9365 struct phy_device *phydev = hdev->hw.mac.phydev;
9366 u16 remote_advertising = 0;
9367 u16 local_advertising;
9368 u32 rx_pause, tx_pause;
9369 u8 flowctl;
9371 if (!phydev->link || !phydev->autoneg)
9372 return 0;
9374 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9377 remote_advertising = LPA_PAUSE_CAP;
9379 if (phydev->asym_pause)
9380 remote_advertising |= LPA_PAUSE_ASYM;
9382 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9383 remote_advertising);
9384 tx_pause = flowctl & FLOW_CTRL_TX;
9385 rx_pause = flowctl & FLOW_CTRL_RX;
9387 if (phydev->duplex == HCLGE_MAC_HALF) {
9392 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9395 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9396 u32 *rx_en, u32 *tx_en)
9398 struct hclge_vport *vport = hclge_get_vport(handle);
9399 struct hclge_dev *hdev = vport->back;
9400 struct phy_device *phydev = hdev->hw.mac.phydev;
9402 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9404 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9410 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9413 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9416 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9425 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9426 u32 rx_en, u32 tx_en)
9429 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9430 else if (rx_en && !tx_en)
9431 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9432 else if (!rx_en && tx_en)
9433 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9435 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9437 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9440 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9441 u32 rx_en, u32 tx_en)
9443 struct hclge_vport *vport = hclge_get_vport(handle);
9444 struct hclge_dev *hdev = vport->back;
9445 struct phy_device *phydev = hdev->hw.mac.phydev;
9449 fc_autoneg = hclge_get_autoneg(handle);
9450 if (auto_neg != fc_autoneg) {
9451 dev_info(&hdev->pdev->dev,
9452 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9457 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9458 dev_info(&hdev->pdev->dev,
9459 "Priority flow control enabled. Cannot set link flow control.\n");
9463 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9465 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9468 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9471 return phy_start_aneg(phydev);
9476 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9477 u8 *auto_neg, u32 *speed, u8 *duplex)
9479 struct hclge_vport *vport = hclge_get_vport(handle);
9480 struct hclge_dev *hdev = vport->back;
9483 *speed = hdev->hw.mac.speed;
9485 *duplex = hdev->hw.mac.duplex;
9487 *auto_neg = hdev->hw.mac.autoneg;
9490 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9493 struct hclge_vport *vport = hclge_get_vport(handle);
9494 struct hclge_dev *hdev = vport->back;
9496 /* When the NIC is down, the service task is not running and does not
9497 * update the port information every second. Query the port information
9498 * before returning the media type, to ensure it is correct.
9499 */
9500 hclge_update_port_info(hdev);
9503 *media_type = hdev->hw.mac.media_type;
9506 *module_type = hdev->hw.mac.module_type;
9509 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9510 u8 *tp_mdix_ctrl, u8 *tp_mdix)
9512 struct hclge_vport *vport = hclge_get_vport(handle);
9513 struct hclge_dev *hdev = vport->back;
9514 struct phy_device *phydev = hdev->hw.mac.phydev;
9515 int mdix_ctrl, mdix, is_resolved;
9516 unsigned int retval;
9519 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9520 *tp_mdix = ETH_TP_MDI_INVALID;
9524 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9526 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9527 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9528 HCLGE_PHY_MDIX_CTRL_S);
9530 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9531 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9532 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9534 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9536 switch (mdix_ctrl) {
9538 *tp_mdix_ctrl = ETH_TP_MDI;
9541 *tp_mdix_ctrl = ETH_TP_MDI_X;
9544 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9547 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9552 *tp_mdix = ETH_TP_MDI_INVALID;
9554 *tp_mdix = ETH_TP_MDI_X;
9556 *tp_mdix = ETH_TP_MDI;
9559 static void hclge_info_show(struct hclge_dev *hdev)
9561 struct device *dev = &hdev->pdev->dev;
9563 dev_info(dev, "PF info begin:\n");
9565 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9566 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9567 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9568 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9569 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9570 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9571 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9572 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9573 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9574 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9575 dev_info(dev, "This is %s PF\n",
9576 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9577 dev_info(dev, "DCB %s\n",
9578 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9579 dev_info(dev, "MQPRIO %s\n",
9580 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9582 dev_info(dev, "PF info end.\n");
9585 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9586 struct hclge_vport *vport)
9588 struct hnae3_client *client = vport->nic.client;
9589 struct hclge_dev *hdev = ae_dev->priv;
9590 int rst_cnt = hdev->rst_stats.reset_cnt;
9593 ret = client->ops->init_instance(&vport->nic);
9597 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9598 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9599 rst_cnt != hdev->rst_stats.reset_cnt) {
9604 /* Enable nic hw error interrupts */
9605 ret = hclge_config_nic_hw_error(hdev, true);
9607 dev_err(&ae_dev->pdev->dev,
9608 "fail(%d) to enable hw error interrupts\n", ret);
9612 hnae3_set_client_init_flag(client, ae_dev, 1);
9614 if (netif_msg_drv(&hdev->vport->nic))
9615 hclge_info_show(hdev);
9620 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9621 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9622 msleep(HCLGE_WAIT_RESET_DONE);
9624 client->ops->uninit_instance(&vport->nic, 0);
9629 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9630 struct hclge_vport *vport)
9632 struct hclge_dev *hdev = ae_dev->priv;
9633 struct hnae3_client *client;
9637 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9641 client = hdev->roce_client;
9642 ret = hclge_init_roce_base_info(vport);
9646 rst_cnt = hdev->rst_stats.reset_cnt;
9647 ret = client->ops->init_instance(&vport->roce);
9651 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9652 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9653 rst_cnt != hdev->rst_stats.reset_cnt) {
9658 /* Enable roce ras interrupts */
9659 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9661 dev_err(&ae_dev->pdev->dev,
9662 "fail(%d) to enable roce ras interrupts\n", ret);
9666 hnae3_set_client_init_flag(client, ae_dev, 1);
9671 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9672 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9673 msleep(HCLGE_WAIT_RESET_DONE);
9675 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9680 static int hclge_init_client_instance(struct hnae3_client *client,
9681 struct hnae3_ae_dev *ae_dev)
9683 struct hclge_dev *hdev = ae_dev->priv;
9684 struct hclge_vport *vport;
9687 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9688 vport = &hdev->vport[i];
9690 switch (client->type) {
9691 case HNAE3_CLIENT_KNIC:
9692 hdev->nic_client = client;
9693 vport->nic.client = client;
9694 ret = hclge_init_nic_client_instance(ae_dev, vport);
9698 ret = hclge_init_roce_client_instance(ae_dev, vport);
9703 case HNAE3_CLIENT_ROCE:
9704 if (hnae3_dev_roce_supported(hdev)) {
9705 hdev->roce_client = client;
9706 vport->roce.client = client;
9709 ret = hclge_init_roce_client_instance(ae_dev, vport);
9722 hdev->nic_client = NULL;
9723 vport->nic.client = NULL;
9726 hdev->roce_client = NULL;
9727 vport->roce.client = NULL;
9731 static void hclge_uninit_client_instance(struct hnae3_client *client,
9732 struct hnae3_ae_dev *ae_dev)
9734 struct hclge_dev *hdev = ae_dev->priv;
9735 struct hclge_vport *vport;
9738 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9739 vport = &hdev->vport[i];
9740 if (hdev->roce_client) {
9741 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9742 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9743 msleep(HCLGE_WAIT_RESET_DONE);
9745 hdev->roce_client->ops->uninit_instance(&vport->roce,
9747 hdev->roce_client = NULL;
9748 vport->roce.client = NULL;
9750 if (client->type == HNAE3_CLIENT_ROCE)
9752 if (hdev->nic_client && client->ops->uninit_instance) {
9753 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9754 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9755 msleep(HCLGE_WAIT_RESET_DONE);
9757 client->ops->uninit_instance(&vport->nic, 0);
9758 hdev->nic_client = NULL;
9759 vport->nic.client = NULL;
9764 static int hclge_pci_init(struct hclge_dev *hdev)
9766 struct pci_dev *pdev = hdev->pdev;
9767 struct hclge_hw *hw;
9770 ret = pci_enable_device(pdev);
9772 dev_err(&pdev->dev, "failed to enable PCI device\n");
9776 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9778 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9781 "can't set consistent PCI DMA");
9782 goto err_disable_device;
9784 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9787 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9789 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9790 goto err_disable_device;
9793 pci_set_master(pdev);
9794 hw = &hdev->hw;
9795 hw->io_base = pcim_iomap(pdev, 2, 0);
9796 if (!hw->io_base) {
9797 dev_err(&pdev->dev, "Can't map configuration register space\n");
9799 goto err_clr_master;
9802 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9806 pci_clear_master(pdev);
9807 pci_release_regions(pdev);
9809 pci_disable_device(pdev);
9814 static void hclge_pci_uninit(struct hclge_dev *hdev)
9816 struct pci_dev *pdev = hdev->pdev;
9818 pcim_iounmap(pdev, hdev->hw.io_base);
9819 pci_free_irq_vectors(pdev);
9820 pci_clear_master(pdev);
9821 pci_release_mem_regions(pdev);
9822 pci_disable_device(pdev);
9825 static void hclge_state_init(struct hclge_dev *hdev)
9827 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9828 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9829 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9830 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9831 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9832 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9833 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9836 static void hclge_state_uninit(struct hclge_dev *hdev)
9838 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9839 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9841 if (hdev->reset_timer.function)
9842 del_timer_sync(&hdev->reset_timer);
9843 if (hdev->service_task.work.func)
9844 cancel_delayed_work_sync(&hdev->service_task);
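/* FLR handling: hclge_flr_prepare() quiesces the device under the reset
 * semaphore, retrying up to HCLGE_FLR_RETRY_CNT times with a
 * HCLGE_FLR_RETRY_WAIT_MS delay, and masks the misc vector and command
 * queue until hclge_flr_done() re-enables them and rebuilds the device.
 */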
9847 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9849 #define HCLGE_FLR_RETRY_WAIT_MS 500
9850 #define HCLGE_FLR_RETRY_CNT 5
9852 struct hclge_dev *hdev = ae_dev->priv;
9857 down(&hdev->reset_sem);
9858 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9859 hdev->reset_type = HNAE3_FLR_RESET;
9860 ret = hclge_reset_prepare(hdev);
9862 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9864 if (hdev->reset_pending ||
9865 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9866 dev_err(&hdev->pdev->dev,
9867 "reset_pending:0x%lx, retry_cnt:%d\n",
9868 hdev->reset_pending, retry_cnt);
9869 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9870 up(&hdev->reset_sem);
9871 msleep(HCLGE_FLR_RETRY_WAIT_MS);
9876 /* disable misc vector before FLR done */
9877 hclge_enable_vector(&hdev->misc_vector, false);
9878 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9879 hdev->rst_stats.flr_rst_cnt++;
9882 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9884 struct hclge_dev *hdev = ae_dev->priv;
9887 hclge_enable_vector(&hdev->misc_vector, true);
9889 ret = hclge_reset_rebuild(hdev);
9891 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9893 hdev->reset_type = HNAE3_NONE_RESET;
9894 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9895 up(&hdev->reset_sem);
9898 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9902 for (i = 0; i < hdev->num_alloc_vport; i++) {
9903 struct hclge_vport *vport = &hdev->vport[i];
9906 /* Send cmd to clear VF's FUNC_RST_ING */
9907 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9909 dev_warn(&hdev->pdev->dev,
9910 "clear vf(%u) rst failed %d!\n",
9911 vport->vport_id, ret);
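/* PF initialization order below: PCI/DMA setup, firmware command queue
 * and commands, capability query, configuration, MSI and misc IRQ, TQP
 * and vport allocation, MDIO (copper only), MAC, TSO/GRO, VLAN,
 * scheduler, RSS, manager and flow director tables, and finally timers,
 * IRQ affinity and the misc vector; error paths unwind in reverse.
 */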
9915 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9917 struct pci_dev *pdev = ae_dev->pdev;
9918 struct hclge_dev *hdev;
9921 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9926 hdev->ae_dev = ae_dev;
9927 hdev->reset_type = HNAE3_NONE_RESET;
9928 hdev->reset_level = HNAE3_FUNC_RESET;
9929 ae_dev->priv = hdev;
9931 /* HW supports 2 VLAN layers */
9932 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9934 mutex_init(&hdev->vport_lock);
9935 spin_lock_init(&hdev->fd_rule_lock);
9936 sema_init(&hdev->reset_sem, 1);
9938 ret = hclge_pci_init(hdev);
9942 /* Firmware command queue initialization */
9943 ret = hclge_cmd_queue_init(hdev);
9945 goto err_pci_uninit;
9947 /* Firmware command initialization */
9948 ret = hclge_cmd_init(hdev);
9950 goto err_cmd_uninit;
9952 ret = hclge_get_cap(hdev);
9954 goto err_cmd_uninit;
9956 ret = hclge_configure(hdev);
9958 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9959 goto err_cmd_uninit;
9962 ret = hclge_init_msi(hdev);
9964 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9965 goto err_cmd_uninit;
9968 ret = hclge_misc_irq_init(hdev);
9970 goto err_msi_uninit;
9972 ret = hclge_alloc_tqps(hdev);
9974 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9975 goto err_msi_irq_uninit;
9978 ret = hclge_alloc_vport(hdev);
9980 goto err_msi_irq_uninit;
9982 ret = hclge_map_tqp(hdev);
9984 goto err_msi_irq_uninit;
9986 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9987 ret = hclge_mac_mdio_config(hdev);
9989 goto err_msi_irq_uninit;
9992 ret = hclge_init_umv_space(hdev);
9994 goto err_mdiobus_unreg;
9996 ret = hclge_mac_init(hdev);
9998 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9999 goto err_mdiobus_unreg;
10002 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10004 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10005 goto err_mdiobus_unreg;
10008 ret = hclge_config_gro(hdev, true);
10010 goto err_mdiobus_unreg;
10012 ret = hclge_init_vlan_config(hdev);
10014 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10015 goto err_mdiobus_unreg;
10018 ret = hclge_tm_schd_init(hdev);
10020 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10021 goto err_mdiobus_unreg;
10024 hclge_rss_init_cfg(hdev);
10025 ret = hclge_rss_init_hw(hdev);
10027 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10028 goto err_mdiobus_unreg;
10031 ret = init_mgr_tbl(hdev);
10033 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10034 goto err_mdiobus_unreg;
10037 ret = hclge_init_fd_config(hdev);
10039 dev_err(&pdev->dev,
10040 "fd table init fail, ret=%d\n", ret);
10041 goto err_mdiobus_unreg;
10044 INIT_KFIFO(hdev->mac_tnl_log);
10046 hclge_dcb_ops_set(hdev);
10048 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10049 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10051 /* Setup affinity after service timer setup because add_timer_on
10052 * is called in affinity notify.
10054 hclge_misc_affinity_setup(hdev);
10056 hclge_clear_all_event_cause(hdev);
10057 hclge_clear_resetting_state(hdev);
10059 /* Log and clear the hw errors that have already occurred */
10060 hclge_handle_all_hns_hw_errors(ae_dev);
10062 /* request delayed reset for the error recovery because an immediate
10063 * global reset on a PF would affect the pending initialization of
10064 * other PFs */
10065 if (ae_dev->hw_err_reset_req) {
10066 enum hnae3_reset_type reset_level;
10068 reset_level = hclge_get_reset_level(ae_dev,
10069 &ae_dev->hw_err_reset_req);
10070 hclge_set_def_reset_request(ae_dev, reset_level);
10071 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10074 /* Enable MISC vector(vector0) */
10075 hclge_enable_vector(&hdev->misc_vector, true);
10077 hclge_state_init(hdev);
10078 hdev->last_reset_time = jiffies;
10080 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10081 HCLGE_DRIVER_NAME);
10083 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10088 if (hdev->hw.mac.phydev)
10089 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10090 err_msi_irq_uninit:
10091 hclge_misc_irq_uninit(hdev);
10093 pci_free_irq_vectors(pdev);
10095 hclge_cmd_uninit(hdev);
10097 pcim_iounmap(pdev, hdev->hw.io_base);
10098 pci_clear_master(pdev);
10099 pci_release_regions(pdev);
10100 pci_disable_device(pdev);
10102 mutex_destroy(&hdev->vport_lock);
10106 static void hclge_stats_clear(struct hclge_dev *hdev)
10108 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10111 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10113 return hclge_config_switch_param(hdev, vf, enable,
10114 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10117 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10119 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10120 HCLGE_FILTER_FE_NIC_INGRESS_B,
10124 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10128 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10130 dev_err(&hdev->pdev->dev,
10131 "Set vf %d mac spoof check %s failed, ret=%d\n",
10132 vf, enable ? "on" : "off", ret);
10136 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10138 dev_err(&hdev->pdev->dev,
10139 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10140 vf, enable ? "on" : "off", ret);
10142 return ret;
10143 }
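/* .ndo_set_vf_spoofchk backend: not supported on revision 0x20. When
 * the VF's VLAN or unicast MAC table is already full, enabling the
 * anti-spoof check may drop the VF's own traffic, hence the warnings.
 */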
10145 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10148 struct hclge_vport *vport = hclge_get_vport(handle);
10149 struct hclge_dev *hdev = vport->back;
10150 u32 new_spoofchk = enable ? 1 : 0;
10153 if (hdev->pdev->revision == 0x20)
10154 return -EOPNOTSUPP;
10156 vport = hclge_get_vf_vport(hdev, vf);
10160 if (vport->vf_info.spoofchk == new_spoofchk)
10163 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10164 dev_warn(&hdev->pdev->dev,
10165 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10167 else if (enable && hclge_is_umv_space_full(vport, true))
10168 dev_warn(&hdev->pdev->dev,
10169 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10172 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10176 vport->vf_info.spoofchk = new_spoofchk;
10180 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10182 struct hclge_vport *vport = hdev->vport;
10186 if (hdev->pdev->revision == 0x20)
10187 return 0;
10189 /* resume the vf spoof check state after reset */
10190 for (i = 0; i < hdev->num_alloc_vport; i++) {
10191 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10192 vport->vf_info.spoofchk);
10202 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10204 struct hclge_vport *vport = hclge_get_vport(handle);
10205 struct hclge_dev *hdev = vport->back;
10206 u32 new_trusted = enable ? 1 : 0;
10210 vport = hclge_get_vf_vport(hdev, vf);
10214 if (vport->vf_info.trusted == new_trusted)
10217 /* Disable promisc mode for VF if it is not trusted any more. */
10218 if (!enable && vport->vf_info.promisc_enable) {
10219 en_bc_pmc = hdev->pdev->revision != 0x20;
10220 ret = hclge_set_vport_promisc_mode(vport, false, false,
10224 vport->vf_info.promisc_enable = 0;
10225 hclge_inform_vf_promisc_info(vport);
10228 vport->vf_info.trusted = new_trusted;
10233 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10238 /* reset vf rate to default value */
10239 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10240 struct hclge_vport *vport = &hdev->vport[vf];
10242 vport->vf_info.max_tx_rate = 0;
10243 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10245 dev_err(&hdev->pdev->dev,
10246 "vf%d failed to reset to default, ret=%d\n",
10247 vf - HCLGE_VF_VPORT_START_NUM, ret);
10251 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10252 int min_tx_rate, int max_tx_rate)
10254 if (min_tx_rate != 0 ||
10255 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10256 dev_err(&hdev->pdev->dev,
10257 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10258 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10265 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10266 int min_tx_rate, int max_tx_rate, bool force)
10268 struct hclge_vport *vport = hclge_get_vport(handle);
10269 struct hclge_dev *hdev = vport->back;
10272 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10276 vport = hclge_get_vf_vport(hdev, vf);
10280 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10283 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10287 vport->vf_info.max_tx_rate = max_tx_rate;
10292 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10294 struct hnae3_handle *handle = &hdev->vport->nic;
10295 struct hclge_vport *vport;
10299 /* resume the vf max_tx_rate after reset */
10300 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10301 vport = hclge_get_vf_vport(hdev, vf);
10305 /* zero means max rate; after reset, the firmware has already set
10306 * it to max rate, so just continue.
10307 */
10308 if (!vport->vf_info.max_tx_rate)
10311 ret = hclge_set_vf_rate(handle, vf, 0,
10312 vport->vf_info.max_tx_rate, true);
10314 dev_err(&hdev->pdev->dev,
10315 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10316 vf, vport->vf_info.max_tx_rate, ret);
10324 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10326 struct hclge_vport *vport = hdev->vport;
10329 for (i = 0; i < hdev->num_alloc_vport; i++) {
10330 hclge_vport_stop(vport);
10335 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10337 struct hclge_dev *hdev = ae_dev->priv;
10338 struct pci_dev *pdev = ae_dev->pdev;
10341 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10343 hclge_stats_clear(hdev);
10344 /* NOTE: PF reset doesn't need to clear or restore PF and VF table
10345 * entries, so we should not clean the tables in memory here.
10346 */
10347 if (hdev->reset_type == HNAE3_IMP_RESET ||
10348 hdev->reset_type == HNAE3_GLOBAL_RESET) {
10349 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10350 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10351 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10352 hclge_reset_umv_space(hdev);
10355 ret = hclge_cmd_init(hdev);
10357 dev_err(&pdev->dev, "Cmd queue init failed\n");
10361 ret = hclge_map_tqp(hdev);
10363 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10367 ret = hclge_mac_init(hdev);
10369 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10373 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10375 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10379 ret = hclge_config_gro(hdev, true);
10383 ret = hclge_init_vlan_config(hdev);
10385 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10389 ret = hclge_tm_init_hw(hdev, true);
10391 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10395 ret = hclge_rss_init_hw(hdev);
10397 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10401 ret = init_mgr_tbl(hdev);
10403 dev_err(&pdev->dev,
10404 "failed to reinit manager table, ret = %d\n", ret);
10408 ret = hclge_init_fd_config(hdev);
10410 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10414 /* Log and clear the hw errors that have already occurred */
10415 hclge_handle_all_hns_hw_errors(ae_dev);
10417 /* Re-enable the hw error interrupts because
10418 * the interrupts get disabled on global reset.
10420 ret = hclge_config_nic_hw_error(hdev, true);
10422 dev_err(&pdev->dev,
10423 "fail(%d) to re-enable NIC hw error interrupts\n",
10428 if (hdev->roce_client) {
10429 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10431 dev_err(&pdev->dev,
10432 "fail(%d) to re-enable roce ras interrupts\n",
10438 hclge_reset_vport_state(hdev);
10439 ret = hclge_reset_vport_spoofchk(hdev);
10443 ret = hclge_resume_vf_rate(hdev);
10447 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10448 HCLGE_DRIVER_NAME);
10450 return 0;
10451 }
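/* Teardown mirrors initialization in reverse: restore VF rates and
 * VLANs, stop timers and the service task, release the MAC table,
 * disable and synchronize the misc vector, mask all hardware error
 * interrupts, then tear down the command queue, IRQs and PCI resources.
 */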
10453 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10455 struct hclge_dev *hdev = ae_dev->priv;
10456 struct hclge_mac *mac = &hdev->hw.mac;
10458 hclge_reset_vf_rate(hdev);
10459 hclge_clear_vf_vlan(hdev);
10460 hclge_misc_affinity_teardown(hdev);
10461 hclge_state_uninit(hdev);
10462 hclge_uninit_mac_table(hdev);
10465 mdiobus_unregister(mac->mdio_bus);
10467 /* Disable MISC vector(vector0) */
10468 hclge_enable_vector(&hdev->misc_vector, false);
10469 synchronize_irq(hdev->misc_vector.vector_irq);
10471 /* Disable all hw interrupts */
10472 hclge_config_mac_tnl_int(hdev, false);
10473 hclge_config_nic_hw_error(hdev, false);
10474 hclge_config_rocee_ras_interrupt(hdev, false);
10476 hclge_cmd_uninit(hdev);
10477 hclge_misc_irq_uninit(hdev);
10478 hclge_pci_uninit(hdev);
10479 mutex_destroy(&hdev->vport_lock);
10480 hclge_uninit_vport_vlan_table(hdev);
10481 ae_dev->priv = NULL;
10484 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10486 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10487 struct hclge_vport *vport = hclge_get_vport(handle);
10488 struct hclge_dev *hdev = vport->back;
10490 return min_t(u32, hdev->rss_size_max,
10491 vport->alloc_tqps / kinfo->num_tc);
10494 static void hclge_get_channels(struct hnae3_handle *handle,
10495 struct ethtool_channels *ch)
10497 ch->max_combined = hclge_get_max_channels(handle);
10498 ch->other_count = 1;
10500 ch->combined_count = handle->kinfo.rss_size;
10503 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10504 u16 *alloc_tqps, u16 *max_rss_size)
10506 struct hclge_vport *vport = hclge_get_vport(handle);
10507 struct hclge_dev *hdev = vport->back;
10509 *alloc_tqps = vport->alloc_tqps;
10510 *max_rss_size = hdev->rss_size_max;
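/* Channel change below: record the requested RSS size, remap the vport
 * TQPs, reprogram the per-TC RSS mode (sizes rounded up to a power of
 * two), and rebuild the indirection table as i % rss_size unless the
 * user already configured it via ethtool -X.
 */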
10513 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10514 bool rxfh_configured)
10516 struct hclge_vport *vport = hclge_get_vport(handle);
10517 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10518 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10519 struct hclge_dev *hdev = vport->back;
10520 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10521 u16 cur_rss_size = kinfo->rss_size;
10522 u16 cur_tqps = kinfo->num_tqps;
10523 u16 tc_valid[HCLGE_MAX_TC_NUM];
10529 kinfo->req_rss_size = new_tqps_num;
10531 ret = hclge_tm_vport_map_update(hdev);
10533 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10537 roundup_size = roundup_pow_of_two(kinfo->rss_size);
10538 roundup_size = ilog2(roundup_size);
10539 /* Set the RSS TC mode according to the new RSS size */
10540 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10543 if (!(hdev->hw_tc_map & BIT(i)))
10547 tc_size[i] = roundup_size;
10548 tc_offset[i] = kinfo->rss_size * i;
10550 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10554 /* RSS indirection table has been configured by the user */
10555 if (rxfh_configured)
10558 /* Reinitialize the RSS indirection table according to the new RSS size */
10559 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10563 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10564 rss_indir[i] = i % kinfo->rss_size;
10566 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10568 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10575 dev_info(&hdev->pdev->dev,
10576 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10577 cur_rss_size, kinfo->rss_size,
10578 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10583 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10584 u32 *regs_num_64_bit)
10586 struct hclge_desc desc;
10590 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10591 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10593 dev_err(&hdev->pdev->dev,
10594 "Query register number cmd failed, ret = %d.\n", ret);
10598 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10599 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10601 total_num = *regs_num_32_bit + *regs_num_64_bit;
10608 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10611 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10612 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10614 struct hclge_desc *desc;
10615 u32 *reg_val = data;
10625 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10626 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10627 HCLGE_32_BIT_REG_RTN_DATANUM);
10628 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10632 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10633 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10635 dev_err(&hdev->pdev->dev,
10636 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10641 for (i = 0; i < cmd_num; i++) {
10643 desc_data = (__le32 *)(&desc[i].data[0]);
10644 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10646 desc_data = (__le32 *)(&desc[i]);
10647 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10649 for (k = 0; k < n; k++) {
10650 *reg_val++ = le32_to_cpu(*desc_data++);
10662 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10665 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10666 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10668 struct hclge_desc *desc;
10669 u64 *reg_val = data;
10679 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10680 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10681 HCLGE_64_BIT_REG_RTN_DATANUM);
10682 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10686 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10687 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10689 dev_err(&hdev->pdev->dev,
10690 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10695 for (i = 0; i < cmd_num; i++) {
10697 desc_data = (__le64 *)(&desc[i].data[0]);
10698 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10700 desc_data = (__le64 *)(&desc[i]);
10701 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10703 for (k = 0; k < n; k++) {
10704 *reg_val++ = le64_to_cpu(*desc_data++);
10716 #define MAX_SEPARATE_NUM 4
10717 #define SEPARATOR_VALUE 0xFDFCFBFA
10718 #define REG_NUM_PER_LINE 4
10719 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
10720 #define REG_SEPARATOR_LINE 1
10721 #define REG_NUM_REMAIN_MASK 3
10722 #define BD_LIST_MAX_NUM 30
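/* Register dump layout: each block of register values is padded with
 * SEPARATOR_VALUE (0xFDFCFBFA) words so that the block ends on a
 * REG_LEN_PER_LINE boundary, letting user space tools detect block
 * boundaries when parsing the dump.
 */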
10724 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10726 /* prepare 4 commands to query DFX BD number */
10727 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10728 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10729 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10730 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10731 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10732 desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10733 hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10735 return hclge_cmd_send(&hdev->hw, desc, 4);
10738 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10742 u32 entries_per_desc, desc_index, index, offset, i;
10743 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10746 ret = hclge_query_bd_num_cmd_send(hdev, desc);
10748 dev_err(&hdev->pdev->dev,
10749 "Get dfx bd num fail, status is %d.\n", ret);
10753 entries_per_desc = ARRAY_SIZE(desc[0].data);
10754 for (i = 0; i < type_num; i++) {
10755 offset = hclge_dfx_bd_offset_list[i];
10756 index = offset % entries_per_desc;
10757 desc_index = offset / entries_per_desc;
10758 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10764 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10765 struct hclge_desc *desc_src, int bd_num,
10766 enum hclge_opcode_type cmd)
10768 struct hclge_desc *desc = desc_src;
10771 hclge_cmd_setup_basic_desc(desc, cmd, true);
10772 for (i = 0; i < bd_num - 1; i++) {
10773 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10775 hclge_cmd_setup_basic_desc(desc, cmd, true);
10779 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10781 dev_err(&hdev->pdev->dev,
10782 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10788 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10791 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10792 struct hclge_desc *desc = desc_src;
10795 entries_per_desc = ARRAY_SIZE(desc->data);
10796 reg_num = entries_per_desc * bd_num;
10797 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10798 for (i = 0; i < reg_num; i++) {
10799 index = i % entries_per_desc;
10800 desc_index = i / entries_per_desc;
10801 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10803 for (i = 0; i < separator_num; i++)
10804 *reg++ = SEPARATOR_VALUE;
10806 return reg_num + separator_num;
10809 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10811 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10812 int data_len_per_desc, bd_num, i;
10813 int bd_num_list[BD_LIST_MAX_NUM];
10817 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10819 dev_err(&hdev->pdev->dev,
10820 "Get dfx reg bd num fail, status is %d.\n", ret);
10824 data_len_per_desc = sizeof_field(struct hclge_desc, data);
10826 for (i = 0; i < dfx_reg_type_num; i++) {
10827 bd_num = bd_num_list[i];
10828 data_len = data_len_per_desc * bd_num;
10829 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10835 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10837 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10838 int bd_num, bd_num_max, buf_len, i;
10839 int bd_num_list[BD_LIST_MAX_NUM];
10840 struct hclge_desc *desc_src;
10844 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10846 dev_err(&hdev->pdev->dev,
10847 "Get dfx reg bd num fail, status is %d.\n", ret);
10851 bd_num_max = bd_num_list[0];
10852 for (i = 1; i < dfx_reg_type_num; i++)
10853 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10855 buf_len = sizeof(*desc_src) * bd_num_max;
10856 desc_src = kzalloc(buf_len, GFP_KERNEL);
10860 for (i = 0; i < dfx_reg_type_num; i++) {
10861 bd_num = bd_num_list[i];
10862 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10863 hclge_dfx_reg_opcode_list[i]);
10865 dev_err(&hdev->pdev->dev,
10866 "Get dfx reg fail, status is %d.\n", ret);
10870 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10877 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10878 struct hnae3_knic_private_info *kinfo)
10880 #define HCLGE_RING_REG_OFFSET 0x200
10881 #define HCLGE_RING_INT_REG_OFFSET 0x4
10883 int i, j, reg_num, separator_num;
10887 /* fetch per-PF register values from the PF PCIe register space */
10888 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10889 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10890 for (i = 0; i < reg_num; i++)
10891 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10892 for (i = 0; i < separator_num; i++)
10893 *reg++ = SEPARATOR_VALUE;
10894 data_num_sum = reg_num + separator_num;
10896 reg_num = ARRAY_SIZE(common_reg_addr_list);
10897 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10898 for (i = 0; i < reg_num; i++)
10899 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10900 for (i = 0; i < separator_num; i++)
10901 *reg++ = SEPARATOR_VALUE;
10902 data_num_sum += reg_num + separator_num;
10904 reg_num = ARRAY_SIZE(ring_reg_addr_list);
10905 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10906 for (j = 0; j < kinfo->num_tqps; j++) {
10907 for (i = 0; i < reg_num; i++)
10908 *reg++ = hclge_read_dev(&hdev->hw,
10909 ring_reg_addr_list[i] +
10910 HCLGE_RING_REG_OFFSET * j);
10911 for (i = 0; i < separator_num; i++)
10912 *reg++ = SEPARATOR_VALUE;
10914 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10916 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10917 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10918 for (j = 0; j < hdev->num_msi_used - 1; j++) {
10919 for (i = 0; i < reg_num; i++)
10920 *reg++ = hclge_read_dev(&hdev->hw,
10921 tqp_intr_reg_addr_list[i] +
10922 HCLGE_RING_INT_REG_OFFSET * j);
10923 for (i = 0; i < separator_num; i++)
10924 *reg++ = SEPARATOR_VALUE;
10926 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10928 return data_num_sum;
10931 static int hclge_get_regs_len(struct hnae3_handle *handle)
10933 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10934 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10935 struct hclge_vport *vport = hclge_get_vport(handle);
10936 struct hclge_dev *hdev = vport->back;
10937 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10938 int regs_lines_32_bit, regs_lines_64_bit;
10941 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10943 dev_err(&hdev->pdev->dev,
10944 "Get register number failed, ret = %d.\n", ret);
10948 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10950 dev_err(&hdev->pdev->dev,
10951 "Get dfx reg len failed, ret = %d.\n", ret);
10955 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10956 REG_SEPARATOR_LINE;
10957 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10958 REG_SEPARATOR_LINE;
10959 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10960 REG_SEPARATOR_LINE;
10961 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10962 REG_SEPARATOR_LINE;
10963 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10964 REG_SEPARATOR_LINE;
10965 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10966 REG_SEPARATOR_LINE;
10968 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10969 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10970 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10973 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10976 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10977 struct hclge_vport *vport = hclge_get_vport(handle);
10978 struct hclge_dev *hdev = vport->back;
10979 u32 regs_num_32_bit, regs_num_64_bit;
10980 int i, reg_num, separator_num, ret;
10983 *version = hdev->fw_version;
10985 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10987 dev_err(&hdev->pdev->dev,
10988 "Get register number failed, ret = %d.\n", ret);
10992 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10994 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10996 dev_err(&hdev->pdev->dev,
10997 "Get 32 bit register failed, ret = %d.\n", ret);
11000 reg_num = regs_num_32_bit;
11002 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11003 for (i = 0; i < separator_num; i++)
11004 *reg++ = SEPARATOR_VALUE;
11006 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11008 dev_err(&hdev->pdev->dev,
11009 "Get 64 bit register failed, ret = %d.\n", ret);
11012 reg_num = regs_num_64_bit * 2;
11014 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11015 for (i = 0; i < separator_num; i++)
11016 *reg++ = SEPARATOR_VALUE;
11018 ret = hclge_get_dfx_reg(hdev, reg);
11020 dev_err(&hdev->pdev->dev,
11021 "Get dfx register failed, ret = %d.\n", ret);
11024 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11026 struct hclge_set_led_state_cmd *req;
11027 struct hclge_desc desc;
11030 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11032 req = (struct hclge_set_led_state_cmd *)desc.data;
11033 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11034 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11036 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11038 dev_err(&hdev->pdev->dev,
11039 "Send set led state cmd error, ret =%d\n", ret);
11044 enum hclge_led_status {
11045 HCLGE_LED_OFF,
11046 HCLGE_LED_ON,
11047 HCLGE_LED_NO_CHANGE = 0xFF,
11048 };
11050 static int hclge_set_led_id(struct hnae3_handle *handle,
11051 enum ethtool_phys_id_state status)
11053 struct hclge_vport *vport = hclge_get_vport(handle);
11054 struct hclge_dev *hdev = vport->back;
11057 case ETHTOOL_ID_ACTIVE:
11058 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11059 case ETHTOOL_ID_INACTIVE:
11060 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11066 static void hclge_get_link_mode(struct hnae3_handle *handle,
11067 unsigned long *supported,
11068 unsigned long *advertising)
11070 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11071 struct hclge_vport *vport = hclge_get_vport(handle);
11072 struct hclge_dev *hdev = vport->back;
11073 unsigned int idx = 0;
11075 for (; idx < size; idx++) {
11076 supported[idx] = hdev->hw.mac.supported[idx];
11077 advertising[idx] = hdev->hw.mac.advertising[idx];
11081 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11083 struct hclge_vport *vport = hclge_get_vport(handle);
11084 struct hclge_dev *hdev = vport->back;
11086 return hclge_config_gro(hdev, enable);
11089 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11091 struct hclge_vport *vport = &hdev->vport[0];
11092 struct hnae3_handle *handle = &vport->nic;
11096 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11097 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11098 vport->last_promisc_flags = vport->overflow_promisc_flags;
11101 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11102 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11103 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11104 tmp_flags & HNAE3_MPE);
11106 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11107 hclge_enable_vlan_filter(handle,
11108 tmp_flags & HNAE3_VLAN_FLTR);
11113 static bool hclge_module_existed(struct hclge_dev *hdev)
11115 struct hclge_desc desc;
11119 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11120 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11122 dev_err(&hdev->pdev->dev,
11123 "failed to get SFP exist state, ret = %d\n", ret);
11127 existed = le32_to_cpu(desc.data[0]);
11129 return existed != 0;
11132 /* need 6 BDs (140 bytes in total) in one read,
11133 * return the number of bytes actually read; 0 means the read failed.
11134 */
11135 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11138 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11139 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11145 /* setup all 6 bds to read module eeprom info. */
11146 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11147 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11150 /* bd0~bd4 need next flag */
11151 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11152 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11155 /* setup bd0, this bd contains offset and read length. */
11156 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11157 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11158 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11159 sfp_info_bd0->read_len = cpu_to_le16(read_len);
11161 ret = hclge_cmd_send(&hdev->hw, desc, i);
11163 dev_err(&hdev->pdev->dev,
11164 "failed to get SFP eeprom info, ret = %d\n", ret);
11168 /* copy sfp info from bd0 to out buffer. */
11169 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11170 memcpy(data, sfp_info_bd0->data, copy_len);
11171 read_len = copy_len;
11173 /* copy sfp info from bd1~bd5 to out buffer if needed. */
11174 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11175 if (read_len >= len)
11178 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11179 memcpy(data + read_len, desc[i].data, copy_len);
11180 read_len += copy_len;
11181 }
11183 return read_len;
11184 }
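/* ethtool -m backend: only fiber media is supported; the EEPROM is read
 * in chunks of up to HCLGE_SFP_INFO_MAX_LEN bytes until the requested
 * length is satisfied or a read returns 0 (failure or module absent).
 */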
11186 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11189 struct hclge_vport *vport = hclge_get_vport(handle);
11190 struct hclge_dev *hdev = vport->back;
11194 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11195 return -EOPNOTSUPP;
11197 if (!hclge_module_existed(hdev))
11200 while (read_len < len) {
11201 data_len = hclge_get_sfp_eeprom_info(hdev,
11208 read_len += data_len;
11214 static const struct hnae3_ae_ops hclge_ops = {
11215 .init_ae_dev = hclge_init_ae_dev,
11216 .uninit_ae_dev = hclge_uninit_ae_dev,
11217 .flr_prepare = hclge_flr_prepare,
11218 .flr_done = hclge_flr_done,
11219 .init_client_instance = hclge_init_client_instance,
11220 .uninit_client_instance = hclge_uninit_client_instance,
11221 .map_ring_to_vector = hclge_map_ring_to_vector,
11222 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11223 .get_vector = hclge_get_vector,
11224 .put_vector = hclge_put_vector,
11225 .set_promisc_mode = hclge_set_promisc_mode,
11226 .request_update_promisc_mode = hclge_request_update_promisc_mode,
11227 .set_loopback = hclge_set_loopback,
11228 .start = hclge_ae_start,
11229 .stop = hclge_ae_stop,
11230 .client_start = hclge_client_start,
11231 .client_stop = hclge_client_stop,
11232 .get_status = hclge_get_status,
11233 .get_ksettings_an_result = hclge_get_ksettings_an_result,
11234 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11235 .get_media_type = hclge_get_media_type,
11236 .check_port_speed = hclge_check_port_speed,
11237 .get_fec = hclge_get_fec,
11238 .set_fec = hclge_set_fec,
11239 .get_rss_key_size = hclge_get_rss_key_size,
11240 .get_rss_indir_size = hclge_get_rss_indir_size,
11241 .get_rss = hclge_get_rss,
11242 .set_rss = hclge_set_rss,
11243 .set_rss_tuple = hclge_set_rss_tuple,
11244 .get_rss_tuple = hclge_get_rss_tuple,
11245 .get_tc_size = hclge_get_tc_size,
11246 .get_mac_addr = hclge_get_mac_addr,
11247 .set_mac_addr = hclge_set_mac_addr,
11248 .do_ioctl = hclge_do_ioctl,
11249 .add_uc_addr = hclge_add_uc_addr,
11250 .rm_uc_addr = hclge_rm_uc_addr,
11251 .add_mc_addr = hclge_add_mc_addr,
11252 .rm_mc_addr = hclge_rm_mc_addr,
11253 .set_autoneg = hclge_set_autoneg,
11254 .get_autoneg = hclge_get_autoneg,
11255 .restart_autoneg = hclge_restart_autoneg,
11256 .halt_autoneg = hclge_halt_autoneg,
11257 .get_pauseparam = hclge_get_pauseparam,
11258 .set_pauseparam = hclge_set_pauseparam,
11259 .set_mtu = hclge_set_mtu,
11260 .reset_queue = hclge_reset_tqp,
11261 .get_stats = hclge_get_stats,
11262 .get_mac_stats = hclge_get_mac_stat,
11263 .update_stats = hclge_update_stats,
11264 .get_strings = hclge_get_strings,
11265 .get_sset_count = hclge_get_sset_count,
11266 .get_fw_version = hclge_get_fw_version,
11267 .get_mdix_mode = hclge_get_mdix_mode,
11268 .enable_vlan_filter = hclge_enable_vlan_filter,
11269 .set_vlan_filter = hclge_set_vlan_filter,
11270 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11271 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11272 .reset_event = hclge_reset_event,
11273 .get_reset_level = hclge_get_reset_level,
11274 .set_default_reset_request = hclge_set_def_reset_request,
11275 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11276 .set_channels = hclge_set_channels,
11277 .get_channels = hclge_get_channels,
11278 .get_regs_len = hclge_get_regs_len,
11279 .get_regs = hclge_get_regs,
11280 .set_led_id = hclge_set_led_id,
11281 .get_link_mode = hclge_get_link_mode,
11282 .add_fd_entry = hclge_add_fd_entry,
11283 .del_fd_entry = hclge_del_fd_entry,
11284 .del_all_fd_entries = hclge_del_all_fd_entries,
11285 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11286 .get_fd_rule_info = hclge_get_fd_rule_info,
11287 .get_fd_all_rules = hclge_get_all_rules,
11288 .enable_fd = hclge_enable_fd,
11289 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11290 .dbg_run_cmd = hclge_dbg_run_cmd,
11291 .handle_hw_ras_error = hclge_handle_hw_ras_error,
11292 .get_hw_reset_stat = hclge_get_hw_reset_stat,
11293 .ae_dev_resetting = hclge_ae_dev_resetting,
11294 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11295 .set_gro_en = hclge_gro_en,
11296 .get_global_queue_id = hclge_covert_handle_qid_global,
11297 .set_timer_task = hclge_set_timer_task,
11298 .mac_connect_phy = hclge_mac_connect_phy,
11299 .mac_disconnect_phy = hclge_mac_disconnect_phy,
11300 .get_vf_config = hclge_get_vf_config,
11301 .set_vf_link_state = hclge_set_vf_link_state,
11302 .set_vf_spoofchk = hclge_set_vf_spoofchk,
11303 .set_vf_trust = hclge_set_vf_trust,
11304 .set_vf_rate = hclge_set_vf_rate,
11305 .set_vf_mac = hclge_set_vf_mac,
11306 .get_module_eeprom = hclge_get_module_eeprom,
11307 .get_cmdq_stat = hclge_get_cmdq_stat,
11308 };
11310 static struct hnae3_ae_algo ae_algo = {
11311 .ops = &hclge_ops,
11312 .pdev_id_table = ae_algo_pci_tbl,
11313 };
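/* Module entry points: allocate the driver workqueue and register the
 * algorithm with the HNAE3 framework on load; unregister and destroy
 * the workqueue in reverse order on unload.
 */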
11315 static int hclge_init(void)
11316 {
11317 pr_info("%s is initializing\n", HCLGE_NAME);
11319 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11320 if (!hclge_wq) {
11321 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11322 return -ENOMEM;
11323 }
11325 hnae3_register_ae_algo(&ae_algo);
11327 return 0;
11328 }
11330 static void hclge_exit(void)
11331 {
11332 hnae3_unregister_ae_algo(&ae_algo);
11333 destroy_workqueue(hclge_wq);
11334 }
11335 module_init(hclge_init);
11336 module_exit(hclge_exit);
11338 MODULE_LICENSE("GPL");
11339 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11340 MODULE_DESCRIPTION("HCLGE Driver");
11341 MODULE_VERSION(HCLGE_MOD_VERSION);