// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_err.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
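/* The two macros above work as a pair: HCLGE_MAC_STATS_FIELD_OFF() yields
 * the byte offset of a counter inside struct hclge_mac_stats, and
 * HCLGE_STATS_READ() reads the u64 counter at that offset from a stats blob.
 */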
#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500
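/* Note: the two *_SYNC_TIME values are wait intervals (presumably in
 * milliseconds, matching how the reset flow sleeps between retries), and
 * the PF reset path retries up to HCLGE_PF_RESET_SYNC_CNT times.
 */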
/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

#define HCLGE_VF_VPORT_START_NUM	1
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
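/* The register address lists above are walked by the register dump path
 * (e.g. for ethtool), grouped as command-queue, common/misc, per-ring and
 * per-TQP-interrupt registers.
 */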
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
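/* The 40-byte array above is the default RSS hash key; it appears to be
 * the same well-known default Toeplitz key that several other NIC drivers
 * also ship.
 */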
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};
static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
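/* MAC statistics are fetched as a burst of command descriptors: the first
 * descriptor's 24-byte data area holds the leading u64 counters, while
 * every following descriptor is reinterpreted whole (32 bytes) as four
 * more u64 counters, which the loops below accumulate into hdev->mac_stats.
 */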
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);
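	/* A command descriptor is 32 bytes with a 24-byte data area: the
	 * first descriptor holds the command head plus 3 u64 stats, and
	 * each following descriptor is reused in full for 4 u64 stats.
	 * Hence desc_num = 1 + roundup((reg_num - 3) / 4).
	 */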
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each TQP has two queues: one TX and one RX */
	return kinfo->num_tqps * 2;
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;
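	/* test_and_set_bit() both checks and claims the UPDATING flag in one
	 * atomic step: if another context is already refreshing the stats,
	 * skip this round instead of issuing duplicate firmware queries.
	 */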
	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode is supported
	 * serdes: supported by all MAC modes (GE/XGE/LGE/CGE)
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;
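	/* Poll the function status until the PF reset has completed,
	 * sleeping 1-2 ms between attempts and giving up after
	 * HCLGE_QUERY_MAX_CNT tries.
	 */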
	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check if PF reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
			hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
					HCLGE_MSIX_OFT_ROCEE_M,
					HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
			hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
					HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* The NIC's MSI-X vector count always equals RoCE's. */
		hdev->num_nic_msi = hdev->num_roce_msi;

		/* The PF needs both NIC and RoCE vectors; the NIC vectors
		 * are queued before the RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
			hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
					HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		hdev->num_nic_msi = hdev->num_msi;
	}

	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"just %u MSI resources, not enough for the PF (min: 2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to supporting all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static u32 hclge_get_max_speed(u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
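/* The static configuration is fetched from flash as HCLGE_PF_CFG_DESC_NUM
 * command descriptors; hclge_parse_cfg() below decodes desc[0] (vmdq/TC/TQP
 * counts, PHY address, media type, RX buffer length, MAC address, default
 * speed, RSS size) and desc[1] (NUMA node map, speed ability, UMV space).
 */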
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);
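	/* The low 32 bits of the MAC address live in param[2] and the high
	 * 16 bits in param[3]; merge the high half above bit 31. The shift
	 * is split as 31 + 1, presumably to stay clear of an undefined
	 * 32-bit shift if the operand were ever narrowed to 32 bits.
	 */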
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length field is in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equal the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "got wrong speed, ret = %d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the initial affinity based on the PCI function number: count
	 * the CPUs local to the device's NUMA node and pick the
	 * (func % count)-th one, so different PFs spread across local CPUs.
	 */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return 0;
}
static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);
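	/* Note: the _MIN_ mask/shift pair is deliberately reused for the max
	 * value below; both 16-bit fields appear to share the same in-word
	 * layout, so only the destination field differs.
	 */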
	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure a one-to-one mapping between IRQ vectors and queues by default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport, tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of PFC-enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of PFC-disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
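/* hclge_is_rx_buf_ok() checks whether the RX buffer left over after the
 * per-TC private allocation can hold the required shared buffer; when it
 * can, it also derives the shared buffer's global and per-TC high/low
 * waterlines.
 */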
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last TC be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the private buffer of a TC without PFC */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last TC be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of PFC TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
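/* Most generous strategy: hand the whole remaining RX buffer out as per-TC
 * private buffers (no shared buffer), keeping COMPENSATE_BUFFER plus a few
 * half-MPS units of headroom on top of dv_buf_size for each enabled TC.
 */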
static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				      struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
		      COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}
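	/* Try the allocation strategies in turn, from the most generous to
	 * the most conservative, until one fits in the packet buffer.
	 */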
	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer for each TC */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
2132 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2133 struct hclge_pkt_buf_alloc *buf_alloc)
2135 struct hclge_rx_priv_wl_buf *req;
2136 struct hclge_priv_buf *priv;
2137 struct hclge_desc desc[2];
2141 for (i = 0; i < 2; i++) {
2142 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2144 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2146 /* The first descriptor set the NEXT bit to 1 */
2148 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2150 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2152 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2153 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2155 priv = &buf_alloc->priv_buf[idx];
2156 req->tc_wl[j].high =
2157 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2158 req->tc_wl[j].high |=
2159 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2161 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2162 req->tc_wl[j].low |=
2163 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2167 /* Send 2 descriptor at one time */
2168 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2170 dev_err(&hdev->pdev->dev,
2171 "rx private waterline config cmd failed %d\n",
2176 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2177 struct hclge_pkt_buf_alloc *buf_alloc)
2179 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2180 struct hclge_rx_com_thrd *req;
2181 struct hclge_desc desc[2];
2182 struct hclge_tc_thrd *tc;
2186 for (i = 0; i < 2; i++) {
2187 hclge_cmd_setup_basic_desc(&desc[i],
2188 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2189 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2191 /* The first descriptor sets the NEXT bit to 1 */
2193 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2195 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2197 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2198 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2200 req->com_thrd[j].high =
2201 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2202 req->com_thrd[j].high |=
2203 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2204 req->com_thrd[j].low =
2205 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2206 req->com_thrd[j].low |=
2207 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2211 /* Send 2 descriptors at one time */
2212 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2214 dev_err(&hdev->pdev->dev,
2215 "common threshold config cmd failed %d\n", ret);
2219 static int hclge_common_wl_config(struct hclge_dev *hdev,
2220 struct hclge_pkt_buf_alloc *buf_alloc)
2222 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2223 struct hclge_rx_com_wl *req;
2224 struct hclge_desc desc;
2227 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2229 req = (struct hclge_rx_com_wl *)desc.data;
2230 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2231 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2233 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2234 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2236 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2238 dev_err(&hdev->pdev->dev,
2239 "common waterline config cmd failed %d\n", ret);
2244 int hclge_buffer_alloc(struct hclge_dev *hdev)
2246 struct hclge_pkt_buf_alloc *pkt_buf;
2249 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2253 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2255 dev_err(&hdev->pdev->dev,
2256 "could not calc tx buffer size for all TCs %d\n", ret);
2260 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2262 dev_err(&hdev->pdev->dev,
2263 "could not alloc tx buffers %d\n", ret);
2267 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2269 dev_err(&hdev->pdev->dev,
2270 "could not calc rx priv buffer size for all TCs %d\n",
2275 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2277 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2282 if (hnae3_dev_dcb_supported(hdev)) {
2283 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2285 dev_err(&hdev->pdev->dev,
2286 "could not configure rx private waterline %d\n",
2291 ret = hclge_common_thrd_config(hdev, pkt_buf);
2293 dev_err(&hdev->pdev->dev,
2294 "could not configure common threshold %d\n",
2300 ret = hclge_common_wl_config(hdev, pkt_buf);
2302 dev_err(&hdev->pdev->dev,
2303 "could not configure common waterline %d\n", ret);
2310 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2312 struct hnae3_handle *roce = &vport->roce;
2313 struct hnae3_handle *nic = &vport->nic;
2315 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2317 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2318 vport->back->num_msi_left == 0)
2321 roce->rinfo.base_vector = vport->back->roce_base_vector;
2323 roce->rinfo.netdev = nic->kinfo.netdev;
2324 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2326 roce->pdev = nic->pdev;
2327 roce->ae_algo = nic->ae_algo;
2328 roce->numa_node_mask = nic->numa_node_mask;
2333 static int hclge_init_msi(struct hclge_dev *hdev)
2335 struct pci_dev *pdev = hdev->pdev;
2339 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2341 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2344 "failed(%d) to allocate MSI/MSI-X vectors\n",
2348 if (vectors < hdev->num_msi)
2349 dev_warn(&hdev->pdev->dev,
2350 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2351 hdev->num_msi, vectors);
2353 hdev->num_msi = vectors;
2354 hdev->num_msi_left = vectors;
2356 hdev->base_msi_vector = pdev->irq;
2357 hdev->roce_base_vector = hdev->base_msi_vector +
2358 hdev->roce_base_msix_offset;
2360 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361 sizeof(u16), GFP_KERNEL);
2362 if (!hdev->vector_status) {
2363 pci_free_irq_vectors(pdev);
2367 for (i = 0; i < hdev->num_msi; i++)
2368 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2370 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2371 sizeof(int), GFP_KERNEL);
2372 if (!hdev->vector_irq) {
2373 pci_free_irq_vectors(pdev);
2380 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2382 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2383 duplex = HCLGE_MAC_FULL;
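/* The switch below encodes the MAC speed into the HCLGE_CFG_SPEED field:
 * 1G -> 0, 10G -> 1, 25G -> 2, 40G -> 3, 50G -> 4, 100G -> 5,
 * 10M -> 6, 100M -> 7
 */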
2388 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2391 struct hclge_config_mac_speed_dup_cmd *req;
2392 struct hclge_desc desc;
2395 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2397 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2400 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2403 case HCLGE_MAC_SPEED_10M:
2404 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2405 HCLGE_CFG_SPEED_S, 6);
2407 case HCLGE_MAC_SPEED_100M:
2408 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2409 HCLGE_CFG_SPEED_S, 7);
2411 case HCLGE_MAC_SPEED_1G:
2412 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2413 HCLGE_CFG_SPEED_S, 0);
2415 case HCLGE_MAC_SPEED_10G:
2416 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2417 HCLGE_CFG_SPEED_S, 1);
2419 case HCLGE_MAC_SPEED_25G:
2420 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2421 HCLGE_CFG_SPEED_S, 2);
2423 case HCLGE_MAC_SPEED_40G:
2424 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2425 HCLGE_CFG_SPEED_S, 3);
2427 case HCLGE_MAC_SPEED_50G:
2428 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2429 HCLGE_CFG_SPEED_S, 4);
2431 case HCLGE_MAC_SPEED_100G:
2432 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2433 HCLGE_CFG_SPEED_S, 5);
2436 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2440 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2443 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2445 dev_err(&hdev->pdev->dev,
2446 "mac speed/duplex config cmd failed %d.\n", ret);
2453 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2455 struct hclge_mac *mac = &hdev->hw.mac;
2458 duplex = hclge_check_speed_dup(duplex, speed);
2459 if (!mac->support_autoneg && mac->speed == speed &&
2460 mac->duplex == duplex)
2463 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2467 hdev->hw.mac.speed = speed;
2468 hdev->hw.mac.duplex = duplex;
2473 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2476 struct hclge_vport *vport = hclge_get_vport(handle);
2477 struct hclge_dev *hdev = vport->back;
2479 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2482 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2484 struct hclge_config_auto_neg_cmd *req;
2485 struct hclge_desc desc;
2489 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2491 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2493 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2494 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2496 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2498 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2504 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2506 struct hclge_vport *vport = hclge_get_vport(handle);
2507 struct hclge_dev *hdev = vport->back;
2509 if (!hdev->hw.mac.support_autoneg) {
2511 dev_err(&hdev->pdev->dev,
2512 "autoneg is not supported by current port\n");
2519 return hclge_set_autoneg_en(hdev, enable);
2522 static int hclge_get_autoneg(struct hnae3_handle *handle)
2524 struct hclge_vport *vport = hclge_get_vport(handle);
2525 struct hclge_dev *hdev = vport->back;
2526 struct phy_device *phydev = hdev->hw.mac.phydev;
2529 return phydev->autoneg;
2531 return hdev->hw.mac.autoneg;
2534 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2536 struct hclge_vport *vport = hclge_get_vport(handle);
2537 struct hclge_dev *hdev = vport->back;
2540 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2542 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2545 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2548 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2550 struct hclge_vport *vport = hclge_get_vport(handle);
2551 struct hclge_dev *hdev = vport->back;
2553 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2554 return hclge_set_autoneg_en(hdev, !halt);
2559 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2561 struct hclge_config_fec_cmd *req;
2562 struct hclge_desc desc;
2565 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2567 req = (struct hclge_config_fec_cmd *)desc.data;
2568 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2569 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2570 if (fec_mode & BIT(HNAE3_FEC_RS))
2571 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2572 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2573 if (fec_mode & BIT(HNAE3_FEC_BASER))
2574 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2575 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2577 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2579 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2584 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2586 struct hclge_vport *vport = hclge_get_vport(handle);
2587 struct hclge_dev *hdev = vport->back;
2588 struct hclge_mac *mac = &hdev->hw.mac;
2591 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2592 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2596 ret = hclge_set_fec_hw(hdev, fec_mode);
2600 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2604 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2607 struct hclge_vport *vport = hclge_get_vport(handle);
2608 struct hclge_dev *hdev = vport->back;
2609 struct hclge_mac *mac = &hdev->hw.mac;
2612 *fec_ability = mac->fec_ability;
2614 *fec_mode = mac->fec_mode;
2617 static int hclge_mac_init(struct hclge_dev *hdev)
2619 struct hclge_mac *mac = &hdev->hw.mac;
2622 hdev->support_sfp_query = true;
2623 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2624 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2625 hdev->hw.mac.duplex);
2629 if (hdev->hw.mac.support_autoneg) {
2630 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2637 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2638 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2643 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2645 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2649 ret = hclge_set_default_loopback(hdev);
2653 ret = hclge_buffer_alloc(hdev);
2655 dev_err(&hdev->pdev->dev,
2656 "allocate buffer fail, ret=%d\n", ret);
2661 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2663 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2664 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2665 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2666 hclge_wq, &hdev->service_task, 0);
2669 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2671 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2672 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2673 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2674 hclge_wq, &hdev->service_task, 0);
2677 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2679 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2680 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2681 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2682 hclge_wq, &hdev->service_task,
2686 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2688 struct hclge_link_status_cmd *req;
2689 struct hclge_desc desc;
2693 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2694 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2696 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2701 req = (struct hclge_link_status_cmd *)desc.data;
2702 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2704 return !!link_status;
2707 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2709 unsigned int mac_state;
2712 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2715 mac_state = hclge_get_mac_link_status(hdev);
2717 if (hdev->hw.mac.phydev) {
2718 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2719 link_stat = mac_state &
2720 hdev->hw.mac.phydev->link;
2725 link_stat = mac_state;
2731 static void hclge_update_link_status(struct hclge_dev *hdev)
2733 struct hnae3_client *rclient = hdev->roce_client;
2734 struct hnae3_client *client = hdev->nic_client;
2735 struct hnae3_handle *rhandle;
2736 struct hnae3_handle *handle;
2743 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2746 state = hclge_get_mac_phy_link(hdev);
2747 if (state != hdev->hw.mac.link) {
2748 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2749 handle = &hdev->vport[i].nic;
2750 client->ops->link_status_change(handle, state);
2751 hclge_config_mac_tnl_int(hdev, state);
2752 rhandle = &hdev->vport[i].roce;
2753 if (rclient && rclient->ops->link_status_change)
2754 rclient->ops->link_status_change(rhandle,
2757 hdev->hw.mac.link = state;
2760 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2763 static void hclge_update_port_capability(struct hclge_mac *mac)
2765 /* update fec ability by speed */
2766 hclge_convert_setting_fec(mac);
2768 /* firmware cannot identify the backplane type; the media type
2769 * read from the configuration can help deal with it
2771 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2772 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2773 mac->module_type = HNAE3_MODULE_TYPE_KR;
2774 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2775 mac->module_type = HNAE3_MODULE_TYPE_TP;
2777 if (mac->support_autoneg) {
2778 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2779 linkmode_copy(mac->advertising, mac->supported);
2781 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2783 linkmode_zero(mac->advertising);
2787 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2789 struct hclge_sfp_info_cmd *resp;
2790 struct hclge_desc desc;
2793 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2794 resp = (struct hclge_sfp_info_cmd *)desc.data;
2795 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2796 if (ret == -EOPNOTSUPP) {
2797 dev_warn(&hdev->pdev->dev,
2798 "IMP do not support get SFP speed %d\n", ret);
2801 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2805 *speed = le32_to_cpu(resp->speed);
2810 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2812 struct hclge_sfp_info_cmd *resp;
2813 struct hclge_desc desc;
2816 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2817 resp = (struct hclge_sfp_info_cmd *)desc.data;
2819 resp->query_type = QUERY_ACTIVE_SPEED;
2821 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2822 if (ret == -EOPNOTSUPP) {
2823 dev_warn(&hdev->pdev->dev,
2824 "IMP does not support get SFP info %d\n", ret);
2827 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2831 /* In some cases, the mac speed obtained from IMP may be 0; it
2832 * shouldn't be set to mac->speed.
2834 if (!le32_to_cpu(resp->speed))
2837 mac->speed = le32_to_cpu(resp->speed);
2838 /* if resp->speed_ability is 0, it means the firmware is an old
2839 * version, so do not update these params
2841 if (resp->speed_ability) {
2842 mac->module_type = le32_to_cpu(resp->module_type);
2843 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2844 mac->autoneg = resp->autoneg;
2845 mac->support_autoneg = resp->autoneg_ability;
2846 mac->speed_type = QUERY_ACTIVE_SPEED;
2847 if (!resp->active_fec)
2850 mac->fec_mode = BIT(resp->active_fec);
2852 mac->speed_type = QUERY_SFP_SPEED;
2858 static int hclge_update_port_info(struct hclge_dev *hdev)
2860 struct hclge_mac *mac = &hdev->hw.mac;
2861 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2864 /* get the port info from SFP cmd if not copper port */
2865 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2869 /* if IMP does not support getting SFP/qSFP info, return directly */
2869 if (!hdev->support_sfp_query)
2872 if (hdev->pdev->revision >= 0x21)
2873 ret = hclge_get_sfp_info(hdev, mac);
2875 ret = hclge_get_sfp_speed(hdev, &speed);
2877 if (ret == -EOPNOTSUPP) {
2878 hdev->support_sfp_query = false;
2884 if (hdev->pdev->revision >= 0x21) {
2885 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2886 hclge_update_port_capability(mac);
2889 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2892 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2893 return 0; /* do nothing if no SFP */
2895 /* must configure full duplex for SFP */
2896 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2900 static int hclge_get_status(struct hnae3_handle *handle)
2902 struct hclge_vport *vport = hclge_get_vport(handle);
2903 struct hclge_dev *hdev = vport->back;
2905 hclge_update_link_status(hdev);
2907 return hdev->hw.mac.link;
2910 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2912 if (!pci_num_vf(hdev->pdev)) {
2913 dev_err(&hdev->pdev->dev,
2914 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2918 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2919 dev_err(&hdev->pdev->dev,
2920 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2921 vf, pci_num_vf(hdev->pdev));
2925 /* VFs start from index 1 in the vport array */
2926 vf += HCLGE_VF_VPORT_START_NUM;
2927 return &hdev->vport[vf];
2930 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2931 struct ifla_vf_info *ivf)
2933 struct hclge_vport *vport = hclge_get_vport(handle);
2934 struct hclge_dev *hdev = vport->back;
2936 vport = hclge_get_vf_vport(hdev, vf);
2941 ivf->linkstate = vport->vf_info.link_state;
2942 ivf->spoofchk = vport->vf_info.spoofchk;
2943 ivf->trusted = vport->vf_info.trusted;
2944 ivf->min_tx_rate = 0;
2945 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2946 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2947 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2948 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2949 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2954 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2957 struct hclge_vport *vport = hclge_get_vport(handle);
2958 struct hclge_dev *hdev = vport->back;
2960 vport = hclge_get_vf_vport(hdev, vf);
2964 vport->vf_info.link_state = link_state;
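/* Decode the vector0 interrupt source, checking events in priority order:
 * IMP reset, global reset, MSI-X (hardware error), then mailbox (CMDQ RX).
 * *clearval is filled with the bits the caller should clear for the
 * reported event type.
 */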
2969 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2971 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2973 /* fetch the events from their corresponding regs */
2974 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2975 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2976 msix_src_reg = hclge_read_dev(&hdev->hw,
2977 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2979 /* Assumption: If by any chance reset and mailbox events are reported
2980 * together then we will only process reset event in this go and will
2981 * defer the processing of the mailbox events. Since we would not have
2982 * cleared the RX CMDQ event this time, we would receive another
2983 * interrupt from H/W just for the mailbox.
2985 * check for vector0 reset event sources
2987 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2988 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2989 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2990 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2991 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2992 hdev->rst_stats.imp_rst_cnt++;
2993 return HCLGE_VECTOR0_EVENT_RST;
2996 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2997 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2998 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2999 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3000 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3001 hdev->rst_stats.global_rst_cnt++;
3002 return HCLGE_VECTOR0_EVENT_RST;
3005 /* check for vector0 msix event source */
3006 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3007 *clearval = msix_src_reg;
3008 return HCLGE_VECTOR0_EVENT_ERR;
3011 /* check for vector0 mailbox(=CMDQ RX) event source */
3012 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3013 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3014 *clearval = cmdq_src_reg;
3015 return HCLGE_VECTOR0_EVENT_MBX;
3018 /* print other vector0 event source */
3019 dev_info(&hdev->pdev->dev,
3020 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3021 cmdq_src_reg, msix_src_reg);
3022 *clearval = msix_src_reg;
3024 return HCLGE_VECTOR0_EVENT_OTHER;
3027 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3030 switch (event_type) {
3031 case HCLGE_VECTOR0_EVENT_RST:
3032 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3034 case HCLGE_VECTOR0_EVENT_MBX:
3035 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3042 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3044 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3045 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3046 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3047 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3048 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3051 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3053 writel(enable ? 1 : 0, vector->addr);
3056 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3058 struct hclge_dev *hdev = data;
3062 hclge_enable_vector(&hdev->misc_vector, false);
3063 event_cause = hclge_check_event_cause(hdev, &clearval);
3065 /* vector 0 interrupt is shared with reset and mailbox source events. */
3066 switch (event_cause) {
3067 case HCLGE_VECTOR0_EVENT_ERR:
3068 /* we do not know what type of reset is required now. This could
3069 * only be decided after we fetch the type of errors which
3070 * caused this event. Therefore, we will do below for now:
3071 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3072 * have deferred the type of reset to be used.
3073 * 2. Schedule the reset service task.
3074 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3075 * will fetch the correct type of reset. This would be done
3076 * by first decoding the types of errors.
3078 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3080 case HCLGE_VECTOR0_EVENT_RST:
3081 hclge_reset_task_schedule(hdev);
3083 case HCLGE_VECTOR0_EVENT_MBX:
3084 /* If we are here then,
3085 * 1. Either we are not handling any mbx task and none is scheduled, or
3088 * 2. We could be handling a mbx task but nothing more is scheduled.
3090 * In both cases, we should schedule mbx task as there are more
3091 * mbx messages reported by this interrupt.
3093 hclge_mbx_task_schedule(hdev);
3096 dev_warn(&hdev->pdev->dev,
3097 "received unknown or unhandled event of vector0\n");
3101 hclge_clear_event_cause(hdev, event_cause, clearval);
3103 /* Enable the interrupt if it is not caused by reset. And when
3104 * clearval equals 0, it means the interrupt status may have been
3105 * cleared by hardware before the driver reads the status register.
3106 * For this case, the vector0 interrupt should also be enabled.
3109 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3110 hclge_enable_vector(&hdev->misc_vector, true);
3116 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3118 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3119 dev_warn(&hdev->pdev->dev,
3120 "vector(vector_id %d) has been freed.\n", vector_id);
3124 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3125 hdev->num_msi_left += 1;
3126 hdev->num_msi_used -= 1;
3129 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3131 struct hclge_misc_vector *vector = &hdev->misc_vector;
3133 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3135 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3136 hdev->vector_status[0] = 0;
3138 hdev->num_msi_left -= 1;
3139 hdev->num_msi_used += 1;
3142 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3143 const cpumask_t *mask)
3145 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3148 cpumask_copy(&hdev->affinity_mask, mask);
3151 static void hclge_irq_affinity_release(struct kref *ref)
3155 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3157 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3158 &hdev->affinity_mask);
3160 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3161 hdev->affinity_notify.release = hclge_irq_affinity_release;
3162 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3163 &hdev->affinity_notify);
3166 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3168 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3169 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3172 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3176 hclge_get_misc_vector(hdev);
3178 /* this would be explicitly freed in the end */
3179 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3180 HCLGE_NAME, pci_name(hdev->pdev));
3181 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3182 0, hdev->misc_vector.name, hdev);
3184 hclge_free_vector(hdev, 0);
3185 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3186 hdev->misc_vector.vector_irq);
3192 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3194 free_irq(hdev->misc_vector.vector_irq, hdev);
3195 hclge_free_vector(hdev, 0);
3198 int hclge_notify_client(struct hclge_dev *hdev,
3199 enum hnae3_reset_notify_type type)
3201 struct hnae3_client *client = hdev->nic_client;
3204 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3207 if (!client->ops->reset_notify)
3210 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3211 struct hnae3_handle *handle = &hdev->vport[i].nic;
3214 ret = client->ops->reset_notify(handle, type);
3216 dev_err(&hdev->pdev->dev,
3217 "notify nic client failed %d(%d)\n", type, ret);
3225 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3226 enum hnae3_reset_notify_type type)
3228 struct hnae3_client *client = hdev->roce_client;
3232 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3235 if (!client->ops->reset_notify)
3238 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3239 struct hnae3_handle *handle = &hdev->vport[i].roce;
3241 ret = client->ops->reset_notify(handle, type);
3243 dev_err(&hdev->pdev->dev,
3244 "notify roce client failed %d(%d)",
3253 static int hclge_reset_wait(struct hclge_dev *hdev)
3255 #define HCLGE_RESET_WAIT_MS 100
3256 #define HCLGE_RESET_WAIT_CNT 350
3258 u32 val, reg, reg_bit;
3261 switch (hdev->reset_type) {
3262 case HNAE3_IMP_RESET:
3263 reg = HCLGE_GLOBAL_RESET_REG;
3264 reg_bit = HCLGE_IMP_RESET_BIT;
3266 case HNAE3_GLOBAL_RESET:
3267 reg = HCLGE_GLOBAL_RESET_REG;
3268 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3270 case HNAE3_FUNC_RESET:
3271 reg = HCLGE_FUN_RST_ING;
3272 reg_bit = HCLGE_FUN_RST_ING_B;
3275 dev_err(&hdev->pdev->dev,
3276 "Wait for unsupported reset type: %d\n",
3281 val = hclge_read_dev(&hdev->hw, reg);
3282 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3283 msleep(HCLGE_RESET_WAIT_MS);
3284 val = hclge_read_dev(&hdev->hw, reg);
3288 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3289 dev_warn(&hdev->pdev->dev,
3290 "Wait for reset timeout: %d\n", hdev->reset_type);
3297 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3299 struct hclge_vf_rst_cmd *req;
3300 struct hclge_desc desc;
3302 req = (struct hclge_vf_rst_cmd *)desc.data;
3303 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3304 req->dest_vfid = func_id;
3309 return hclge_cmd_send(&hdev->hw, &desc, 1);
3312 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3316 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3317 struct hclge_vport *vport = &hdev->vport[i];
3320 /* Send cmd to set/clear VF's FUNC_RST_ING */
3321 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3323 dev_err(&hdev->pdev->dev,
3324 "set vf(%u) rst failed %d!\n",
3325 vport->vport_id, ret);
3329 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3332 /* Inform VF to process the reset.
3333 * hclge_inform_reset_assert_to_vf may fail if VF
3334 * driver is not loaded.
3336 ret = hclge_inform_reset_assert_to_vf(vport);
3338 dev_warn(&hdev->pdev->dev,
3339 "inform reset to vf(%u) failed %d!\n",
3340 vport->vport_id, ret);
3346 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3348 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3349 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3350 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3353 hclge_mbx_handler(hdev);
3355 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
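/* Before asserting a PF/FLR reset, poll the firmware with
 * HCLGE_OPC_QUERY_VF_RST_RDY until all VFs report that they have stopped
 * IO, retrying up to HCLGE_PF_RESET_SYNC_CNT times at
 * HCLGE_PF_RESET_SYNC_TIME ms intervals (about 30 seconds in total).
 */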
3358 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3360 struct hclge_pf_rst_sync_cmd *req;
3361 struct hclge_desc desc;
3365 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3366 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3369 /* VF needs to bring the netdev down via mbx during PF or FLR reset */
3370 hclge_mailbox_service_task(hdev);
3372 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3373 /* for compatibility with old firmware, wait
3374 * 100 ms for the VF to stop IO
3376 if (ret == -EOPNOTSUPP) {
3377 msleep(HCLGE_RESET_SYNC_TIME);
3380 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3383 } else if (req->all_vf_ready) {
3386 msleep(HCLGE_PF_RESET_SYNC_TIME);
3387 hclge_cmd_reuse_desc(&desc, true);
3388 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3390 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3393 void hclge_report_hw_error(struct hclge_dev *hdev,
3394 enum hnae3_hw_error_type type)
3396 struct hnae3_client *client = hdev->nic_client;
3399 if (!client || !client->ops->process_hw_error ||
3400 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3403 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3404 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3407 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3411 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3412 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3413 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3414 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3415 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3418 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3419 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3420 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3421 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3425 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3427 struct hclge_desc desc;
3428 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3431 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3432 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3433 req->fun_reset_vfid = func_id;
3435 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3437 dev_err(&hdev->pdev->dev,
3438 "send function reset cmd fail, status =%d\n", ret);
3443 static void hclge_do_reset(struct hclge_dev *hdev)
3445 struct hnae3_handle *handle = &hdev->vport[0].nic;
3446 struct pci_dev *pdev = hdev->pdev;
3449 if (hclge_get_hw_reset_stat(handle)) {
3450 dev_info(&pdev->dev, "hardware reset not finish\n");
3451 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3452 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3453 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3457 switch (hdev->reset_type) {
3458 case HNAE3_GLOBAL_RESET:
3459 dev_info(&pdev->dev, "global reset requested\n");
3460 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3461 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3462 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3464 case HNAE3_FUNC_RESET:
3465 dev_info(&pdev->dev, "PF reset requested\n");
3466 /* schedule again to check later */
3467 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3468 hclge_reset_task_schedule(hdev);
3471 dev_warn(&pdev->dev,
3472 "unsupported reset type: %d\n", hdev->reset_type);
3477 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3478 unsigned long *addr)
3480 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3481 struct hclge_dev *hdev = ae_dev->priv;
3483 /* first, resolve any unknown reset type to the known type(s) */
3484 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3485 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3486 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3487 /* we will intentionally ignore any errors from this function
3488 * as we will end up in *some* reset request in any case
3490 if (hclge_handle_hw_msix_error(hdev, addr))
3491 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3494 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3495 /* We deferred the clearing of the error event which caused the
3496 * interrupt since it was not possible to do that in
3497 * interrupt context (and this is the reason we introduced the
3498 * new UNKNOWN reset type). Now that the errors have been
3499 * handled and cleared in hardware, we can safely enable
3500 * interrupts. This is an exception to the norm.
3502 hclge_enable_vector(&hdev->misc_vector, true);
3505 /* return the highest priority reset level amongst all */
3506 if (test_bit(HNAE3_IMP_RESET, addr)) {
3507 rst_level = HNAE3_IMP_RESET;
3508 clear_bit(HNAE3_IMP_RESET, addr);
3509 clear_bit(HNAE3_GLOBAL_RESET, addr);
3510 clear_bit(HNAE3_FUNC_RESET, addr);
3511 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3512 rst_level = HNAE3_GLOBAL_RESET;
3513 clear_bit(HNAE3_GLOBAL_RESET, addr);
3514 clear_bit(HNAE3_FUNC_RESET, addr);
3515 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3516 rst_level = HNAE3_FUNC_RESET;
3517 clear_bit(HNAE3_FUNC_RESET, addr);
3518 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3519 rst_level = HNAE3_FLR_RESET;
3520 clear_bit(HNAE3_FLR_RESET, addr);
3523 if (hdev->reset_type != HNAE3_NONE_RESET &&
3524 rst_level < hdev->reset_type)
3525 return HNAE3_NONE_RESET;
3530 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3534 switch (hdev->reset_type) {
3535 case HNAE3_IMP_RESET:
3536 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3538 case HNAE3_GLOBAL_RESET:
3539 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3548 /* For revision 0x20, the reset interrupt source
3549 * can only be cleared after the hardware reset is done
3551 if (hdev->pdev->revision == 0x20)
3552 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3555 hclge_enable_vector(&hdev->misc_vector, true);
3558 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3562 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3564 reg_val |= HCLGE_NIC_SW_RST_RDY;
3566 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3568 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3571 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3575 ret = hclge_set_all_vf_rst(hdev, true);
3579 hclge_func_reset_sync_vf(hdev);
3584 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3589 switch (hdev->reset_type) {
3590 case HNAE3_FUNC_RESET:
3591 ret = hclge_func_reset_notify_vf(hdev);
3595 ret = hclge_func_reset_cmd(hdev, 0);
3597 dev_err(&hdev->pdev->dev,
3598 "asserting function reset fail %d!\n", ret);
3602 /* After performing PF reset, it is not necessary to do the
3603 * mailbox handling or send any command to firmware, because
3604 * any mailbox handling or command to firmware is only valid
3605 * after hclge_cmd_init is called.
3607 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3608 hdev->rst_stats.pf_rst_cnt++;
3610 case HNAE3_FLR_RESET:
3611 ret = hclge_func_reset_notify_vf(hdev);
3615 case HNAE3_IMP_RESET:
3616 hclge_handle_imp_error(hdev);
3617 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3618 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3619 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3625 /* inform hardware that preparatory work is done */
3626 msleep(HCLGE_RESET_SYNC_TIME);
3627 hclge_reset_handshake(hdev, true);
3628 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3633 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3635 #define MAX_RESET_FAIL_CNT 5
3637 if (hdev->reset_pending) {
3638 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3639 hdev->reset_pending);
3641 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3642 HCLGE_RESET_INT_M) {
3643 dev_info(&hdev->pdev->dev,
3644 "reset failed because new reset interrupt\n");
3645 hclge_clear_reset_cause(hdev);
3647 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3648 hdev->rst_stats.reset_fail_cnt++;
3649 set_bit(hdev->reset_type, &hdev->reset_pending);
3650 dev_info(&hdev->pdev->dev,
3651 "re-schedule reset task(%u)\n",
3652 hdev->rst_stats.reset_fail_cnt);
3656 hclge_clear_reset_cause(hdev);
3658 /* recover the handshake status when reset fails */
3659 hclge_reset_handshake(hdev, true);
3661 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3663 hclge_dbg_dump_rst_info(hdev);
3665 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3670 static int hclge_set_rst_done(struct hclge_dev *hdev)
3672 struct hclge_pf_rst_done_cmd *req;
3673 struct hclge_desc desc;
3676 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3677 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3678 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3680 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3681 /* To be compatible with the old firmware, which does not support
3682 * command HCLGE_OPC_PF_RST_DONE, just print a warning and return success.
3685 if (ret == -EOPNOTSUPP) {
3686 dev_warn(&hdev->pdev->dev,
3687 "current firmware does not support command(0x%x)!\n",
3688 HCLGE_OPC_PF_RST_DONE);
3691 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3698 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3702 switch (hdev->reset_type) {
3703 case HNAE3_FUNC_RESET:
3705 case HNAE3_FLR_RESET:
3706 ret = hclge_set_all_vf_rst(hdev, false);
3708 case HNAE3_GLOBAL_RESET:
3710 case HNAE3_IMP_RESET:
3711 ret = hclge_set_rst_done(hdev);
3717 /* clear the handshake status after re-initialization is done */
3718 hclge_reset_handshake(hdev, false);
3723 static int hclge_reset_stack(struct hclge_dev *hdev)
3727 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3731 ret = hclge_reset_ae_dev(hdev->ae_dev);
3735 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3738 static int hclge_reset_prepare(struct hclge_dev *hdev)
3742 hdev->rst_stats.reset_cnt++;
3743 /* perform reset of the stack & ae device for a client */
3744 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3749 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3754 return hclge_reset_prepare_wait(hdev);
3757 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3759 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3760 enum hnae3_reset_type reset_level;
3763 hdev->rst_stats.hw_reset_done_cnt++;
3765 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3770 ret = hclge_reset_stack(hdev);
3775 hclge_clear_reset_cause(hdev);
3777 ret = hclge_reset_prepare_up(hdev);
3782 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3783 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 times */
3787 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3791 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3796 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3800 hdev->last_reset_time = jiffies;
3801 hdev->rst_stats.reset_fail_cnt = 0;
3802 hdev->rst_stats.reset_done_cnt++;
3803 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3805 /* if default_reset_request has a higher level reset request,
3806 * it should be handled as soon as possible, since some errors
3807 * need this kind of reset to be fixed.
3809 reset_level = hclge_get_reset_level(ae_dev,
3810 &hdev->default_reset_request);
3811 if (reset_level != HNAE3_NONE_RESET)
3812 set_bit(reset_level, &hdev->reset_request);
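/* Top-level reset flow: prepare (notify clients and assert the reset), wait
 * for the hardware to finish, then rebuild (re-initialize the stack and
 * clients). Any failure falls through to the error handler, which may
 * re-schedule the reset task for another attempt.
 */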
3817 static void hclge_reset(struct hclge_dev *hdev)
3819 if (hclge_reset_prepare(hdev))
3822 if (hclge_reset_wait(hdev))
3825 if (hclge_reset_rebuild(hdev))
3831 if (hclge_reset_err_handle(hdev))
3832 hclge_reset_task_schedule(hdev);
3835 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3837 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3838 struct hclge_dev *hdev = ae_dev->priv;
3840 /* We might end up getting called broadly because of the 2 cases below:
3841 * 1. A recoverable error was conveyed through APEI and the only way to
3842 * bring back normalcy is to reset.
3843 * 2. A new reset request from the stack due to timeout
3845 * For the first case, the error event might not have an ae handle
3846 * available. Check if this is a new reset request and we are not here
3847 * just because the last reset attempt did not succeed and the watchdog
3848 * hit us again. We will know this if the last reset request did not
3849 * occur very recently (watchdog timer = 5*HZ; let us check after a
3850 * sufficiently large time, say 4*5*HZ). In case of a new request we
3851 * reset the "reset level" to PF reset. And if it is a repeat reset
3852 * request of the most recent one then we want to make sure we throttle
3853 * the reset request. Therefore, we will not allow it again before 3*HZ.
3856 handle = &hdev->vport[0].nic;
3858 if (time_before(jiffies, (hdev->last_reset_time +
3859 HCLGE_RESET_INTERVAL))) {
3860 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3862 } else if (hdev->default_reset_request) {
3864 hclge_get_reset_level(ae_dev,
3865 &hdev->default_reset_request);
3866 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3867 hdev->reset_level = HNAE3_FUNC_RESET;
3870 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3873 /* request reset & schedule reset task */
3874 set_bit(hdev->reset_level, &hdev->reset_request);
3875 hclge_reset_task_schedule(hdev);
3877 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3878 hdev->reset_level++;
3881 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3882 enum hnae3_reset_type rst_type)
3884 struct hclge_dev *hdev = ae_dev->priv;
3886 set_bit(rst_type, &hdev->default_reset_request);
3889 static void hclge_reset_timer(struct timer_list *t)
3891 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3893 /* if default_reset_request has no value, it means that this reset
3894 * request has already been handled, so just return here
3896 if (!hdev->default_reset_request)
3899 dev_info(&hdev->pdev->dev,
3900 "triggering reset in reset timer\n");
3901 hclge_reset_event(hdev->pdev, NULL);
3904 static void hclge_reset_subtask(struct hclge_dev *hdev)
3906 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3908 /* check if there is any ongoing reset in the hardware. This status can
3909 * be checked from reset_pending. If there is, then we need to wait for
3910 * the hardware to complete the reset.
3911 * a. If we are able to figure out in reasonable time that the hardware
3912 * has fully reset, then we can proceed with driver and client init;
3914 * b. else, we can come back later to check this status, so re-schedule now.
3917 hdev->last_reset_time = jiffies;
3918 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3919 if (hdev->reset_type != HNAE3_NONE_RESET)
3922 /* check if we got any *new* reset requests to be honored */
3923 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3924 if (hdev->reset_type != HNAE3_NONE_RESET)
3925 hclge_do_reset(hdev);
3927 hdev->reset_type = HNAE3_NONE_RESET;
3930 static void hclge_reset_service_task(struct hclge_dev *hdev)
3932 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3935 down(&hdev->reset_sem);
3936 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3938 hclge_reset_subtask(hdev);
3940 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3941 up(&hdev->reset_sem);
3944 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3948 /* start from vport 1, since the PF (vport 0) is always alive */
3949 for (i = 1; i < hdev->num_alloc_vport; i++) {
3950 struct hclge_vport *vport = &hdev->vport[i];
3952 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3953 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3955 /* If the VF is not alive, set its MPS to the default value */
3956 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3957 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
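/* Periodic service work. It appears to be rate-limited to roughly one full
 * run per second: if less than HZ jiffies have passed since the last full
 * run, only the link/mac/promisc syncing above the check is done and the
 * task is re-scheduled for the remaining time.
 */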
3961 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3963 unsigned long delta = round_jiffies_relative(HZ);
3965 /* Always handle the link updating to make sure link state is
3966 * updated when it is triggered by mbx.
3968 hclge_update_link_status(hdev);
3969 hclge_sync_mac_table(hdev);
3970 hclge_sync_promisc_mode(hdev);
3972 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3973 delta = jiffies - hdev->last_serv_processed;
3975 if (delta < round_jiffies_relative(HZ)) {
3976 delta = round_jiffies_relative(HZ) - delta;
3981 hdev->serv_processed_cnt++;
3982 hclge_update_vport_alive(hdev);
3984 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3985 hdev->last_serv_processed = jiffies;
3989 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3990 hclge_update_stats_for_all(hdev);
3992 hclge_update_port_info(hdev);
3993 hclge_sync_vlan_filter(hdev);
3995 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3996 hclge_rfs_filter_expire(hdev);
3998 hdev->last_serv_processed = jiffies;
4001 hclge_task_schedule(hdev, delta);
4004 static void hclge_service_task(struct work_struct *work)
4006 struct hclge_dev *hdev =
4007 container_of(work, struct hclge_dev, service_task.work);
4009 hclge_reset_service_task(hdev);
4010 hclge_mailbox_service_task(hdev);
4011 hclge_periodic_service_task(hdev);
4013 /* Handle reset and mbx again in case the periodic task delays the
4014 * handling by calling hclge_task_schedule() in
4015 * hclge_periodic_service_task().
4017 hclge_reset_service_task(hdev);
4018 hclge_mailbox_service_task(hdev);
4021 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4023 /* VF handle has no client */
4024 if (!handle->client)
4025 return container_of(handle, struct hclge_vport, nic);
4026 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4027 return container_of(handle, struct hclge_vport, roce);
4029 return container_of(handle, struct hclge_vport, nic);
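/* Allocate interrupt vectors for a vport. Vector 0 is reserved for the misc
 * interrupt, so the search starts at index 1; each vector's io_addr is
 * derived from its index relative to HCLGE_VECTOR_REG_BASE.
 */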
4032 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4033 struct hnae3_vector_info *vector_info)
4035 struct hclge_vport *vport = hclge_get_vport(handle);
4036 struct hnae3_vector_info *vector = vector_info;
4037 struct hclge_dev *hdev = vport->back;
4041 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4042 vector_num = min(hdev->num_msi_left, vector_num);
4044 for (j = 0; j < vector_num; j++) {
4045 for (i = 1; i < hdev->num_msi; i++) {
4046 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4047 vector->vector = pci_irq_vector(hdev->pdev, i);
4048 vector->io_addr = hdev->hw.io_base +
4049 HCLGE_VECTOR_REG_BASE +
4050 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4052 HCLGE_VECTOR_VF_OFFSET;
4053 hdev->vector_status[i] = vport->vport_id;
4054 hdev->vector_irq[i] = vector->vector;
4063 hdev->num_msi_left -= alloc;
4064 hdev->num_msi_used += alloc;
4069 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4073 for (i = 0; i < hdev->num_msi; i++)
4074 if (vector == hdev->vector_irq[i])
4080 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4082 struct hclge_vport *vport = hclge_get_vport(handle);
4083 struct hclge_dev *hdev = vport->back;
4086 vector_id = hclge_get_vector_index(hdev, vector);
4087 if (vector_id < 0) {
4088 dev_err(&hdev->pdev->dev,
4089 "Get vector index fail. vector = %d\n", vector);
4093 hclge_free_vector(hdev, vector_id);
4098 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4100 return HCLGE_RSS_KEY_SIZE;
4103 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4105 return HCLGE_RSS_IND_TBL_SIZE;
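/* Program the RSS hash algorithm and key. The key is longer than one
 * descriptor's payload, so it is written in chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes, with the chunk offset encoded next to the
 * algorithm in the hash_config field.
 */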
4108 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4109 const u8 hfunc, const u8 *key)
4111 struct hclge_rss_config_cmd *req;
4112 unsigned int key_offset = 0;
4113 struct hclge_desc desc;
4118 key_counts = HCLGE_RSS_KEY_SIZE;
4119 req = (struct hclge_rss_config_cmd *)desc.data;
4121 while (key_counts) {
4122 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4125 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4126 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4128 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4129 memcpy(req->hash_key,
4130 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4132 key_counts -= key_size;
4134 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4136 dev_err(&hdev->pdev->dev,
4137 "Configure RSS config fail, status = %d\n",
4145 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4147 struct hclge_rss_indirection_table_cmd *req;
4148 struct hclge_desc desc;
4152 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4154 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4155 hclge_cmd_setup_basic_desc
4156 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4158 req->start_table_index =
4159 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4160 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4162 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4163 req->rss_result[j] =
4164 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4166 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4168 dev_err(&hdev->pdev->dev,
4169 "Configure rss indir table fail,status = %d\n",
4177 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4178 u16 *tc_size, u16 *tc_offset)
4180 struct hclge_rss_tc_mode_cmd *req;
4181 struct hclge_desc desc;
4185 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4186 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4188 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4191 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4192 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4193 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4194 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4195 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4197 req->rss_tc_mode[i] = cpu_to_le16(mode);
4200 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4202 dev_err(&hdev->pdev->dev,
4203 "Configure rss tc mode fail, status = %d\n", ret);
4208 static void hclge_get_rss_type(struct hclge_vport *vport)
4210 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4211 vport->rss_tuple_sets.ipv4_udp_en ||
4212 vport->rss_tuple_sets.ipv4_sctp_en ||
4213 vport->rss_tuple_sets.ipv6_tcp_en ||
4214 vport->rss_tuple_sets.ipv6_udp_en ||
4215 vport->rss_tuple_sets.ipv6_sctp_en)
4216 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4217 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4218 vport->rss_tuple_sets.ipv6_fragment_en)
4219 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4221 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4224 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4226 struct hclge_rss_input_tuple_cmd *req;
4227 struct hclge_desc desc;
4230 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4232 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4234 /* Get the tuple config from the PF */
4235 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4236 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4237 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4238 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4239 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4240 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4241 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4242 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4243 hclge_get_rss_type(&hdev->vport[0]);
4244 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4246 dev_err(&hdev->pdev->dev,
4247 "Configure rss input fail, status = %d\n", ret);
4251 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4254 struct hclge_vport *vport = hclge_get_vport(handle);
4257 /* Get hash algorithm */
4259 switch (vport->rss_algo) {
4260 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4261 *hfunc = ETH_RSS_HASH_TOP;
4263 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4264 *hfunc = ETH_RSS_HASH_XOR;
4267 *hfunc = ETH_RSS_HASH_UNKNOWN;
4272 /* Get the RSS Key required by the user */
4274 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4276 /* Get the indirection table */
4278 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4279 indir[i] = vport->rss_indirection_tbl[i];
4284 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4285 const u8 *key, const u8 hfunc)
4287 struct hclge_vport *vport = hclge_get_vport(handle);
4288 struct hclge_dev *hdev = vport->back;
4292 /* Set the RSS hash key if specified by the user */
4295 case ETH_RSS_HASH_TOP:
4296 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4298 case ETH_RSS_HASH_XOR:
4299 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4301 case ETH_RSS_HASH_NO_CHANGE:
4302 hash_algo = vport->rss_algo;
4308 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4312 /* Update the shadow RSS key with the user specified key */
4313 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4314 vport->rss_algo = hash_algo;
4317 /* Update the shadow RSS table with user specified qids */
4318 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4319 vport->rss_indirection_tbl[i] = indir[i];
4321 /* Update the hardware */
4322 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4325 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4327 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4329 if (nfc->data & RXH_L4_B_2_3)
4330 hash_sets |= HCLGE_D_PORT_BIT;
4332 hash_sets &= ~HCLGE_D_PORT_BIT;
4334 if (nfc->data & RXH_IP_SRC)
4335 hash_sets |= HCLGE_S_IP_BIT;
4337 hash_sets &= ~HCLGE_S_IP_BIT;
4339 if (nfc->data & RXH_IP_DST)
4340 hash_sets |= HCLGE_D_IP_BIT;
4342 hash_sets &= ~HCLGE_D_IP_BIT;
4344 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4345 hash_sets |= HCLGE_V_TAG_BIT;
4350 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4351 struct ethtool_rxnfc *nfc)
4353 struct hclge_vport *vport = hclge_get_vport(handle);
4354 struct hclge_dev *hdev = vport->back;
4355 struct hclge_rss_input_tuple_cmd *req;
4356 struct hclge_desc desc;
4360 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4361 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4364 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4365 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4367 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4368 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4369 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4370 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4371 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4372 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4373 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4374 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4376 tuple_sets = hclge_get_rss_hash_bits(nfc);
4377 switch (nfc->flow_type) {
4379 req->ipv4_tcp_en = tuple_sets;
4382 req->ipv6_tcp_en = tuple_sets;
4385 req->ipv4_udp_en = tuple_sets;
4388 req->ipv6_udp_en = tuple_sets;
4391 req->ipv4_sctp_en = tuple_sets;
4394 if ((nfc->data & RXH_L4_B_0_1) ||
4395 (nfc->data & RXH_L4_B_2_3))
4398 req->ipv6_sctp_en = tuple_sets;
4401 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4404 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4410 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4412 dev_err(&hdev->pdev->dev,
4413 "Set rss tuple fail, status = %d\n", ret);
4417 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4418 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4419 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4420 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4421 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4422 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4423 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4424 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4425 hclge_get_rss_type(vport);
4429 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4430 struct ethtool_rxnfc *nfc)
4432 struct hclge_vport *vport = hclge_get_vport(handle);
4437 switch (nfc->flow_type) {
4439 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4442 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4445 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4448 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4451 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4454 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4458 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4467 if (tuple_sets & HCLGE_D_PORT_BIT)
4468 nfc->data |= RXH_L4_B_2_3;
4469 if (tuple_sets & HCLGE_S_PORT_BIT)
4470 nfc->data |= RXH_L4_B_0_1;
4471 if (tuple_sets & HCLGE_D_IP_BIT)
4472 nfc->data |= RXH_IP_DST;
4473 if (tuple_sets & HCLGE_S_IP_BIT)
4474 nfc->data |= RXH_IP_SRC;
4479 static int hclge_get_tc_size(struct hnae3_handle *handle)
4481 struct hclge_vport *vport = hclge_get_vport(handle);
4482 struct hclge_dev *hdev = vport->back;
4484 return hdev->rss_size_max;
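/* Program the RSS configuration of vport 0 into the hardware: the
 * indirection table, the hash key and algorithm, the input tuples, and
 * the per-TC RSS size/offset mode.
 */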
4487 int hclge_rss_init_hw(struct hclge_dev *hdev)
4489 struct hclge_vport *vport = hdev->vport;
4490 u8 *rss_indir = vport[0].rss_indirection_tbl;
4491 u16 rss_size = vport[0].alloc_rss_size;
4492 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4493 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4494 u8 *key = vport[0].rss_hash_key;
4495 u8 hfunc = vport[0].rss_algo;
4496 u16 tc_valid[HCLGE_MAX_TC_NUM];
4501 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4505 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4509 ret = hclge_set_rss_input_tuple(hdev);
4513 /* Each TC has the same queue size, and the tc_size set to hardware is
4514 * the log2 of the roundup power of two of rss_size; the actual queue
4515 * size is limited by the indirection table.
4517 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4518 dev_err(&hdev->pdev->dev,
4519 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4524 roundup_size = roundup_pow_of_two(rss_size);
4525 roundup_size = ilog2(roundup_size);
4527 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4528 tc_valid[i] = 0;
4530 if (!(hdev->hw_tc_map & BIT(i)))
4531 continue;
4533 tc_valid[i] = 1;
4534 tc_size[i] = roundup_size;
4535 tc_offset[i] = rss_size * i;
4538 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
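/* Initialize the shadow RSS indirection table of each vport with a
 * round-robin mapping over its allocated RSS queues.
 */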
4541 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4543 struct hclge_vport *vport = hdev->vport;
4546 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4547 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4548 vport[j].rss_indirection_tbl[i] =
4549 i % vport[j].alloc_rss_size;
4553 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4555 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4556 struct hclge_vport *vport = hdev->vport;
4558 if (hdev->pdev->revision >= 0x21)
4559 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4561 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4562 vport[i].rss_tuple_sets.ipv4_tcp_en =
4563 HCLGE_RSS_INPUT_TUPLE_OTHER;
4564 vport[i].rss_tuple_sets.ipv4_udp_en =
4565 HCLGE_RSS_INPUT_TUPLE_OTHER;
4566 vport[i].rss_tuple_sets.ipv4_sctp_en =
4567 HCLGE_RSS_INPUT_TUPLE_SCTP;
4568 vport[i].rss_tuple_sets.ipv4_fragment_en =
4569 HCLGE_RSS_INPUT_TUPLE_OTHER;
4570 vport[i].rss_tuple_sets.ipv6_tcp_en =
4571 HCLGE_RSS_INPUT_TUPLE_OTHER;
4572 vport[i].rss_tuple_sets.ipv6_udp_en =
4573 HCLGE_RSS_INPUT_TUPLE_OTHER;
4574 vport[i].rss_tuple_sets.ipv6_sctp_en =
4575 HCLGE_RSS_INPUT_TUPLE_SCTP;
4576 vport[i].rss_tuple_sets.ipv6_fragment_en =
4577 HCLGE_RSS_INPUT_TUPLE_OTHER;
4579 vport[i].rss_algo = rss_algo;
4581 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4582 HCLGE_RSS_KEY_SIZE);
4585 hclge_rss_indir_init_cfg(hdev);
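/* Map a chain of TQP rings onto an interrupt vector, or unmap it when
 * @en is false. Ring/vector bindings are batched into descriptors of
 * up to HCLGE_VECTOR_ELEMENTS_PER_CMD entries each.
 */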
4588 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4589 int vector_id, bool en,
4590 struct hnae3_ring_chain_node *ring_chain)
4592 struct hclge_dev *hdev = vport->back;
4593 struct hnae3_ring_chain_node *node;
4594 struct hclge_desc desc;
4595 struct hclge_ctrl_vector_chain_cmd *req =
4596 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4597 enum hclge_cmd_status status;
4598 enum hclge_opcode_type op;
4599 u16 tqp_type_and_id;
4602 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4603 hclge_cmd_setup_basic_desc(&desc, op, false);
4604 req->int_vector_id = vector_id;
4607 for (node = ring_chain; node; node = node->next) {
4608 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4609 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4611 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4612 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4613 HCLGE_TQP_ID_S, node->tqp_index);
4614 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4616 hnae3_get_field(node->int_gl_idx,
4617 HNAE3_RING_GL_IDX_M,
4618 HNAE3_RING_GL_IDX_S));
4619 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4620 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4621 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4622 req->vfid = vport->vport_id;
4624 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4626 dev_err(&hdev->pdev->dev,
4627 "Map TQP fail, status is %d.\n",
4633 hclge_cmd_setup_basic_desc(&desc,
4636 req->int_vector_id = vector_id;
4641 req->int_cause_num = i;
4642 req->vfid = vport->vport_id;
4643 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4645 dev_err(&hdev->pdev->dev,
4646 "Map TQP fail, status is %d.\n", status);
4654 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4655 struct hnae3_ring_chain_node *ring_chain)
4657 struct hclge_vport *vport = hclge_get_vport(handle);
4658 struct hclge_dev *hdev = vport->back;
4661 vector_id = hclge_get_vector_index(hdev, vector);
4662 if (vector_id < 0) {
4663 dev_err(&hdev->pdev->dev,
4664 "failed to get vector index. vector=%d\n", vector);
4668 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4671 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4672 struct hnae3_ring_chain_node *ring_chain)
4674 struct hclge_vport *vport = hclge_get_vport(handle);
4675 struct hclge_dev *hdev = vport->back;
4678 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4681 vector_id = hclge_get_vector_index(hdev, vector);
4682 if (vector_id < 0) {
4683 dev_err(&handle->pdev->dev,
4684 "Get vector index fail. ret =%d\n", vector_id);
4688 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4690 dev_err(&handle->pdev->dev,
4691 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4697 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4698 struct hclge_promisc_param *param)
4700 struct hclge_promisc_cfg_cmd *req;
4701 struct hclge_desc desc;
4704 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4706 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4707 req->vf_id = param->vf_id;
4709 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4710 * pdev revision 0x20; newer revisions support them. Setting these two
4711 * fields does not cause the firmware on revision 0x20 to return an
4712 * error.
4714 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4715 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4717 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4719 dev_err(&hdev->pdev->dev,
4720 "failed to set vport %d promisc mode, ret = %d.\n",
4726 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4727 bool en_uc, bool en_mc, bool en_bc,
4733 memset(param, 0, sizeof(struct hclge_promisc_param));
4734 if (en_uc)
4735 param->enable = HCLGE_PROMISC_EN_UC;
4736 if (en_mc)
4737 param->enable |= HCLGE_PROMISC_EN_MC;
4738 if (en_bc)
4739 param->enable |= HCLGE_PROMISC_EN_BC;
4740 param->vf_id = vport_id;
4743 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4744 bool en_mc_pmc, bool en_bc_pmc)
4746 struct hclge_dev *hdev = vport->back;
4747 struct hclge_promisc_param param;
4749 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4751 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4754 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4757 struct hclge_vport *vport = hclge_get_vport(handle);
4758 bool en_bc_pmc = true;
4760 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4761 * is always bypassed. So broadcast promisc should be disabled until
4762 * the user enables promisc mode.
4764 if (handle->pdev->revision == 0x20)
4765 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4767 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4771 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4773 struct hclge_vport *vport = hclge_get_vport(handle);
4774 struct hclge_dev *hdev = vport->back;
4776 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
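/* Query the flow director mode (TCAM depth and key width) supported by
 * the firmware.
 */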
4779 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4781 struct hclge_get_fd_mode_cmd *req;
4782 struct hclge_desc desc;
4785 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4787 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4789 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4791 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4795 *fd_mode = req->mode;
4800 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4801 u32 *stage1_entry_num,
4802 u32 *stage2_entry_num,
4803 u16 *stage1_counter_num,
4804 u16 *stage2_counter_num)
4806 struct hclge_get_fd_allocation_cmd *req;
4807 struct hclge_desc desc;
4810 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4812 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4814 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4816 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4821 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4822 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4823 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4824 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4829 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4830 enum HCLGE_FD_STAGE stage_num)
4832 struct hclge_set_fd_key_config_cmd *req;
4833 struct hclge_fd_key_cfg *stage;
4834 struct hclge_desc desc;
4837 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4839 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4840 stage = &hdev->fd_cfg.key_cfg[stage_num];
4841 req->stage = stage_num;
4842 req->key_select = stage->key_sel;
4843 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4844 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4845 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4846 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4847 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4848 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4850 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4852 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4857 static int hclge_init_fd_config(struct hclge_dev *hdev)
4859 #define LOW_2_WORDS 0x03
4860 struct hclge_fd_key_cfg *key_cfg;
4863 if (!hnae3_dev_fd_supported(hdev))
4866 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4870 switch (hdev->fd_cfg.fd_mode) {
4871 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4872 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4874 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4875 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4878 dev_err(&hdev->pdev->dev,
4879 "Unsupported flow director mode %u\n",
4880 hdev->fd_cfg.fd_mode);
4884 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4885 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4886 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4887 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4888 key_cfg->outer_sipv6_word_en = 0;
4889 key_cfg->outer_dipv6_word_en = 0;
4891 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4892 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4893 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4894 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4896 /* If the max 400-bit key is used, the src/dst mac tuples can also be supported */
4897 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
4898 key_cfg->tuple_active |=
4899 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4901 /* roce_type is used to filter roce frames
4902 * dst_vport is used to specify the rule
4904 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4906 ret = hclge_get_fd_allocation(hdev,
4907 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4908 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4909 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4910 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4914 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
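/* Write one half (x or y) of a TCAM entry's key; the key data is split
 * across three chained descriptors.
 */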
4917 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4918 int loc, u8 *key, bool is_add)
4920 struct hclge_fd_tcam_config_1_cmd *req1;
4921 struct hclge_fd_tcam_config_2_cmd *req2;
4922 struct hclge_fd_tcam_config_3_cmd *req3;
4923 struct hclge_desc desc[3];
4926 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4927 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4928 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4929 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4930 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4932 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4933 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4934 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4936 req1->stage = stage;
4937 req1->xy_sel = sel_x ? 1 : 0;
4938 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4939 req1->index = cpu_to_le32(loc);
4940 req1->entry_vld = sel_x ? is_add : 0;
4943 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4944 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4945 sizeof(req2->tcam_data));
4946 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4947 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4950 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4952 dev_err(&hdev->pdev->dev,
4953 "config tcam key fail, ret=%d\n",
4959 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4960 struct hclge_fd_ad_data *action)
4962 struct hclge_fd_ad_config_cmd *req;
4963 struct hclge_desc desc;
4967 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4969 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4970 req->index = cpu_to_le32(loc);
4973 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4974 action->write_rule_id_to_bd);
4975 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4978 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4979 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4980 action->forward_to_direct_queue);
4981 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4983 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4984 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4985 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4986 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4987 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4988 action->next_input_key);
4990 req->ad_data = cpu_to_le64(ad_data);
4991 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4993 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
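/* Convert one tuple field of a rule into its TCAM x/y key
 * representation via calc_x()/calc_y(). Returns false if the rule does
 * not use this tuple.
 */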
4998 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4999 struct hclge_fd_rule *rule)
5001 u16 tmp_x_s, tmp_y_s;
5002 u32 tmp_x_l, tmp_y_l;
5005 if (rule->unused_tuple & tuple_bit)
5006 return false;
5008 switch (tuple_bit) {
5009 case BIT(INNER_DST_MAC):
5010 for (i = 0; i < ETH_ALEN; i++) {
5011 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5012 rule->tuples_mask.dst_mac[i]);
5013 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5014 rule->tuples_mask.dst_mac[i]);
5018 case BIT(INNER_SRC_MAC):
5019 for (i = 0; i < ETH_ALEN; i++) {
5020 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5021 rule->tuples_mask.src_mac[i]);
5022 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5023 rule->tuples_mask.src_mac[i]);
5027 case BIT(INNER_VLAN_TAG_FST):
5028 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5029 rule->tuples_mask.vlan_tag1);
5030 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5031 rule->tuples_mask.vlan_tag1);
5032 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5033 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5036 case BIT(INNER_ETH_TYPE):
5037 calc_x(tmp_x_s, rule->tuples.ether_proto,
5038 rule->tuples_mask.ether_proto);
5039 calc_y(tmp_y_s, rule->tuples.ether_proto,
5040 rule->tuples_mask.ether_proto);
5041 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5042 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5045 case BIT(INNER_IP_TOS):
5046 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5047 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5050 case BIT(INNER_IP_PROTO):
5051 calc_x(*key_x, rule->tuples.ip_proto,
5052 rule->tuples_mask.ip_proto);
5053 calc_y(*key_y, rule->tuples.ip_proto,
5054 rule->tuples_mask.ip_proto);
5057 case BIT(INNER_SRC_IP):
5058 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5059 rule->tuples_mask.src_ip[IPV4_INDEX]);
5060 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5061 rule->tuples_mask.src_ip[IPV4_INDEX]);
5062 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5063 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5066 case BIT(INNER_DST_IP):
5067 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5068 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5069 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5070 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5071 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5072 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5075 case BIT(INNER_SRC_PORT):
5076 calc_x(tmp_x_s, rule->tuples.src_port,
5077 rule->tuples_mask.src_port);
5078 calc_y(tmp_y_s, rule->tuples.src_port,
5079 rule->tuples_mask.src_port);
5080 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5081 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5084 case BIT(INNER_DST_PORT):
5085 calc_x(tmp_x_s, rule->tuples.dst_port,
5086 rule->tuples_mask.dst_port);
5087 calc_y(tmp_y_s, rule->tuples.dst_port,
5088 rule->tuples_mask.dst_port);
5089 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5090 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5098 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5099 u8 vf_id, u8 network_port_id)
5101 u32 port_number = 0;
5103 if (port_type == HOST_PORT) {
5104 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5106 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5108 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5110 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5111 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5112 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5118 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5119 __le32 *key_x, __le32 *key_y,
5120 struct hclge_fd_rule *rule)
5122 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5123 u8 cur_pos = 0, tuple_size, shift_bits;
5126 for (i = 0; i < MAX_META_DATA; i++) {
5127 tuple_size = meta_data_key_info[i].key_length;
5128 tuple_bit = key_cfg->meta_data_active & BIT(i);
5130 switch (tuple_bit) {
5131 case BIT(ROCE_TYPE):
5132 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5133 cur_pos += tuple_size;
5135 case BIT(DST_VPORT):
5136 port_number = hclge_get_port_number(HOST_PORT, 0,
5138 hnae3_set_field(meta_data,
5139 GENMASK(cur_pos + tuple_size - 1, cur_pos),
5140 cur_pos, port_number);
5141 cur_pos += tuple_size;
5148 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5149 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5150 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5152 *key_x = cpu_to_le32(tmp_x << shift_bits);
5153 *key_y = cpu_to_le32(tmp_y << shift_bits);
5156 /* A complete key consists of a meta data key and a tuple key.
5157 * The meta data key is stored in the MSB region, the tuple key in the
5158 * LSB region, and unused bits are filled with 0.
5160 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5161 struct hclge_fd_rule *rule)
5163 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5164 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5165 u8 *cur_key_x, *cur_key_y;
5166 u8 meta_data_region;
5171 memset(key_x, 0, sizeof(key_x));
5172 memset(key_y, 0, sizeof(key_y));
5176 for (i = 0 ; i < MAX_TUPLE; i++) {
5180 tuple_size = tuple_key_info[i].key_length / 8;
5181 check_tuple = key_cfg->tuple_active & BIT(i);
5183 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5186 cur_key_x += tuple_size;
5187 cur_key_y += tuple_size;
5191 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5192 MAX_META_DATA_LENGTH / 8;
5194 hclge_fd_convert_meta_data(key_cfg,
5195 (__le32 *)(key_x + meta_data_region),
5196 (__le32 *)(key_y + meta_data_region),
5199 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5202 dev_err(&hdev->pdev->dev,
5203 "fd key_y config fail, loc=%u, ret=%d\n",
5204 rule->location, ret);
5208 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5211 dev_err(&hdev->pdev->dev,
5212 "fd key_x config fail, loc=%u, ret=%d\n",
5213 rule->location, ret);
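/* Fill the action data for a rule (drop or forward to a queue, counter
 * usage, rule id write-back) and program it into the AD table.
 */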
5217 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5218 struct hclge_fd_rule *rule)
5220 struct hclge_fd_ad_data ad_data;
5222 ad_data.ad_id = rule->location;
5224 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5225 ad_data.drop_packet = true;
5226 ad_data.forward_to_direct_queue = false;
5227 ad_data.queue_id = 0;
5229 ad_data.drop_packet = false;
5230 ad_data.forward_to_direct_queue = true;
5231 ad_data.queue_id = rule->queue_id;
5234 ad_data.use_counter = false;
5235 ad_data.counter_id = 0;
5237 ad_data.use_next_stage = false;
5238 ad_data.next_input_key = 0;
5240 ad_data.write_rule_id_to_bd = true;
5241 ad_data.rule_id = rule->location;
5243 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5246 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5249 if (!spec || !unused_tuple)
5252 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5255 *unused_tuple |= BIT(INNER_SRC_IP);
5258 *unused_tuple |= BIT(INNER_DST_IP);
5261 *unused_tuple |= BIT(INNER_SRC_PORT);
5264 *unused_tuple |= BIT(INNER_DST_PORT);
5267 *unused_tuple |= BIT(INNER_IP_TOS);
5272 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5275 if (!spec || !unused_tuple)
5278 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5279 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5282 *unused_tuple |= BIT(INNER_SRC_IP);
5285 *unused_tuple |= BIT(INNER_DST_IP);
5288 *unused_tuple |= BIT(INNER_IP_TOS);
5291 *unused_tuple |= BIT(INNER_IP_PROTO);
5293 if (spec->l4_4_bytes)
5296 if (spec->ip_ver != ETH_RX_NFC_IP4)
5302 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5305 if (!spec || !unused_tuple)
5308 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5311 /* check whether the src/dst ip addresses are used */
5312 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5313 !spec->ip6src[2] && !spec->ip6src[3])
5314 *unused_tuple |= BIT(INNER_SRC_IP);
5316 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5317 !spec->ip6dst[2] && !spec->ip6dst[3])
5318 *unused_tuple |= BIT(INNER_DST_IP);
5321 *unused_tuple |= BIT(INNER_SRC_PORT);
5324 *unused_tuple |= BIT(INNER_DST_PORT);
5332 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5335 if (!spec || !unused_tuple)
5338 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5339 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5341 /* check whether the src/dst ip addresses are used */
5342 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5343 !spec->ip6src[2] && !spec->ip6src[3])
5344 *unused_tuple |= BIT(INNER_SRC_IP);
5346 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5347 !spec->ip6dst[2] && !spec->ip6dst[3])
5348 *unused_tuple |= BIT(INNER_DST_IP);
5350 if (!spec->l4_proto)
5351 *unused_tuple |= BIT(INNER_IP_PROTO);
5356 if (spec->l4_4_bytes)
5362 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5364 if (!spec || !unused_tuple)
5367 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5368 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5369 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5371 if (is_zero_ether_addr(spec->h_source))
5372 *unused_tuple |= BIT(INNER_SRC_MAC);
5374 if (is_zero_ether_addr(spec->h_dest))
5375 *unused_tuple |= BIT(INNER_DST_MAC);
5378 *unused_tuple |= BIT(INNER_ETH_TYPE);
5383 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5384 struct ethtool_rx_flow_spec *fs,
5387 if (fs->flow_type & FLOW_EXT) {
5388 if (fs->h_ext.vlan_etype) {
5389 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5393 if (!fs->h_ext.vlan_tci)
5394 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5396 if (fs->m_ext.vlan_tci &&
5397 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5398 dev_err(&hdev->pdev->dev,
5399 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5400 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5404 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5407 if (fs->flow_type & FLOW_MAC_EXT) {
5408 if (hdev->fd_cfg.fd_mode !=
5409 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5410 dev_err(&hdev->pdev->dev,
5411 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5415 if (is_zero_ether_addr(fs->h_ext.h_dest))
5416 *unused_tuple |= BIT(INNER_DST_MAC);
5418 *unused_tuple &= ~BIT(INNER_DST_MAC);
5424 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5425 struct ethtool_rx_flow_spec *fs,
5431 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5432 dev_err(&hdev->pdev->dev,
5433 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5435 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5439 if ((fs->flow_type & FLOW_EXT) &&
5440 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5441 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5445 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5446 switch (flow_type) {
5450 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5454 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5460 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5463 case IPV6_USER_FLOW:
5464 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5468 if (hdev->fd_cfg.fd_mode !=
5469 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5470 dev_err(&hdev->pdev->dev,
5471 "ETHER_FLOW is not supported in current fd mode!\n");
5475 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5479 dev_err(&hdev->pdev->dev,
5480 "unsupported protocol type, protocol type = %#x\n",
5486 dev_err(&hdev->pdev->dev,
5487 "failed to check flow union tuple, ret = %d\n",
5492 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5495 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5497 struct hclge_fd_rule *rule = NULL;
5498 struct hlist_node *node2;
5500 spin_lock_bh(&hdev->fd_rule_lock);
5501 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5502 if (rule->location >= location)
5506 spin_unlock_bh(&hdev->fd_rule_lock);
5508 return rule && rule->location == location;
5511 /* the caller must hold fd_rule_lock */
5512 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5513 struct hclge_fd_rule *new_rule,
5517 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5518 struct hlist_node *node2;
5520 if (is_add && !new_rule)
5523 hlist_for_each_entry_safe(rule, node2,
5524 &hdev->fd_rule_list, rule_node) {
5525 if (rule->location >= location)
5530 if (rule && rule->location == location) {
5531 hlist_del(&rule->rule_node);
5533 hdev->hclge_fd_rule_num--;
5536 if (!hdev->hclge_fd_rule_num)
5537 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5538 clear_bit(location, hdev->fd_bmap);
5542 } else if (!is_add) {
5543 dev_err(&hdev->pdev->dev,
5544 "delete fail, rule %u is inexistent\n",
5549 INIT_HLIST_NODE(&new_rule->rule_node);
5552 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5554 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5556 set_bit(location, hdev->fd_bmap);
5557 hdev->hclge_fd_rule_num++;
5558 hdev->fd_active_type = new_rule->rule_type;
5563 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5564 struct ethtool_rx_flow_spec *fs,
5565 struct hclge_fd_rule *rule)
5567 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5569 switch (flow_type) {
5573 rule->tuples.src_ip[IPV4_INDEX] =
5574 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5575 rule->tuples_mask.src_ip[IPV4_INDEX] =
5576 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5578 rule->tuples.dst_ip[IPV4_INDEX] =
5579 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5580 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5581 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5583 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5584 rule->tuples_mask.src_port =
5585 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5587 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5588 rule->tuples_mask.dst_port =
5589 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5591 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5592 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5594 rule->tuples.ether_proto = ETH_P_IP;
5595 rule->tuples_mask.ether_proto = 0xFFFF;
5599 rule->tuples.src_ip[IPV4_INDEX] =
5600 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5601 rule->tuples_mask.src_ip[IPV4_INDEX] =
5602 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5604 rule->tuples.dst_ip[IPV4_INDEX] =
5605 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5606 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5607 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5609 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5610 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5612 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5613 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5615 rule->tuples.ether_proto = ETH_P_IP;
5616 rule->tuples_mask.ether_proto = 0xFFFF;
5622 be32_to_cpu_array(rule->tuples.src_ip,
5623 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5624 be32_to_cpu_array(rule->tuples_mask.src_ip,
5625 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5627 be32_to_cpu_array(rule->tuples.dst_ip,
5628 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5629 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5630 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5632 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5633 rule->tuples_mask.src_port =
5634 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5636 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5637 rule->tuples_mask.dst_port =
5638 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5640 rule->tuples.ether_proto = ETH_P_IPV6;
5641 rule->tuples_mask.ether_proto = 0xFFFF;
5644 case IPV6_USER_FLOW:
5645 be32_to_cpu_array(rule->tuples.src_ip,
5646 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5647 be32_to_cpu_array(rule->tuples_mask.src_ip,
5648 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5650 be32_to_cpu_array(rule->tuples.dst_ip,
5651 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5652 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5653 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5655 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5656 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5658 rule->tuples.ether_proto = ETH_P_IPV6;
5659 rule->tuples_mask.ether_proto = 0xFFFF;
5663 ether_addr_copy(rule->tuples.src_mac,
5664 fs->h_u.ether_spec.h_source);
5665 ether_addr_copy(rule->tuples_mask.src_mac,
5666 fs->m_u.ether_spec.h_source);
5668 ether_addr_copy(rule->tuples.dst_mac,
5669 fs->h_u.ether_spec.h_dest);
5670 ether_addr_copy(rule->tuples_mask.dst_mac,
5671 fs->m_u.ether_spec.h_dest);
5673 rule->tuples.ether_proto =
5674 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5675 rule->tuples_mask.ether_proto =
5676 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5683 switch (flow_type) {
5686 rule->tuples.ip_proto = IPPROTO_SCTP;
5687 rule->tuples_mask.ip_proto = 0xFF;
5691 rule->tuples.ip_proto = IPPROTO_TCP;
5692 rule->tuples_mask.ip_proto = 0xFF;
5696 rule->tuples.ip_proto = IPPROTO_UDP;
5697 rule->tuples_mask.ip_proto = 0xFF;
5703 if (fs->flow_type & FLOW_EXT) {
5704 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5705 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5708 if (fs->flow_type & FLOW_MAC_EXT) {
5709 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5710 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5716 /* the caller must hold fd_rule_lock */
5717 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5718 struct hclge_fd_rule *rule)
5723 dev_err(&hdev->pdev->dev,
5724 "The flow director rule is NULL\n");
5728 /* this never fails here, so there is no need to check the return value */
5729 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5731 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5735 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5742 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
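/* Add a flow director rule from an ethtool request: validate the spec,
 * resolve the destination vport and queue, extract the tuples, and
 * program the rule, clearing aRFS rules first to avoid conflicts.
 */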
5746 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5747 struct ethtool_rxnfc *cmd)
5749 struct hclge_vport *vport = hclge_get_vport(handle);
5750 struct hclge_dev *hdev = vport->back;
5751 u16 dst_vport_id = 0, q_index = 0;
5752 struct ethtool_rx_flow_spec *fs;
5753 struct hclge_fd_rule *rule;
5758 if (!hnae3_dev_fd_supported(hdev)) {
5759 dev_err(&hdev->pdev->dev,
5760 "flow table director is not supported\n");
5765 dev_err(&hdev->pdev->dev,
5766 "please enable flow director first\n");
5770 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5772 ret = hclge_fd_check_spec(hdev, fs, &unused);
5776 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5777 action = HCLGE_FD_ACTION_DROP_PACKET;
5779 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5780 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5783 if (vf > hdev->num_req_vfs) {
5784 dev_err(&hdev->pdev->dev,
5785 "Error: vf id (%u) > max vf num (%u)\n",
5786 vf, hdev->num_req_vfs);
5790 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5791 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5794 dev_err(&hdev->pdev->dev,
5795 "Error: queue id (%u) > max tqp num (%u)\n",
5800 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5804 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5808 ret = hclge_fd_get_tuple(hdev, fs, rule);
5814 rule->flow_type = fs->flow_type;
5815 rule->location = fs->location;
5816 rule->unused_tuple = unused;
5817 rule->vf_id = dst_vport_id;
5818 rule->queue_id = q_index;
5819 rule->action = action;
5820 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5822 /* to avoid rule conflicts, clear all aRFS rules when the user
5823 * configures rules via ethtool
5825 hclge_clear_arfs_rules(handle);
5827 spin_lock_bh(&hdev->fd_rule_lock);
5828 ret = hclge_fd_config_rule(hdev, rule);
5830 spin_unlock_bh(&hdev->fd_rule_lock);
5835 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5836 struct ethtool_rxnfc *cmd)
5838 struct hclge_vport *vport = hclge_get_vport(handle);
5839 struct hclge_dev *hdev = vport->back;
5840 struct ethtool_rx_flow_spec *fs;
5843 if (!hnae3_dev_fd_supported(hdev))
5846 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5848 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5851 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5852 dev_err(&hdev->pdev->dev,
5853 "Delete fail, rule %u is inexistent\n", fs->location);
5857 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5862 spin_lock_bh(&hdev->fd_rule_lock);
5863 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5865 spin_unlock_bh(&hdev->fd_rule_lock);
5870 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5873 struct hclge_vport *vport = hclge_get_vport(handle);
5874 struct hclge_dev *hdev = vport->back;
5875 struct hclge_fd_rule *rule;
5876 struct hlist_node *node;
5879 if (!hnae3_dev_fd_supported(hdev))
5882 spin_lock_bh(&hdev->fd_rule_lock);
5883 for_each_set_bit(location, hdev->fd_bmap,
5884 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5885 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5889 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5891 hlist_del(&rule->rule_node);
5894 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5895 hdev->hclge_fd_rule_num = 0;
5896 bitmap_zero(hdev->fd_bmap,
5897 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5900 spin_unlock_bh(&hdev->fd_rule_lock);
5903 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5905 struct hclge_vport *vport = hclge_get_vport(handle);
5906 struct hclge_dev *hdev = vport->back;
5907 struct hclge_fd_rule *rule;
5908 struct hlist_node *node;
5911 /* Return ok here, because reset error handling will check this
5912 * return value. If an error is returned here, the reset process will
5913 * fail.
5915 if (!hnae3_dev_fd_supported(hdev))
5918 /* if fd is disabled, the rules should not be restored during reset */
5922 spin_lock_bh(&hdev->fd_rule_lock);
5923 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5924 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5926 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5929 dev_warn(&hdev->pdev->dev,
5930 "Restore rule %u failed, remove it\n",
5932 clear_bit(rule->location, hdev->fd_bmap);
5933 hlist_del(&rule->rule_node);
5935 hdev->hclge_fd_rule_num--;
5939 if (hdev->hclge_fd_rule_num)
5940 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5942 spin_unlock_bh(&hdev->fd_rule_lock);
5947 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5948 struct ethtool_rxnfc *cmd)
5950 struct hclge_vport *vport = hclge_get_vport(handle);
5951 struct hclge_dev *hdev = vport->back;
5953 if (!hnae3_dev_fd_supported(hdev))
5956 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5957 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5962 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
5963 struct ethtool_tcpip4_spec *spec,
5964 struct ethtool_tcpip4_spec *spec_mask)
5966 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5967 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5968 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5970 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5971 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5972 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5974 spec->psrc = cpu_to_be16(rule->tuples.src_port);
5975 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5976 0 : cpu_to_be16(rule->tuples_mask.src_port);
5978 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
5979 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
5980 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5982 spec->tos = rule->tuples.ip_tos;
5983 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5984 0 : rule->tuples_mask.ip_tos;
5987 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
5988 struct ethtool_usrip4_spec *spec,
5989 struct ethtool_usrip4_spec *spec_mask)
5991 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5992 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5993 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5995 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5996 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5997 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5999 spec->tos = rule->tuples.ip_tos;
6000 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6001 0 : rule->tuples_mask.ip_tos;
6003 spec->proto = rule->tuples.ip_proto;
6004 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6005 0 : rule->tuples_mask.ip_proto;
6007 spec->ip_ver = ETH_RX_NFC_IP4;
6010 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6011 struct ethtool_tcpip6_spec *spec,
6012 struct ethtool_tcpip6_spec *spec_mask)
6014 cpu_to_be32_array(spec->ip6src,
6015 rule->tuples.src_ip, IPV6_SIZE);
6016 cpu_to_be32_array(spec->ip6dst,
6017 rule->tuples.dst_ip, IPV6_SIZE);
6018 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6019 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6021 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6024 if (rule->unused_tuple & BIT(INNER_DST_IP))
6025 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6027 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6030 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6031 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6032 0 : cpu_to_be16(rule->tuples_mask.src_port);
6034 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6035 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6036 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6039 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6040 struct ethtool_usrip6_spec *spec,
6041 struct ethtool_usrip6_spec *spec_mask)
6043 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6044 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6045 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6046 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6048 cpu_to_be32_array(spec_mask->ip6src,
6049 rule->tuples_mask.src_ip, IPV6_SIZE);
6051 if (rule->unused_tuple & BIT(INNER_DST_IP))
6052 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6054 cpu_to_be32_array(spec_mask->ip6dst,
6055 rule->tuples_mask.dst_ip, IPV6_SIZE);
6057 spec->l4_proto = rule->tuples.ip_proto;
6058 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6059 0 : rule->tuples_mask.ip_proto;
6062 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6063 struct ethhdr *spec,
6064 struct ethhdr *spec_mask)
6066 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6067 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6069 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6070 eth_zero_addr(spec_mask->h_source);
6072 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6074 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6075 eth_zero_addr(spec_mask->h_dest);
6077 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6079 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6080 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6081 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6084 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6085 struct hclge_fd_rule *rule)
6087 if (fs->flow_type & FLOW_EXT) {
6088 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6089 fs->m_ext.vlan_tci =
6090 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6091 cpu_to_be16(VLAN_VID_MASK) :
6092 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6095 if (fs->flow_type & FLOW_MAC_EXT) {
6096 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6097 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6098 eth_zero_addr(fs->m_ext.h_dest);
6100 ether_addr_copy(fs->m_ext.h_dest,
6101 rule->tuples_mask.dst_mac);
6105 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6106 struct ethtool_rxnfc *cmd)
6108 struct hclge_vport *vport = hclge_get_vport(handle);
6109 struct hclge_fd_rule *rule = NULL;
6110 struct hclge_dev *hdev = vport->back;
6111 struct ethtool_rx_flow_spec *fs;
6112 struct hlist_node *node2;
6114 if (!hnae3_dev_fd_supported(hdev))
6117 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6119 spin_lock_bh(&hdev->fd_rule_lock);
6121 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6122 if (rule->location >= fs->location)
6126 if (!rule || fs->location != rule->location) {
6127 spin_unlock_bh(&hdev->fd_rule_lock);
6132 fs->flow_type = rule->flow_type;
6133 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6137 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6138 &fs->m_u.tcp_ip4_spec);
6141 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6142 &fs->m_u.usr_ip4_spec);
6147 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6148 &fs->m_u.tcp_ip6_spec);
6150 case IPV6_USER_FLOW:
6151 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6152 &fs->m_u.usr_ip6_spec);
6154 /* The flow type of the fd rule has been checked before it was added to
6155 * the rule list. As the other flow types have been handled above, the
6156 * default case here must be ETHER_FLOW.
6159 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6160 &fs->m_u.ether_spec);
6164 hclge_fd_get_ext_info(fs, rule);
6166 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6167 fs->ring_cookie = RX_CLS_FLOW_DISC;
6171 fs->ring_cookie = rule->queue_id;
6172 vf_id = rule->vf_id;
6173 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6174 fs->ring_cookie |= vf_id;
6177 spin_unlock_bh(&hdev->fd_rule_lock);
6182 static int hclge_get_all_rules(struct hnae3_handle *handle,
6183 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6185 struct hclge_vport *vport = hclge_get_vport(handle);
6186 struct hclge_dev *hdev = vport->back;
6187 struct hclge_fd_rule *rule;
6188 struct hlist_node *node2;
6191 if (!hnae3_dev_fd_supported(hdev))
6194 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6196 spin_lock_bh(&hdev->fd_rule_lock);
6197 hlist_for_each_entry_safe(rule, node2,
6198 &hdev->fd_rule_list, rule_node) {
6199 if (cnt == cmd->rule_cnt) {
6200 spin_unlock_bh(&hdev->fd_rule_lock);
6204 rule_locs[cnt] = rule->location;
6208 spin_unlock_bh(&hdev->fd_rule_lock);
6210 cmd->rule_cnt = cnt;
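/* Extract the tuple fields used by aRFS (ether proto, L4 proto,
 * IP addresses and destination port) from the dissected flow keys.
 */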
6215 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6216 struct hclge_fd_rule_tuples *tuples)
6218 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6219 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6221 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6222 tuples->ip_proto = fkeys->basic.ip_proto;
6223 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6225 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6226 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6227 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6231 for (i = 0; i < IPV6_SIZE; i++) {
6232 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6233 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6238 /* traverse all rules and check whether an existing rule has the same tuples */
6239 static struct hclge_fd_rule *
6240 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6241 const struct hclge_fd_rule_tuples *tuples)
6243 struct hclge_fd_rule *rule = NULL;
6244 struct hlist_node *node;
6246 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6247 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6254 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6255 struct hclge_fd_rule *rule)
6257 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6258 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6259 BIT(INNER_SRC_PORT);
6262 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6263 if (tuples->ether_proto == ETH_P_IP) {
6264 if (tuples->ip_proto == IPPROTO_TCP)
6265 rule->flow_type = TCP_V4_FLOW;
6267 rule->flow_type = UDP_V4_FLOW;
6269 if (tuples->ip_proto == IPPROTO_TCP)
6270 rule->flow_type = TCP_V6_FLOW;
6272 rule->flow_type = UDP_V6_FLOW;
6274 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6275 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
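/* aRFS callback: look up a rule matching the flow's tuples. Create a
 * new rule if none exists, re-target an existing rule if its queue
 * differs, otherwise do nothing. Returns the rule location, which is
 * used as the flow id.
 */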
6278 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6279 u16 flow_id, struct flow_keys *fkeys)
6281 struct hclge_vport *vport = hclge_get_vport(handle);
6282 struct hclge_fd_rule_tuples new_tuples;
6283 struct hclge_dev *hdev = vport->back;
6284 struct hclge_fd_rule *rule;
6289 if (!hnae3_dev_fd_supported(hdev))
6292 memset(&new_tuples, 0, sizeof(new_tuples));
6293 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6295 spin_lock_bh(&hdev->fd_rule_lock);
6297 /* when an fd rule added by the user already exists,
6298 * arfs should not work
6300 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6301 spin_unlock_bh(&hdev->fd_rule_lock);
6305 /* check whether a flow director filter already exists for this flow:
6306 * if not, create a new filter for it;
6307 * if a filter exists with a different queue id, modify the filter;
6308 * if a filter exists with the same queue id, do nothing
6310 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6312 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6313 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6314 spin_unlock_bh(&hdev->fd_rule_lock);
6318 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6320 spin_unlock_bh(&hdev->fd_rule_lock);
6324 set_bit(bit_id, hdev->fd_bmap);
6325 rule->location = bit_id;
6326 rule->flow_id = flow_id;
6327 rule->queue_id = queue_id;
6328 hclge_fd_build_arfs_rule(&new_tuples, rule);
6329 ret = hclge_fd_config_rule(hdev, rule);
6331 spin_unlock_bh(&hdev->fd_rule_lock);
6336 return rule->location;
6339 spin_unlock_bh(&hdev->fd_rule_lock);
6341 if (rule->queue_id == queue_id)
6342 return rule->location;
6344 tmp_queue_id = rule->queue_id;
6345 rule->queue_id = queue_id;
6346 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6348 rule->queue_id = tmp_queue_id;
6352 return rule->location;
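/* Walk the aRFS rules, unlink those the stack reports as expirable,
 * and delete their TCAM entries outside the rule lock.
 */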
6355 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6357 #ifdef CONFIG_RFS_ACCEL
6358 struct hnae3_handle *handle = &hdev->vport[0].nic;
6359 struct hclge_fd_rule *rule;
6360 struct hlist_node *node;
6361 HLIST_HEAD(del_list);
6363 spin_lock_bh(&hdev->fd_rule_lock);
6364 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6365 spin_unlock_bh(&hdev->fd_rule_lock);
6368 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6369 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6370 rule->flow_id, rule->location)) {
6371 hlist_del_init(&rule->rule_node);
6372 hlist_add_head(&rule->rule_node, &del_list);
6373 hdev->hclge_fd_rule_num--;
6374 clear_bit(rule->location, hdev->fd_bmap);
6377 spin_unlock_bh(&hdev->fd_rule_lock);
6379 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6380 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6381 rule->location, NULL, false);
6387 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6389 #ifdef CONFIG_RFS_ACCEL
6390 struct hclge_vport *vport = hclge_get_vport(handle);
6391 struct hclge_dev *hdev = vport->back;
6393 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6394 hclge_del_all_fd_entries(handle, true);
6398 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6400 struct hclge_vport *vport = hclge_get_vport(handle);
6401 struct hclge_dev *hdev = vport->back;
6403 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6404 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6407 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6409 struct hclge_vport *vport = hclge_get_vport(handle);
6410 struct hclge_dev *hdev = vport->back;
6412 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6415 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6417 struct hclge_vport *vport = hclge_get_vport(handle);
6418 struct hclge_dev *hdev = vport->back;
6420 return hdev->rst_stats.hw_reset_done_cnt;
6423 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6425 struct hclge_vport *vport = hclge_get_vport(handle);
6426 struct hclge_dev *hdev = vport->back;
6429 hdev->fd_en = enable;
6430 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6432 hclge_del_all_fd_entries(handle, clear);
6434 hclge_restore_fd_entries(handle);
6437 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6439 struct hclge_desc desc;
6440 struct hclge_config_mac_mode_cmd *req =
6441 (struct hclge_config_mac_mode_cmd *)desc.data;
6445 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6448 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6449 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6450 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6451 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6452 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6453 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6454 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6455 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6456 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6457 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6460 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6462 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6464 dev_err(&hdev->pdev->dev,
6465 "mac enable fail, ret =%d.\n", ret);
6468 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6469 u8 switch_param, u8 param_mask)
6471 struct hclge_mac_vlan_switch_cmd *req;
6472 struct hclge_desc desc;
6476 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6477 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6479 /* read current config parameter */
6480 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6482 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6483 req->func_id = cpu_to_le32(func_id);
6485 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6487 dev_err(&hdev->pdev->dev,
6488 "read mac vlan switch parameter fail, ret = %d\n", ret);
6492 /* modify and write new config parameter */
6493 hclge_cmd_reuse_desc(&desc, false);
6494 req->switch_param = (req->switch_param & param_mask) | switch_param;
6495 req->param_mask = param_mask;
6497 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6499 dev_err(&hdev->pdev->dev,
6500 "set mac vlan switch parameter fail, ret = %d\n", ret);
6504 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6507 #define HCLGE_PHY_LINK_STATUS_NUM 200
6509 struct phy_device *phydev = hdev->hw.mac.phydev;
6514 ret = phy_read_status(phydev);
6516 dev_err(&hdev->pdev->dev,
6517 "phy update link status fail, ret = %d\n", ret);
6521 if (phydev->link == link_ret)
6524 msleep(HCLGE_LINK_STATUS_MS);
6525 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6528 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6530 #define HCLGE_MAC_LINK_STATUS_NUM 100
6536 ret = hclge_get_mac_link_status(hdev);
6539 else if (ret == link_ret)
6542 msleep(HCLGE_LINK_STATUS_MS);
6543 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6547 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6550 #define HCLGE_LINK_STATUS_DOWN 0
6551 #define HCLGE_LINK_STATUS_UP 1
6555 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6558 hclge_phy_link_status_wait(hdev, link_ret);
6560 return hclge_mac_link_status_wait(hdev, link_ret);
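/* Enable or disable MAC (app) loopback by read-modify-writing the MAC
 * mode configuration.
 */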
6563 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6565 struct hclge_config_mac_mode_cmd *req;
6566 struct hclge_desc desc;
6570 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6571 /* 1 Read out the MAC mode config at first */
6572 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6573 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6575 dev_err(&hdev->pdev->dev,
6576 "mac loopback get fail, ret =%d.\n", ret);
6580 /* 2 Then setup the loopback flag */
6581 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6582 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6583 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6584 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6586 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6588 /* 3 Config mac work mode with loopback flag
6589 * and its original configuration parameters
6591 hclge_cmd_reuse_desc(&desc, false);
6592 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6594 dev_err(&hdev->pdev->dev,
6595 "mac loopback set fail, ret =%d.\n", ret);
6599 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6600 enum hnae3_loop loop_mode)
6602 #define HCLGE_SERDES_RETRY_MS 10
6603 #define HCLGE_SERDES_RETRY_NUM 100
6605 struct hclge_serdes_lb_cmd *req;
6606 struct hclge_desc desc;
6610 req = (struct hclge_serdes_lb_cmd *)desc.data;
6611 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6613 switch (loop_mode) {
6614 case HNAE3_LOOP_SERIAL_SERDES:
6615 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6617 case HNAE3_LOOP_PARALLEL_SERDES:
6618 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6621 dev_err(&hdev->pdev->dev,
6622 "unsupported serdes loopback mode %d\n", loop_mode);
6627 req->enable = loop_mode_b;
6628 req->mask = loop_mode_b;
6630 req->mask = loop_mode_b;
6633 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6635 dev_err(&hdev->pdev->dev,
6636 "serdes loopback set fail, ret = %d\n", ret);
6641 msleep(HCLGE_SERDES_RETRY_MS);
6642 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6644 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6646 dev_err(&hdev->pdev->dev,
6647 "serdes loopback get, ret = %d\n", ret);
6650 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6651 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6653 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6654 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6656 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6657 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6663 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6664 enum hnae3_loop loop_mode)
6668 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6672 hclge_cfg_mac_mode(hdev, en);
6674 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6676 dev_err(&hdev->pdev->dev,
6677 "serdes loopback config mac mode timeout\n");
static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
				     struct phy_device *phydev)
{
	int ret;

	if (!phydev->suspended) {
		ret = phy_suspend(phydev);
		if (ret)
			return ret;
	}

	ret = phy_resume(phydev);
	if (ret)
		return ret;

	return phy_loopback(phydev, true);
}

static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
				      struct phy_device *phydev)
{
	int ret;

	ret = phy_loopback(phydev, false);
	if (ret)
		return ret;

	return phy_suspend(phydev);
}

static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int ret;

	if (!phydev)
		return -ENOTSUPP;

	if (en)
		ret = hclge_enable_phy_loopback(hdev, phydev);
	else
		ret = hclge_disable_phy_loopback(hdev, phydev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set phy loopback fail, ret = %d\n", ret);
		return ret;
	}

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"phy loopback config mac mode timeout\n");

	return ret;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}
static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
	 * the same, the packets are looped back in the SSU. If SSU loopback
	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
	 */
	if (hdev->pdev->revision >= 0x21) {
		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);

		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
						HCLGE_SWITCH_ALW_LPBK_MASK);
		if (ret)
			return ret;
	}

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
		break;
	case HNAE3_LOOP_PHY:
		ret = hclge_set_phy_loopback(hdev, en);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}
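/* Clear app and both serdes loopback modes, so that a stale loopback
 * configuration does not persist across a MAC (re)initialization.
 */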
static int hclge_set_default_loopback(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_app_loopback(hdev, false);
	if (ret)
		return ret;

	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
	if (ret)
		return ret;

	return hclge_cfg_serdes_loopback(hdev, false,
					 HNAE3_LOOP_PARALLEL_SERDES);
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}
static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}

static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev, 0);
	} else {
		/* Set the DOWN flag here to disable link updating */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclge_flush_link_update(hdev);
	}
}
static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}
static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_clear_arfs_rules(handle);

	/* If it is not PF reset, the firmware will disable the MAC,
	 * so we only need to stop the phy here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		hclge_update_link_status(hdev);
		return;
	}

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclge_reset_tqp(handle, i);

	hclge_config_mac_tnl_int(hdev, false);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}
int hclge_vport_start(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;

	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
		if (vport->vport_id) {
			hclge_restore_mac_table_common(vport);
			hclge_restore_vport_vlan_table(vport);
		} else {
			hclge_restore_hw_table(hdev);
		}
	}

	clear_bit(vport->vport_id, hdev->vport_config_block);

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if (!resp_code || resp_code == 1)
			return 0;
		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
			 resp_code == HCLGE_ADD_MC_OVERFLOW)
			return -ENOSPC;

		dev_err(&hdev->pdev->dev,
			"add mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"remove mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	}

	dev_err(&hdev->pdev->dev,
		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);

	return -EINVAL;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}
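/* Look up a MAC-VLAN table entry. Multicast entries carry the VF
 * bitmap, so they need all three descriptors; unicast lookups fit in
 * a single descriptor.
 */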
static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
			ret);
		return ret;
	}

	*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}

static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "failed to alloc umv space, want %u, get %u\n",
			 hdev->wanted_umv_size, allocated_size);

	hdev->max_umv_size = allocated_size;
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);

	return 0;
}
static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->vport_lock);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
	mutex_unlock(&hdev->vport_lock);
}

static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	if (need_lock)
		mutex_lock(&hdev->vport_lock);

	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);

	if (need_lock)
		mutex_unlock(&hdev->vport_lock);

	return is_full;
}
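/* Account for one unicast MAC table entry being allocated or freed.
 * Each vport consumes its private quota (priv_umv_size) first and only
 * then dips into the shared pool (share_umv_size).
 */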
static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
}
static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
						  const u8 *mac_addr)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}
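/* State machine for a cached MAC node. A TO_ADD request on a TO_DEL
 * node makes it ACTIVE again (the address is still in hardware); a
 * TO_DEL request on a TO_ADD node just drops the node, since the
 * address was never written to hardware.
 */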
static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
				  enum HCLGE_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGE_MAC_TO_ADD:
		if (mac_node->state == HCLGE_MAC_TO_DEL)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGE_MAC_TO_DEL:
		if (mac_node->state == HCLGE_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGE_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * ACTIVE.
	 */
	case HCLGE_MAC_ACTIVE:
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	}
}
int hclge_update_mac_list(struct hclge_vport *vport,
			  enum HCLGE_MAC_NODE_STATE state,
			  enum HCLGE_MAC_ADDR_TYPE mac_type,
			  const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclge_find_mac_node(list, addr);
	if (mac_node) {
		hclge_update_mac_node(mac_node, state);
		spin_unlock_bh(&vport->mac_list_lock);
		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
		return 0;
	}

	/* if this address is never added, unnecessary to delete */
	if (state == HCLGE_MAC_TO_DEL) {
		spin_unlock_bh(&vport->mac_list_lock);
		dev_err(&hdev->pdev->dev,
			"failed to delete address %pM from mac list\n",
			addr);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&vport->mac_list_lock);
		return -ENOMEM;
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&vport->mac_list_lock);

	return 0;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			addr, is_zero_ether_addr(addr),
			is_broadcast_ether_addr(addr),
			is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is inexistent. Repeated unicast entry
	 * is not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		mutex_lock(&hdev->vport_lock);
		if (!hclge_is_umv_space_full(vport, false)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			mutex_unlock(&hdev->vport_lock);
			return ret;
		}
		mutex_unlock(&hdev->vport_lock);

		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
				hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret) {
		mutex_lock(&hdev->vport_lock);
		hclge_update_umv_space(vport, true);
		mutex_unlock(&hdev->vport_lock);
	} else if (ret == -ENOENT) {
		ret = 0;
	}

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	/* if already overflow, not to print each time */
	if (status == -ENOSPC &&
	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (status)
			return status;

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfids are zero, update the vfid bitmap */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else if (status == -ENOENT) {
		status = 0;
	}

	return status;
}
static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
				      struct list_head *list,
				      int (*sync)(struct hclge_vport *,
						  const unsigned char *))
{
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = sync(vport, mac_node->mac_addr);
		if (!ret) {
			mac_node->state = HCLGE_MAC_ACTIVE;
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}

static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
					struct list_head *list,
					int (*unsync)(struct hclge_vport *,
						      const unsigned char *))
{
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = unsync(vport, mac_node->mac_addr);
		if (!ret || ret == -ENOENT) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}
static bool hclge_sync_from_add_list(struct list_head *add_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	bool all_added = true;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			all_added = false;

		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * during the time window of adding the mac address into the
		 * mac table. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so it will be removed next time; else it must be
		 * TO_ADD, meaning this address has not been written to the
		 * mac table yet, so just remove the mac node.
		 */
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclge_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_DEL;
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}

	return all_added;
}
static void hclge_sync_from_del_list(struct list_head *del_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * a new TO_ADD request was received during the time
			 * window of configuring the mac address. The mac node
			 * state is TO_ADD, and the address is already in the
			 * hardware (because the delete failed), so we just
			 * need to change the mac node state to ACTIVE.
			 */
			new_node->state = HCLGE_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}
static void hclge_update_overflow_flags(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type,
					bool is_all_added)
{
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
	} else {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
	}
}
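/* Push pending MAC list changes for a vport to hardware. Nodes are
 * first moved to temporary add/del lists under the spinlock, then the
 * firmware commands are issued outside the lock, and finally any
 * failed entries are merged back for a later retry.
 */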
static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
				       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;
	bool all_added;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addr to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addr outside the spin lock
	 */
	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;
			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&vport->mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_uc_addr_common);
		hclge_sync_vport_mac_list(vport, &tmp_add_list,
					  hclge_add_uc_addr_common);
	} else {
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_mc_addr_common);
		hclge_sync_vport_mac_list(vport, &tmp_add_list,
					  hclge_add_mc_addr_common);
	}

	/* if some mac addresses were added/deleted fail, move back to the
	 * mac_list, and retry at next time.
	 */
	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);
	all_added = hclge_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_update_overflow_flags(vport, mac_type, all_added);
}
static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(vport->vport_id, hdev->vport_config_block))
		return false;

	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
		return true;

	return false;
}

static void hclge_sync_mac_table(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (!hclge_need_sync_mac_table(vport))
			continue;

		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
	}
}
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_mac_node *mac_cfg, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;
	int ret;

	if (mac_type == HCLGE_MAC_ADDR_UC) {
		list = &vport->uc_mac_list;
		unsync = hclge_rm_uc_addr_common;
	} else {
		list = &vport->mc_mac_list;
		unsync = hclge_rm_mc_addr_common;
	}

	INIT_LIST_HEAD(&tmp_del_list);

	if (!is_del_list)
		set_bit(vport->vport_id, hdev->vport_config_block);

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		switch (mac_cfg->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_del(&mac_cfg->node);
			list_add_tail(&mac_cfg->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			if (is_del_list) {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
		ret = unsync(vport, mac_cfg->mac_addr);
		if (!ret || ret == -ENOENT) {
			/* clear all mac addr from hardware, but remain these
			 * mac addr in the mac list, and restore them after
			 * vf reset finished.
			 */
			if (!is_del_list &&
			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
				mac_cfg->state = HCLGE_MAC_TO_ADD;
			} else {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
		} else if (is_del_list) {
			mac_cfg->state = HCLGE_MAC_TO_DEL;
		}
	}

	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);

	spin_unlock_bh(&vport->mac_list_lock);
}
/* remove all mac addresses when uninitializing */
static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	INIT_LIST_HEAD(&tmp_del_list);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			list_del(&mac_node->node);
			kfree(mac_node);
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	if (mac_type == HCLGE_MAC_ADDR_UC)
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_uc_addr_common);
	else
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_mc_addr_common);

	if (!list_empty(&tmp_del_list))
		dev_warn(&hdev->pdev->dev,
			 "uninit %s mac list for vport %u not completely.\n",
			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
			 vport->vport_id);

	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}
static void hclge_uninit_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
	}
}

static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%u.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}
static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
				     u8 *mac_addr)
{
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int i;

	if (is_zero_ether_addr(mac_addr))
		return false;

	memset(&req, 0, sizeof(req));
	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
	req.egress_port = cpu_to_le16(egress_port);
	hclge_prepare_mac_addr(&req, mac_addr, false);

	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
		return true;

	vf_idx += HCLGE_VF_VPORT_START_NUM;
	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
		if (i != vf_idx &&
		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
			return true;

	return false;
}
static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
			    u8 *mac_addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
		dev_info(&hdev->pdev->dev,
			 "Specified MAC(=%pM) is same as before, no change committed!\n",
			 mac_addr);
		return 0;
	}

	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
			mac_addr);
		return -EEXIST;
	}

	ether_addr_copy(vport->vf_info.mac, mac_addr);

	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		dev_info(&hdev->pdev->dev,
			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
			 vf, mac_addr);
		return hclge_inform_reset_assert_to_vf(vport);
	}

	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
		 vf, mac_addr);
	return 0;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
				       const u8 *old_addr, const u8 *new_addr)
{
	struct list_head *list = &vport->uc_mac_list;
	struct hclge_mac_node *old_node, *new_node;

	new_node = hclge_find_mac_node(list, new_addr);
	if (!new_node) {
		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
		if (!new_node)
			return -ENOMEM;

		new_node->state = HCLGE_MAC_TO_ADD;
		ether_addr_copy(new_node->mac_addr, new_addr);
		list_add(&new_node->node, list);
	} else {
		if (new_node->state == HCLGE_MAC_TO_DEL)
			new_node->state = HCLGE_MAC_ACTIVE;

		/* make sure the new addr is at the list head, otherwise the
		 * dev addr may not be re-added into the mac table due to the
		 * umv space limitation after a global/imp reset, which clears
		 * the mac table in hardware.
		 */
		list_move(&new_node->node, list);
	}

	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
		old_node = hclge_find_mac_node(list, old_addr);
		if (old_node) {
			if (old_node->state == HCLGE_MAC_TO_ADD) {
				list_del(&old_node->node);
				kfree(old_node);
			} else {
				old_node->state = HCLGE_MAC_TO_DEL;
			}
		}
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	return 0;
}
static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned char *old_addr = NULL;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"change uc mac err! invalid mac: %pM.\n",
			new_addr);
		return -EINVAL;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to configure mac pause address, ret = %d\n",
			ret);
		return ret;
	}

	if (!is_first)
		old_addr = hdev->hw.mac.mac_addr;

	spin_lock_bh(&vport->mac_list_lock);
	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to change the mac addr:%pM, ret = %d\n",
			new_addr, ret);
		spin_unlock_bh(&vport->mac_list_lock);

		if (!is_first)
			hclge_pause_addr_cfg(hdev, old_addr);

		return ret;
	}
	/* we must update dev addr with spin lock protect, preventing dev addr
	 * being removed by set_rx_mode path.
	 */
	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
	spin_unlock_bh(&vport->mac_list_lock);

	hclge_task_schedule(hdev, 0);

	return 0;
}
static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}

static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	/* read current vlan filter parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vlan filter config, ret = %d.\n", ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_cmd_reuse_desc(&desc, false);
	req->vlan_fe = filter_en ?
		(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
			ret);

	return ret;
}
#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
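/* Revision 0x21 and later support separate ingress/egress filter
 * control; older revisions only expose the V1 egress filter bit.
 */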
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->pdev->revision >= 0x21) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable, 0);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable, 0);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
					   0);
	}
	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
				    bool is_kill, u16 vlan,
				    __be16 proto)
{
	struct hclge_vport *vport = &hdev->vport[vfid];
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	/* if vf vlan table is full, firmware will close vf vlan filter, it
	 * is unable and unnecessary to add new vlan id to vf vlan filter.
	 * If spoof check is enabled, and vf vlan is full, it shouldn't add
	 * a new vlan, because tx packets with these vlan ids will be dropped.
	 */
	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
		if (vport->vf_info.spoofchk && vlan) {
			dev_err(&hdev->pdev->dev,
				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
			return -EPERM;
		}
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			set_bit(vfid, hdev->vf_vlan_full);
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%u.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req0->resp_code)
			return 0;

		/* vf vlan filter is disabled when vf vlan table is full,
		 * then new vlan id will not be added into vf vlan table.
		 * Just return 0 without warning, avoid massive verbose
		 * print logs when unload.
		 */
		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%u.\n",
			req0->resp_code);
	}

	return -EIO;
}
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
			   HCLGE_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %u vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %u is already in vlan %u\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %u is not in vlan %u\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
			HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
			HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		vport->txvlan_cfg.accept_tag1 = false;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20); newer revisions support them, but
	 * these two fields can not be configured by user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}

static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	clear_bit(vport->vport_id, hdev->vf_vlan_full);
}

void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}
void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	u16 vlan_proto;
	u16 vlan_id;
	u16 state;
	int ret;

	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
	state = vport->port_base_vlan_cfg.state;

	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
					 vport->vport_id, vlan_id,
					 false);
		return;
	}

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
					       vport->vport_id,
					       vlan->vlan_id, false);
		if (ret)
			break;
		vlan->hd_tbl_status = true;
	}
}
/* For global reset and imp reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, then they
 * can be restored in the service task after reset completes. Further,
 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
 * be restored after reset, so just remove these mac nodes from mac_list.
 */
static void hclge_mac_node_convert_for_reset(struct list_head *list)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_ADD;
		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

void hclge_restore_mac_table_common(struct hclge_vport *vport)
{
	spin_lock_bh(&vport->mac_list_lock);

	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	spin_unlock_bh(&vport->mac_list_lock);
}
static void hclge_restore_hw_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;

	hclge_restore_mac_table_common(vport);
	hclge_restore_vport_vlan_table(vport);
	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);

	hclge_restore_fd_entries(handle);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						false);
	}

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
	if (ret)
		return ret;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(old_vlan_info->vlan_proto),
					       vport->vport_id,
					       old_vlan_info->vlan_tag,
					       true);
		if (ret)
			return ret;

		goto update;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

	/* update state only when disable/enable port based VLAN */
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

update:
	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;

	return 0;
}
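/* Map a requested VLAN against the current port based VLAN config to
 * one of NOCHANGE/ENABLE/DISABLE/MODIFY, which drives
 * hclge_update_port_base_vlan_cfg().
 */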
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;

		return HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	if (!vlan)
		return HNAE3_PORT_BASE_VLAN_DISABLE;
	else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
		return HNAE3_PORT_BASE_VLAN_NOCHANGE;

	return HNAE3_PORT_BASE_VLAN_MODIFY;
}
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vfid);
	if (!vport)
		return -EINVAL;

	/* qos is a 3-bit value, so it cannot be bigger than 7 */
	if (vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);
	} else {
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							vport->vport_id, state,
							vlan, qos,
							ntohs(proto));
		return ret;
	}
}
9025 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9027 struct hclge_vlan_info *vlan_info;
9028 struct hclge_vport *vport;
/* clear the port based VLAN for all VFs */
9033 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9034 vport = &hdev->vport[vf];
9035 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9037 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9039 vlan_info->vlan_tag, true);
9041 dev_err(&hdev->pdev->dev,
9042 "failed to clear vf vlan for vf%d, ret = %d\n",
9043 vf - HCLGE_VF_VPORT_START_NUM, ret);
9047 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9048 u16 vlan_id, bool is_kill)
9050 struct hclge_vport *vport = hclge_get_vport(handle);
9051 struct hclge_dev *hdev = vport->back;
9052 bool writen_to_tbl = false;
/* When the device is resetting, the firmware is unable to handle the
 * mailbox. Just record the vlan id, and remove it after the reset
 * finishes.
 */
9059 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
9060 set_bit(vlan_id, vport->vlan_del_fail_bmap);
/* When port based VLAN is enabled, we use the port based VLAN as the
 * VLAN filter entry. In this case, we don't update the VLAN filter
 * table when the user adds a new VLAN or removes an existing one; we
 * just update the vport VLAN list. The VLAN ids in the list are not
 * written to the VLAN filter table until port based VLAN is disabled.
 */
9070 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9071 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9073 writen_to_tbl = true;
9078 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9080 hclge_add_vport_vlan_table(vport, vlan_id,
9082 } else if (is_kill) {
/* when removing the hw vlan filter failed, record the vlan id,
 * and try to remove it from hw later, to be consistent
 * with the stack
 */
9087 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9092 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9094 #define HCLGE_MAX_SYNC_COUNT 60
9096 int i, ret, sync_cnt = 0;
/* retry the failed VLAN deletions for every vport */
9100 for (i = 0; i < hdev->num_alloc_vport; i++) {
9101 struct hclge_vport *vport = &hdev->vport[i];
9103 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9105 while (vlan_id != VLAN_N_VID) {
9106 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9107 vport->vport_id, vlan_id,
9109 if (ret && ret != -EINVAL)
9112 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9113 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9116 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9119 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9125 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9127 struct hclge_config_max_frm_size_cmd *req;
9128 struct hclge_desc desc;
9130 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9132 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9133 req->max_frm_size = cpu_to_le16(new_mps);
9134 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9136 return hclge_cmd_send(&hdev->hw, &desc, 1);
9139 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9141 struct hclge_vport *vport = hclge_get_vport(handle);
9143 return hclge_set_vport_mtu(vport, new_mtu);
9146 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9148 struct hclge_dev *hdev = vport->back;
9149 int i, max_frm_size, ret;
/* HW supports 2 layers of VLAN */
9152 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
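/* e.g. for the default MTU of 1500 this gives
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2 * 4 (VLAN_HLEN) = 1526
 */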
9153 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9154 max_frm_size > HCLGE_MAC_MAX_FRAME)
9157 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9158 mutex_lock(&hdev->vport_lock);
9159 /* VF's mps must fit within hdev->mps */
9160 if (vport->vport_id && max_frm_size > hdev->mps) {
9161 mutex_unlock(&hdev->vport_lock);
9163 } else if (vport->vport_id) {
9164 vport->mps = max_frm_size;
9165 mutex_unlock(&hdev->vport_lock);
/* PF's mps must be no less than the mps of every VF */
9170 for (i = 1; i < hdev->num_alloc_vport; i++)
9171 if (max_frm_size < hdev->vport[i].mps) {
9172 mutex_unlock(&hdev->vport_lock);
9176 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9178 ret = hclge_set_mac_mtu(hdev, max_frm_size);
9180 dev_err(&hdev->pdev->dev,
9181 "Change mtu fail, ret =%d\n", ret);
9185 hdev->mps = max_frm_size;
9186 vport->mps = max_frm_size;
9188 ret = hclge_buffer_alloc(hdev);
9190 dev_err(&hdev->pdev->dev,
9191 "Allocate buffer fail, ret =%d\n", ret);
9194 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9195 mutex_unlock(&hdev->vport_lock);
9199 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9202 struct hclge_reset_tqp_queue_cmd *req;
9203 struct hclge_desc desc;
9206 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9208 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9209 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9211 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9213 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9215 dev_err(&hdev->pdev->dev,
9216 "Send tqp reset cmd error, status =%d\n", ret);
9223 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9225 struct hclge_reset_tqp_queue_cmd *req;
9226 struct hclge_desc desc;
9229 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9231 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9232 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9234 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9236 dev_err(&hdev->pdev->dev,
9237 "Get reset status error, status =%d\n", ret);
9241 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
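/* TQP reset handshake used by hclge_reset_tqp() and hclge_reset_vf_queue()
 * below: assert the reset request (hclge_send_reset_tqp_cmd with enable set),
 * poll hclge_get_reset_status() until the hw reports ready_to_reset (at most
 * HCLGE_TQP_RESET_TRY_TIMES attempts, roughly 1ms apart), then send the
 * command again with enable cleared to deassert the soft reset.
 */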
9244 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9246 struct hnae3_queue *queue;
9247 struct hclge_tqp *tqp;
9249 queue = handle->kinfo.tqp[queue_id];
9250 tqp = container_of(queue, struct hclge_tqp, q);
9255 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9257 struct hclge_vport *vport = hclge_get_vport(handle);
9258 struct hclge_dev *hdev = vport->back;
9259 int reset_try_times = 0;
9264 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9266 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9268 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9272 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9274 dev_err(&hdev->pdev->dev,
9275 "Send reset tqp cmd fail, ret = %d\n", ret);
9279 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9280 reset_status = hclge_get_reset_status(hdev, queue_gid);
9284 /* Wait for tqp hw reset */
9285 usleep_range(1000, 1200);
9288 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9289 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9293 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9295 dev_err(&hdev->pdev->dev,
9296 "Deassert the soft reset fail, ret = %d\n", ret);
9301 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9303 struct hclge_dev *hdev = vport->back;
9304 int reset_try_times = 0;
9309 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9311 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9313 dev_warn(&hdev->pdev->dev,
9314 "Send reset tqp cmd fail, ret = %d\n", ret);
9318 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9319 reset_status = hclge_get_reset_status(hdev, queue_gid);
9323 /* Wait for tqp hw reset */
9324 usleep_range(1000, 1200);
9327 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9328 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9332 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9334 dev_warn(&hdev->pdev->dev,
9335 "Deassert the soft reset fail, ret = %d\n", ret);
9338 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9340 struct hclge_vport *vport = hclge_get_vport(handle);
9341 struct hclge_dev *hdev = vport->back;
9343 return hdev->fw_version;
9346 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9348 struct phy_device *phydev = hdev->hw.mac.phydev;
9353 phy_set_asym_pause(phydev, rx_en, tx_en);
9356 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9360 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9363 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9365 dev_err(&hdev->pdev->dev,
9366 "configure pauseparam error, ret = %d.\n", ret);
9371 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9373 struct phy_device *phydev = hdev->hw.mac.phydev;
9374 u16 remote_advertising = 0;
9375 u16 local_advertising;
9376 u32 rx_pause, tx_pause;
9379 if (!phydev->link || !phydev->autoneg)
9382 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9385 remote_advertising = LPA_PAUSE_CAP;
9387 if (phydev->asym_pause)
9388 remote_advertising |= LPA_PAUSE_ASYM;
9390 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9391 remote_advertising);
9392 tx_pause = flowctl & FLOW_CTRL_TX;
9393 rx_pause = flowctl & FLOW_CTRL_RX;
9395 if (phydev->duplex == HCLGE_MAC_HALF) {
9400 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
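/* hclge_cfg_flowctrl() above re-resolves the pause configuration from the
 * PHY autonegotiation result: the local advertisement is taken from
 * phydev->advertising, the link partner's pause capabilities are mapped to
 * LPA_PAUSE_CAP/LPA_PAUSE_ASYM, and mii_resolve_flowctrl_fdx() combines
 * them into the FLOW_CTRL_TX/FLOW_CTRL_RX bits applied to the MAC.
 */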
9403 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9404 u32 *rx_en, u32 *tx_en)
9406 struct hclge_vport *vport = hclge_get_vport(handle);
9407 struct hclge_dev *hdev = vport->back;
9408 struct phy_device *phydev = hdev->hw.mac.phydev;
9410 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9412 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9418 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9421 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9424 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9433 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9434 u32 rx_en, u32 tx_en)
9437 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9438 else if (rx_en && !tx_en)
9439 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9440 else if (!rx_en && tx_en)
9441 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9443 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9445 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
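/* Mapping recorded by hclge_record_user_pauseparam() above:
 *   rx_en && tx_en   -> HCLGE_FC_FULL
 *   rx_en && !tx_en  -> HCLGE_FC_RX_PAUSE
 *   !rx_en && tx_en  -> HCLGE_FC_TX_PAUSE
 *   !rx_en && !tx_en -> HCLGE_FC_NONE
 */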
9448 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9449 u32 rx_en, u32 tx_en)
9451 struct hclge_vport *vport = hclge_get_vport(handle);
9452 struct hclge_dev *hdev = vport->back;
9453 struct phy_device *phydev = hdev->hw.mac.phydev;
9457 fc_autoneg = hclge_get_autoneg(handle);
9458 if (auto_neg != fc_autoneg) {
9459 dev_info(&hdev->pdev->dev,
9460 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9465 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9466 dev_info(&hdev->pdev->dev,
9467 "Priority flow control enabled. Cannot set link flow control.\n");
9471 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9473 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9476 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9479 return phy_start_aneg(phydev);
9484 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9485 u8 *auto_neg, u32 *speed, u8 *duplex)
9487 struct hclge_vport *vport = hclge_get_vport(handle);
9488 struct hclge_dev *hdev = vport->back;
9491 *speed = hdev->hw.mac.speed;
9493 *duplex = hdev->hw.mac.duplex;
9495 *auto_neg = hdev->hw.mac.autoneg;
9498 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9501 struct hclge_vport *vport = hclge_get_vport(handle);
9502 struct hclge_dev *hdev = vport->back;
/* When the nic is down, the service task is not running and does not
 * update the port information every second. Query the port information
 * before returning the media type, to ensure the media information is
 * correct.
 */
9508 hclge_update_port_info(hdev);
9511 *media_type = hdev->hw.mac.media_type;
9514 *module_type = hdev->hw.mac.module_type;
9517 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9518 u8 *tp_mdix_ctrl, u8 *tp_mdix)
9520 struct hclge_vport *vport = hclge_get_vport(handle);
9521 struct hclge_dev *hdev = vport->back;
9522 struct phy_device *phydev = hdev->hw.mac.phydev;
9523 int mdix_ctrl, mdix, is_resolved;
9524 unsigned int retval;
9527 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9528 *tp_mdix = ETH_TP_MDI_INVALID;
9532 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9534 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9535 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9536 HCLGE_PHY_MDIX_CTRL_S);
9538 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9539 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9540 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9542 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9544 switch (mdix_ctrl) {
9546 *tp_mdix_ctrl = ETH_TP_MDI;
9549 *tp_mdix_ctrl = ETH_TP_MDI_X;
9552 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9555 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9560 *tp_mdix = ETH_TP_MDI_INVALID;
9562 *tp_mdix = ETH_TP_MDI_X;
9564 *tp_mdix = ETH_TP_MDI;
9567 static void hclge_info_show(struct hclge_dev *hdev)
9569 struct device *dev = &hdev->pdev->dev;
9571 dev_info(dev, "PF info begin:\n");
9573 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9574 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9575 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9576 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
9578 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9579 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9580 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9581 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9582 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9583 dev_info(dev, "This is %s PF\n",
9584 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9585 dev_info(dev, "DCB %s\n",
9586 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9587 dev_info(dev, "MQPRIO %s\n",
9588 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9590 dev_info(dev, "PF info end.\n");
9593 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9594 struct hclge_vport *vport)
9596 struct hnae3_client *client = vport->nic.client;
9597 struct hclge_dev *hdev = ae_dev->priv;
9598 int rst_cnt = hdev->rst_stats.reset_cnt;
9601 ret = client->ops->init_instance(&vport->nic);
9605 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9606 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9607 rst_cnt != hdev->rst_stats.reset_cnt) {
9612 /* Enable nic hw error interrupts */
9613 ret = hclge_config_nic_hw_error(hdev, true);
9615 dev_err(&ae_dev->pdev->dev,
9616 "fail(%d) to enable hw error interrupts\n", ret);
9620 hnae3_set_client_init_flag(client, ae_dev, 1);
9622 if (netif_msg_drv(&hdev->vport->nic))
9623 hclge_info_show(hdev);
9628 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9629 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9630 msleep(HCLGE_WAIT_RESET_DONE);
9632 client->ops->uninit_instance(&vport->nic, 0);
9637 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9638 struct hclge_vport *vport)
9640 struct hclge_dev *hdev = ae_dev->priv;
9641 struct hnae3_client *client;
9645 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9649 client = hdev->roce_client;
9650 ret = hclge_init_roce_base_info(vport);
9654 rst_cnt = hdev->rst_stats.reset_cnt;
9655 ret = client->ops->init_instance(&vport->roce);
9659 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9660 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9661 rst_cnt != hdev->rst_stats.reset_cnt) {
9666 /* Enable roce ras interrupts */
9667 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9669 dev_err(&ae_dev->pdev->dev,
9670 "fail(%d) to enable roce ras interrupts\n", ret);
9674 hnae3_set_client_init_flag(client, ae_dev, 1);
9679 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9680 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9681 msleep(HCLGE_WAIT_RESET_DONE);
9683 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9688 static int hclge_init_client_instance(struct hnae3_client *client,
9689 struct hnae3_ae_dev *ae_dev)
9691 struct hclge_dev *hdev = ae_dev->priv;
9692 struct hclge_vport *vport;
9695 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9696 vport = &hdev->vport[i];
9698 switch (client->type) {
9699 case HNAE3_CLIENT_KNIC:
9700 hdev->nic_client = client;
9701 vport->nic.client = client;
9702 ret = hclge_init_nic_client_instance(ae_dev, vport);
9706 ret = hclge_init_roce_client_instance(ae_dev, vport);
9711 case HNAE3_CLIENT_ROCE:
9712 if (hnae3_dev_roce_supported(hdev)) {
9713 hdev->roce_client = client;
9714 vport->roce.client = client;
9717 ret = hclge_init_roce_client_instance(ae_dev, vport);
9730 hdev->nic_client = NULL;
9731 vport->nic.client = NULL;
9734 hdev->roce_client = NULL;
9735 vport->roce.client = NULL;
9739 static void hclge_uninit_client_instance(struct hnae3_client *client,
9740 struct hnae3_ae_dev *ae_dev)
9742 struct hclge_dev *hdev = ae_dev->priv;
9743 struct hclge_vport *vport;
9746 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9747 vport = &hdev->vport[i];
9748 if (hdev->roce_client) {
9749 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9750 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9751 msleep(HCLGE_WAIT_RESET_DONE);
9753 hdev->roce_client->ops->uninit_instance(&vport->roce,
9755 hdev->roce_client = NULL;
9756 vport->roce.client = NULL;
9758 if (client->type == HNAE3_CLIENT_ROCE)
9760 if (hdev->nic_client && client->ops->uninit_instance) {
9761 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9762 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9763 msleep(HCLGE_WAIT_RESET_DONE);
9765 client->ops->uninit_instance(&vport->nic, 0);
9766 hdev->nic_client = NULL;
9767 vport->nic.client = NULL;
9772 static int hclge_pci_init(struct hclge_dev *hdev)
9774 struct pci_dev *pdev = hdev->pdev;
9775 struct hclge_hw *hw;
9778 ret = pci_enable_device(pdev);
9780 dev_err(&pdev->dev, "failed to enable PCI device\n");
9784 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9786 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9789 "can't set consistent PCI DMA");
9790 goto err_disable_device;
9792 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9795 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9797 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9798 goto err_disable_device;
9801 pci_set_master(pdev);
9803 hw->io_base = pcim_iomap(pdev, 2, 0);
9805 dev_err(&pdev->dev, "Can't map configuration register space\n");
9807 goto err_clr_master;
9810 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9814 pci_clear_master(pdev);
9815 pci_release_regions(pdev);
9817 pci_disable_device(pdev);
9822 static void hclge_pci_uninit(struct hclge_dev *hdev)
9824 struct pci_dev *pdev = hdev->pdev;
9826 pcim_iounmap(pdev, hdev->hw.io_base);
9827 pci_free_irq_vectors(pdev);
9828 pci_clear_master(pdev);
9829 pci_release_mem_regions(pdev);
9830 pci_disable_device(pdev);
9833 static void hclge_state_init(struct hclge_dev *hdev)
9835 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9836 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9837 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9838 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9839 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9840 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9841 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9844 static void hclge_state_uninit(struct hclge_dev *hdev)
9846 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9847 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9849 if (hdev->reset_timer.function)
9850 del_timer_sync(&hdev->reset_timer);
9851 if (hdev->service_task.work.func)
9852 cancel_delayed_work_sync(&hdev->service_task);
9855 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9857 #define HCLGE_FLR_RETRY_WAIT_MS 500
9858 #define HCLGE_FLR_RETRY_CNT 5
9860 struct hclge_dev *hdev = ae_dev->priv;
9865 down(&hdev->reset_sem);
9866 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9867 hdev->reset_type = HNAE3_FLR_RESET;
9868 ret = hclge_reset_prepare(hdev);
9870 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9872 if (hdev->reset_pending ||
9873 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9874 dev_err(&hdev->pdev->dev,
9875 "reset_pending:0x%lx, retry_cnt:%d\n",
9876 hdev->reset_pending, retry_cnt);
9877 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9878 up(&hdev->reset_sem);
9879 msleep(HCLGE_FLR_RETRY_WAIT_MS);
9884 /* disable misc vector before FLR done */
9885 hclge_enable_vector(&hdev->misc_vector, false);
9886 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9887 hdev->rst_stats.flr_rst_cnt++;
9890 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9892 struct hclge_dev *hdev = ae_dev->priv;
9895 hclge_enable_vector(&hdev->misc_vector, true);
9897 ret = hclge_reset_rebuild(hdev);
9899 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9901 hdev->reset_type = HNAE3_NONE_RESET;
9902 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9903 up(&hdev->reset_sem);
9906 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9910 for (i = 0; i < hdev->num_alloc_vport; i++) {
9911 struct hclge_vport *vport = &hdev->vport[i];
9914 /* Send cmd to clear VF's FUNC_RST_ING */
9915 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9917 dev_warn(&hdev->pdev->dev,
9918 "clear vf(%u) rst failed %d!\n",
9919 vport->vport_id, ret);
9923 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9925 struct pci_dev *pdev = ae_dev->pdev;
9926 struct hclge_dev *hdev;
9929 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9936 hdev->ae_dev = ae_dev;
9937 hdev->reset_type = HNAE3_NONE_RESET;
9938 hdev->reset_level = HNAE3_FUNC_RESET;
9939 ae_dev->priv = hdev;
/* HW supports 2 layers of VLAN */
9942 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
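/* i.e. 1514 (ETH_FRAME_LEN) + 4 (ETH_FCS_LEN) + 2 * 4 (VLAN_HLEN) = 1526 */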
9944 mutex_init(&hdev->vport_lock);
9945 spin_lock_init(&hdev->fd_rule_lock);
9946 sema_init(&hdev->reset_sem, 1);
9948 ret = hclge_pci_init(hdev);
/* Initialize the firmware command queue */
9953 ret = hclge_cmd_queue_init(hdev);
9955 goto err_pci_uninit;
/* Initialize the firmware command interface */
9958 ret = hclge_cmd_init(hdev);
9960 goto err_cmd_uninit;
9962 ret = hclge_get_cap(hdev);
9964 goto err_cmd_uninit;
9966 ret = hclge_configure(hdev);
9968 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9969 goto err_cmd_uninit;
9972 ret = hclge_init_msi(hdev);
9974 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9975 goto err_cmd_uninit;
9978 ret = hclge_misc_irq_init(hdev);
9980 goto err_msi_uninit;
9982 ret = hclge_alloc_tqps(hdev);
9984 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9985 goto err_msi_irq_uninit;
9988 ret = hclge_alloc_vport(hdev);
9990 goto err_msi_irq_uninit;
9992 ret = hclge_map_tqp(hdev);
9994 goto err_msi_irq_uninit;
9996 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9997 ret = hclge_mac_mdio_config(hdev);
9999 goto err_msi_irq_uninit;
10002 ret = hclge_init_umv_space(hdev);
10004 goto err_mdiobus_unreg;
10006 ret = hclge_mac_init(hdev);
10008 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10009 goto err_mdiobus_unreg;
10012 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10014 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10015 goto err_mdiobus_unreg;
10018 ret = hclge_config_gro(hdev, true);
10020 goto err_mdiobus_unreg;
10022 ret = hclge_init_vlan_config(hdev);
10024 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10025 goto err_mdiobus_unreg;
10028 ret = hclge_tm_schd_init(hdev);
10030 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10031 goto err_mdiobus_unreg;
10034 hclge_rss_init_cfg(hdev);
10035 ret = hclge_rss_init_hw(hdev);
10037 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10038 goto err_mdiobus_unreg;
10041 ret = init_mgr_tbl(hdev);
10043 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10044 goto err_mdiobus_unreg;
10047 ret = hclge_init_fd_config(hdev);
10049 dev_err(&pdev->dev,
10050 "fd table init fail, ret=%d\n", ret);
10051 goto err_mdiobus_unreg;
10054 INIT_KFIFO(hdev->mac_tnl_log);
10056 hclge_dcb_ops_set(hdev);
10058 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10059 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
/* Set up affinity after the service timer setup, because add_timer_on
 * is called in the affinity notify handler.
 */
10064 hclge_misc_affinity_setup(hdev);
10066 hclge_clear_all_event_cause(hdev);
10067 hclge_clear_resetting_state(hdev);
/* Log and clear the hw errors that have already occurred */
10070 hclge_handle_all_hns_hw_errors(ae_dev);
/* Request a delayed reset for error recovery, because an immediate
 * global reset on this PF would affect the pending initialization of
 * other PFs.
 */
10075 if (ae_dev->hw_err_reset_req) {
10076 enum hnae3_reset_type reset_level;
10078 reset_level = hclge_get_reset_level(ae_dev,
10079 &ae_dev->hw_err_reset_req);
10080 hclge_set_def_reset_request(ae_dev, reset_level);
10081 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10084 /* Enable MISC vector(vector0) */
10085 hclge_enable_vector(&hdev->misc_vector, true);
10087 hclge_state_init(hdev);
10088 hdev->last_reset_time = jiffies;
10090 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10091 HCLGE_DRIVER_NAME);
10093 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10098 if (hdev->hw.mac.phydev)
10099 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10100 err_msi_irq_uninit:
10101 hclge_misc_irq_uninit(hdev);
10103 pci_free_irq_vectors(pdev);
10105 hclge_cmd_uninit(hdev);
10107 pcim_iounmap(pdev, hdev->hw.io_base);
10108 pci_clear_master(pdev);
10109 pci_release_regions(pdev);
10110 pci_disable_device(pdev);
10115 static void hclge_stats_clear(struct hclge_dev *hdev)
10117 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10120 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10122 return hclge_config_switch_param(hdev, vf, enable,
10123 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10126 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10128 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10129 HCLGE_FILTER_FE_NIC_INGRESS_B,
10133 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10137 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10139 dev_err(&hdev->pdev->dev,
10140 "Set vf %d mac spoof check %s failed, ret=%d\n",
10141 vf, enable ? "on" : "off", ret);
10145 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10147 dev_err(&hdev->pdev->dev,
10148 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10149 vf, enable ? "on" : "off", ret);
10154 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10157 struct hclge_vport *vport = hclge_get_vport(handle);
10158 struct hclge_dev *hdev = vport->back;
10159 u32 new_spoofchk = enable ? 1 : 0;
10162 if (hdev->pdev->revision == 0x20)
10163 return -EOPNOTSUPP;
10165 vport = hclge_get_vf_vport(hdev, vf);
10169 if (vport->vf_info.spoofchk == new_spoofchk)
10172 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10173 dev_warn(&hdev->pdev->dev,
10174 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10176 else if (enable && hclge_is_umv_space_full(vport, true))
10177 dev_warn(&hdev->pdev->dev,
10178 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10181 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10185 vport->vf_info.spoofchk = new_spoofchk;
10189 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10191 struct hclge_vport *vport = hdev->vport;
10195 if (hdev->pdev->revision == 0x20)
10198 /* resume the vf spoof check state after reset */
10199 for (i = 0; i < hdev->num_alloc_vport; i++) {
10200 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10201 vport->vf_info.spoofchk);
10211 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10213 struct hclge_vport *vport = hclge_get_vport(handle);
10214 struct hclge_dev *hdev = vport->back;
10215 u32 new_trusted = enable ? 1 : 0;
10219 vport = hclge_get_vf_vport(hdev, vf);
10223 if (vport->vf_info.trusted == new_trusted)
10226 /* Disable promisc mode for VF if it is not trusted any more. */
10227 if (!enable && vport->vf_info.promisc_enable) {
10228 en_bc_pmc = hdev->pdev->revision != 0x20;
10229 ret = hclge_set_vport_promisc_mode(vport, false, false,
10233 vport->vf_info.promisc_enable = 0;
10234 hclge_inform_vf_promisc_info(vport);
10237 vport->vf_info.trusted = new_trusted;
10242 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10247 /* reset vf rate to default value */
10248 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10249 struct hclge_vport *vport = &hdev->vport[vf];
10251 vport->vf_info.max_tx_rate = 0;
10252 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10254 dev_err(&hdev->pdev->dev,
10255 "vf%d failed to reset to default, ret=%d\n",
10256 vf - HCLGE_VF_VPORT_START_NUM, ret);
10260 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10261 int min_tx_rate, int max_tx_rate)
10263 if (min_tx_rate != 0 ||
10264 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10265 dev_err(&hdev->pdev->dev,
10266 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10267 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10274 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10275 int min_tx_rate, int max_tx_rate, bool force)
10277 struct hclge_vport *vport = hclge_get_vport(handle);
10278 struct hclge_dev *hdev = vport->back;
10281 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10285 vport = hclge_get_vf_vport(hdev, vf);
10289 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10292 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10296 vport->vf_info.max_tx_rate = max_tx_rate;
10301 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10303 struct hnae3_handle *handle = &hdev->vport->nic;
10304 struct hclge_vport *vport;
10308 /* resume the vf max_tx_rate after reset */
10309 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10310 vport = hclge_get_vf_vport(hdev, vf);
/* Zero means max rate; after reset, the firmware has already
 * set it to max rate, so just continue.
 */
10317 if (!vport->vf_info.max_tx_rate)
10320 ret = hclge_set_vf_rate(handle, vf, 0,
10321 vport->vf_info.max_tx_rate, true);
10323 dev_err(&hdev->pdev->dev,
10324 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10325 vf, vport->vf_info.max_tx_rate, ret);
10333 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10335 struct hclge_vport *vport = hdev->vport;
10338 for (i = 0; i < hdev->num_alloc_vport; i++) {
10339 hclge_vport_stop(vport);
10344 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10346 struct hclge_dev *hdev = ae_dev->priv;
10347 struct pci_dev *pdev = ae_dev->pdev;
10350 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10352 hclge_stats_clear(hdev);
/* NOTE: a PF reset doesn't need to clear or restore the PF and VF
 * table entries, so the tables in memory should not be cleaned here.
 */
10356 if (hdev->reset_type == HNAE3_IMP_RESET ||
10357 hdev->reset_type == HNAE3_GLOBAL_RESET) {
10358 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10359 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10360 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10361 hclge_reset_umv_space(hdev);
10364 ret = hclge_cmd_init(hdev);
10366 dev_err(&pdev->dev, "Cmd queue init failed\n");
10370 ret = hclge_map_tqp(hdev);
10372 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10376 ret = hclge_mac_init(hdev);
10378 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10382 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10384 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10388 ret = hclge_config_gro(hdev, true);
10392 ret = hclge_init_vlan_config(hdev);
10394 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10398 ret = hclge_tm_init_hw(hdev, true);
10400 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10404 ret = hclge_rss_init_hw(hdev);
10406 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10410 ret = init_mgr_tbl(hdev);
10412 dev_err(&pdev->dev,
10413 "failed to reinit manager table, ret = %d\n", ret);
10417 ret = hclge_init_fd_config(hdev);
10419 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
/* Log and clear the hw errors that have already occurred */
10424 hclge_handle_all_hns_hw_errors(ae_dev);
/* Re-enable the hw error interrupts, because
 * they get disabled on global reset.
 */
10429 ret = hclge_config_nic_hw_error(hdev, true);
10431 dev_err(&pdev->dev,
10432 "fail(%d) to re-enable NIC hw error interrupts\n",
10437 if (hdev->roce_client) {
10438 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10440 dev_err(&pdev->dev,
10441 "fail(%d) to re-enable roce ras interrupts\n",
10447 hclge_reset_vport_state(hdev);
10448 ret = hclge_reset_vport_spoofchk(hdev);
10452 ret = hclge_resume_vf_rate(hdev);
10456 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10457 HCLGE_DRIVER_NAME);
10462 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10464 struct hclge_dev *hdev = ae_dev->priv;
10465 struct hclge_mac *mac = &hdev->hw.mac;
10467 hclge_reset_vf_rate(hdev);
10468 hclge_clear_vf_vlan(hdev);
10469 hclge_misc_affinity_teardown(hdev);
10470 hclge_state_uninit(hdev);
10471 hclge_uninit_mac_table(hdev);
10474 mdiobus_unregister(mac->mdio_bus);
10476 /* Disable MISC vector(vector0) */
10477 hclge_enable_vector(&hdev->misc_vector, false);
10478 synchronize_irq(hdev->misc_vector.vector_irq);
10480 /* Disable all hw interrupts */
10481 hclge_config_mac_tnl_int(hdev, false);
10482 hclge_config_nic_hw_error(hdev, false);
10483 hclge_config_rocee_ras_interrupt(hdev, false);
10485 hclge_cmd_uninit(hdev);
10486 hclge_misc_irq_uninit(hdev);
10487 hclge_pci_uninit(hdev);
10488 mutex_destroy(&hdev->vport_lock);
10489 hclge_uninit_vport_vlan_table(hdev);
10490 ae_dev->priv = NULL;
10493 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10495 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10496 struct hclge_vport *vport = hclge_get_vport(handle);
10497 struct hclge_dev *hdev = vport->back;
10499 return min_t(u32, hdev->rss_size_max,
10500 vport->alloc_tqps / kinfo->num_tc);
10503 static void hclge_get_channels(struct hnae3_handle *handle,
10504 struct ethtool_channels *ch)
10506 ch->max_combined = hclge_get_max_channels(handle);
10507 ch->other_count = 1;
10509 ch->combined_count = handle->kinfo.rss_size;
10512 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10513 u16 *alloc_tqps, u16 *max_rss_size)
10515 struct hclge_vport *vport = hclge_get_vport(handle);
10516 struct hclge_dev *hdev = vport->back;
10518 *alloc_tqps = vport->alloc_tqps;
10519 *max_rss_size = hdev->rss_size_max;
10522 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10523 bool rxfh_configured)
10525 struct hclge_vport *vport = hclge_get_vport(handle);
10526 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10527 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10528 struct hclge_dev *hdev = vport->back;
10529 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10530 u16 cur_rss_size = kinfo->rss_size;
10531 u16 cur_tqps = kinfo->num_tqps;
10532 u16 tc_valid[HCLGE_MAX_TC_NUM];
10538 kinfo->req_rss_size = new_tqps_num;
10540 ret = hclge_tm_vport_map_update(hdev);
10542 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10546 roundup_size = roundup_pow_of_two(kinfo->rss_size);
10547 roundup_size = ilog2(roundup_size);
10548 /* Set the RSS TC mode according to the new RSS size */
10549 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10552 if (!(hdev->hw_tc_map & BIT(i)))
10556 tc_size[i] = roundup_size;
10557 tc_offset[i] = kinfo->rss_size * i;
10559 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
/* the RSS indirection table has been configured by the user */
10564 if (rxfh_configured)
/* Reinitialize the RSS indirection table according to the new RSS size */
10568 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10572 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10573 rss_indir[i] = i % kinfo->rss_size;
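/* rss_indir[] now maps every hash bucket round-robin onto the first
 * kinfo->rss_size queues
 */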
10575 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10577 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10584 dev_info(&hdev->pdev->dev,
10585 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10586 cur_rss_size, kinfo->rss_size,
10587 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10592 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10593 u32 *regs_num_64_bit)
10595 struct hclge_desc desc;
10599 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10600 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10602 dev_err(&hdev->pdev->dev,
10603 "Query register number cmd failed, ret = %d.\n", ret);
10607 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10608 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10610 total_num = *regs_num_32_bit + *regs_num_64_bit;
10617 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10620 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10621 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10623 struct hclge_desc *desc;
10624 u32 *reg_val = data;
10634 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10635 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10636 HCLGE_32_BIT_REG_RTN_DATANUM);
10637 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10641 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10642 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10644 dev_err(&hdev->pdev->dev,
10645 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10650 for (i = 0; i < cmd_num; i++) {
10652 desc_data = (__le32 *)(&desc[i].data[0]);
10653 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10655 desc_data = (__le32 *)(&desc[i]);
10656 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10658 for (k = 0; k < n; k++) {
10659 *reg_val++ = le32_to_cpu(*desc_data++);
10671 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10674 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10675 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10677 struct hclge_desc *desc;
10678 u64 *reg_val = data;
10688 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10689 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10690 HCLGE_64_BIT_REG_RTN_DATANUM);
10691 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10695 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10696 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10698 dev_err(&hdev->pdev->dev,
10699 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10704 for (i = 0; i < cmd_num; i++) {
10706 desc_data = (__le64 *)(&desc[i].data[0]);
10707 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10709 desc_data = (__le64 *)(&desc[i]);
10710 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10712 for (k = 0; k < n; k++) {
10713 *reg_val++ = le64_to_cpu(*desc_data++);
10725 #define MAX_SEPARATE_NUM 4
10726 #define SEPARATOR_VALUE 0xFDFCFBFA
10727 #define REG_NUM_PER_LINE 4
10728 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
10729 #define REG_SEPARATOR_LINE 1
10730 #define REG_NUM_REMAIN_MASK 3
10731 #define BD_LIST_MAX_NUM 30
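/* Layout used by the register dump helpers below: registers are copied out
 * in groups, and each group is padded with 1 to 4 SEPARATOR_VALUE words so
 * that it ends on a REG_LEN_PER_LINE (4 * sizeof(u32)) boundary, which keeps
 * the group boundaries visible in the raw dump.
 */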
10733 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
/* prepare 4 commands to query the DFX BD number */
10736 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10737 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10738 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10739 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10740 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10741 desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10742 hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10744 return hclge_cmd_send(&hdev->hw, desc, 4);
10747 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10751 u32 entries_per_desc, desc_index, index, offset, i;
10752 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10755 ret = hclge_query_bd_num_cmd_send(hdev, desc);
10757 dev_err(&hdev->pdev->dev,
10758 "Get dfx bd num fail, status is %d.\n", ret);
10762 entries_per_desc = ARRAY_SIZE(desc[0].data);
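/* The BD numbers are packed back-to-back across the reply descriptors:
 * offset N lives in desc[N / entries_per_desc].data[N % entries_per_desc].
 */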
10763 for (i = 0; i < type_num; i++) {
10764 offset = hclge_dfx_bd_offset_list[i];
10765 index = offset % entries_per_desc;
10766 desc_index = offset / entries_per_desc;
10767 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10773 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10774 struct hclge_desc *desc_src, int bd_num,
10775 enum hclge_opcode_type cmd)
10777 struct hclge_desc *desc = desc_src;
10780 hclge_cmd_setup_basic_desc(desc, cmd, true);
10781 for (i = 0; i < bd_num - 1; i++) {
10782 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10784 hclge_cmd_setup_basic_desc(desc, cmd, true);
10788 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10790 dev_err(&hdev->pdev->dev,
10791 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10797 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10800 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10801 struct hclge_desc *desc = desc_src;
10804 entries_per_desc = ARRAY_SIZE(desc->data);
10805 reg_num = entries_per_desc * bd_num;
10806 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10807 for (i = 0; i < reg_num; i++) {
10808 index = i % entries_per_desc;
10809 desc_index = i / entries_per_desc;
10810 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10812 for (i = 0; i < separator_num; i++)
10813 *reg++ = SEPARATOR_VALUE;
10815 return reg_num + separator_num;
10818 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10820 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10821 int data_len_per_desc, bd_num, i;
10822 int bd_num_list[BD_LIST_MAX_NUM];
10826 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10828 dev_err(&hdev->pdev->dev,
10829 "Get dfx reg bd num fail, status is %d.\n", ret);
10833 data_len_per_desc = sizeof_field(struct hclge_desc, data);
10835 for (i = 0; i < dfx_reg_type_num; i++) {
10836 bd_num = bd_num_list[i];
10837 data_len = data_len_per_desc * bd_num;
10838 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10844 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10846 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10847 int bd_num, bd_num_max, buf_len, i;
10848 int bd_num_list[BD_LIST_MAX_NUM];
10849 struct hclge_desc *desc_src;
10853 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10855 dev_err(&hdev->pdev->dev,
10856 "Get dfx reg bd num fail, status is %d.\n", ret);
10860 bd_num_max = bd_num_list[0];
10861 for (i = 1; i < dfx_reg_type_num; i++)
10862 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10864 buf_len = sizeof(*desc_src) * bd_num_max;
10865 desc_src = kzalloc(buf_len, GFP_KERNEL);
10869 for (i = 0; i < dfx_reg_type_num; i++) {
10870 bd_num = bd_num_list[i];
10871 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10872 hclge_dfx_reg_opcode_list[i]);
10874 dev_err(&hdev->pdev->dev,
10875 "Get dfx reg fail, status is %d.\n", ret);
10879 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10886 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10887 struct hnae3_knic_private_info *kinfo)
10889 #define HCLGE_RING_REG_OFFSET 0x200
10890 #define HCLGE_RING_INT_REG_OFFSET 0x4
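/* The PF register dump below is assembled in four groups, in order: cmdq
 * registers, common registers, per-ring registers (one ring_reg_addr_list
 * block per TQP, blocks 0x200 apart) and TQP interrupt registers (one
 * tqp_intr_reg_addr_list block per vector for hdev->num_msi_used - 1
 * vectors, blocks 0x4 apart), each group separator-padded as above.
 */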
10892 int i, j, reg_num, separator_num;
/* fetch the per-PF register values from the PF PCIe register space */
10897 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10898 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10899 for (i = 0; i < reg_num; i++)
10900 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10901 for (i = 0; i < separator_num; i++)
10902 *reg++ = SEPARATOR_VALUE;
10903 data_num_sum = reg_num + separator_num;
10905 reg_num = ARRAY_SIZE(common_reg_addr_list);
10906 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10907 for (i = 0; i < reg_num; i++)
10908 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10909 for (i = 0; i < separator_num; i++)
10910 *reg++ = SEPARATOR_VALUE;
10911 data_num_sum += reg_num + separator_num;
10913 reg_num = ARRAY_SIZE(ring_reg_addr_list);
10914 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10915 for (j = 0; j < kinfo->num_tqps; j++) {
10916 for (i = 0; i < reg_num; i++)
10917 *reg++ = hclge_read_dev(&hdev->hw,
10918 ring_reg_addr_list[i] +
10919 HCLGE_RING_REG_OFFSET * j);
10920 for (i = 0; i < separator_num; i++)
10921 *reg++ = SEPARATOR_VALUE;
10923 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10925 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10926 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10927 for (j = 0; j < hdev->num_msi_used - 1; j++) {
10928 for (i = 0; i < reg_num; i++)
10929 *reg++ = hclge_read_dev(&hdev->hw,
10930 tqp_intr_reg_addr_list[i] +
10931 HCLGE_RING_INT_REG_OFFSET * j);
10932 for (i = 0; i < separator_num; i++)
10933 *reg++ = SEPARATOR_VALUE;
10935 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10937 return data_num_sum;
10940 static int hclge_get_regs_len(struct hnae3_handle *handle)
10942 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10943 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10944 struct hclge_vport *vport = hclge_get_vport(handle);
10945 struct hclge_dev *hdev = vport->back;
10946 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10947 int regs_lines_32_bit, regs_lines_64_bit;
10950 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
10952 dev_err(&hdev->pdev->dev,
10953 "Get register number failed, ret = %d.\n", ret);
10957 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10959 dev_err(&hdev->pdev->dev,
10960 "Get dfx reg len failed, ret = %d.\n", ret);
10964 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10965 REG_SEPARATOR_LINE;
10966 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10967 REG_SEPARATOR_LINE;
10968 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10969 REG_SEPARATOR_LINE;
10970 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10971 REG_SEPARATOR_LINE;
10972 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10973 REG_SEPARATOR_LINE;
10974 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10975 REG_SEPARATOR_LINE;
10977 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10978 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10979 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10982 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10985 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10986 struct hclge_vport *vport = hclge_get_vport(handle);
10987 struct hclge_dev *hdev = vport->back;
10988 u32 regs_num_32_bit, regs_num_64_bit;
10989 int i, reg_num, separator_num, ret;
10992 *version = hdev->fw_version;
10994 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
10996 dev_err(&hdev->pdev->dev,
10997 "Get register number failed, ret = %d.\n", ret);
11001 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11003 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11005 dev_err(&hdev->pdev->dev,
11006 "Get 32 bit register failed, ret = %d.\n", ret);
11009 reg_num = regs_num_32_bit;
11011 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11012 for (i = 0; i < separator_num; i++)
11013 *reg++ = SEPARATOR_VALUE;
11015 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11017 dev_err(&hdev->pdev->dev,
11018 "Get 64 bit register failed, ret = %d.\n", ret);
11021 reg_num = regs_num_64_bit * 2;
11023 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11024 for (i = 0; i < separator_num; i++)
11025 *reg++ = SEPARATOR_VALUE;
11027 ret = hclge_get_dfx_reg(hdev, reg);
11029 dev_err(&hdev->pdev->dev,
11030 "Get dfx register failed, ret = %d.\n", ret);
11033 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11035 struct hclge_set_led_state_cmd *req;
11036 struct hclge_desc desc;
11039 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11041 req = (struct hclge_set_led_state_cmd *)desc.data;
11042 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11043 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11045 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11047 dev_err(&hdev->pdev->dev,
11048 "Send set led state cmd error, ret =%d\n", ret);
11053 enum hclge_led_status {
11056 HCLGE_LED_NO_CHANGE = 0xFF,
11059 static int hclge_set_led_id(struct hnae3_handle *handle,
11060 enum ethtool_phys_id_state status)
11062 struct hclge_vport *vport = hclge_get_vport(handle);
11063 struct hclge_dev *hdev = vport->back;
11066 case ETHTOOL_ID_ACTIVE:
11067 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11068 case ETHTOOL_ID_INACTIVE:
11069 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11075 static void hclge_get_link_mode(struct hnae3_handle *handle,
11076 unsigned long *supported,
11077 unsigned long *advertising)
11079 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11080 struct hclge_vport *vport = hclge_get_vport(handle);
11081 struct hclge_dev *hdev = vport->back;
11082 unsigned int idx = 0;
11084 for (; idx < size; idx++) {
11085 supported[idx] = hdev->hw.mac.supported[idx];
11086 advertising[idx] = hdev->hw.mac.advertising[idx];
11090 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11092 struct hclge_vport *vport = hclge_get_vport(handle);
11093 struct hclge_dev *hdev = vport->back;
11095 return hclge_config_gro(hdev, enable);
11098 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11100 struct hclge_vport *vport = &hdev->vport[0];
11101 struct hnae3_handle *handle = &vport->nic;
11105 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11106 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11107 vport->last_promisc_flags = vport->overflow_promisc_flags;
11110 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11111 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11112 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11113 tmp_flags & HNAE3_MPE);
11115 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11116 hclge_enable_vlan_filter(handle,
11117 tmp_flags & HNAE3_VLAN_FLTR);
11122 static const struct hnae3_ae_ops hclge_ops = {
11123 .init_ae_dev = hclge_init_ae_dev,
11124 .uninit_ae_dev = hclge_uninit_ae_dev,
11125 .flr_prepare = hclge_flr_prepare,
11126 .flr_done = hclge_flr_done,
11127 .init_client_instance = hclge_init_client_instance,
11128 .uninit_client_instance = hclge_uninit_client_instance,
11129 .map_ring_to_vector = hclge_map_ring_to_vector,
11130 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11131 .get_vector = hclge_get_vector,
11132 .put_vector = hclge_put_vector,
11133 .set_promisc_mode = hclge_set_promisc_mode,
11134 .request_update_promisc_mode = hclge_request_update_promisc_mode,
11135 .set_loopback = hclge_set_loopback,
11136 .start = hclge_ae_start,
11137 .stop = hclge_ae_stop,
11138 .client_start = hclge_client_start,
11139 .client_stop = hclge_client_stop,
11140 .get_status = hclge_get_status,
11141 .get_ksettings_an_result = hclge_get_ksettings_an_result,
11142 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11143 .get_media_type = hclge_get_media_type,
11144 .check_port_speed = hclge_check_port_speed,
11145 .get_fec = hclge_get_fec,
11146 .set_fec = hclge_set_fec,
11147 .get_rss_key_size = hclge_get_rss_key_size,
11148 .get_rss_indir_size = hclge_get_rss_indir_size,
11149 .get_rss = hclge_get_rss,
11150 .set_rss = hclge_set_rss,
11151 .set_rss_tuple = hclge_set_rss_tuple,
11152 .get_rss_tuple = hclge_get_rss_tuple,
11153 .get_tc_size = hclge_get_tc_size,
11154 .get_mac_addr = hclge_get_mac_addr,
11155 .set_mac_addr = hclge_set_mac_addr,
11156 .do_ioctl = hclge_do_ioctl,
11157 .add_uc_addr = hclge_add_uc_addr,
11158 .rm_uc_addr = hclge_rm_uc_addr,
11159 .add_mc_addr = hclge_add_mc_addr,
11160 .rm_mc_addr = hclge_rm_mc_addr,
11161 .set_autoneg = hclge_set_autoneg,
11162 .get_autoneg = hclge_get_autoneg,
11163 .restart_autoneg = hclge_restart_autoneg,
11164 .halt_autoneg = hclge_halt_autoneg,
11165 .get_pauseparam = hclge_get_pauseparam,
11166 .set_pauseparam = hclge_set_pauseparam,
11167 .set_mtu = hclge_set_mtu,
11168 .reset_queue = hclge_reset_tqp,
11169 .get_stats = hclge_get_stats,
11170 .get_mac_stats = hclge_get_mac_stat,
11171 .update_stats = hclge_update_stats,
11172 .get_strings = hclge_get_strings,
11173 .get_sset_count = hclge_get_sset_count,
11174 .get_fw_version = hclge_get_fw_version,
11175 .get_mdix_mode = hclge_get_mdix_mode,
11176 .enable_vlan_filter = hclge_enable_vlan_filter,
11177 .set_vlan_filter = hclge_set_vlan_filter,
11178 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11179 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11180 .reset_event = hclge_reset_event,
11181 .get_reset_level = hclge_get_reset_level,
11182 .set_default_reset_request = hclge_set_def_reset_request,
11183 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11184 .set_channels = hclge_set_channels,
11185 .get_channels = hclge_get_channels,
11186 .get_regs_len = hclge_get_regs_len,
11187 .get_regs = hclge_get_regs,
11188 .set_led_id = hclge_set_led_id,
11189 .get_link_mode = hclge_get_link_mode,
11190 .add_fd_entry = hclge_add_fd_entry,
11191 .del_fd_entry = hclge_del_fd_entry,
11192 .del_all_fd_entries = hclge_del_all_fd_entries,
11193 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11194 .get_fd_rule_info = hclge_get_fd_rule_info,
11195 .get_fd_all_rules = hclge_get_all_rules,
11196 .enable_fd = hclge_enable_fd,
11197 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11198 .dbg_run_cmd = hclge_dbg_run_cmd,
11199 .handle_hw_ras_error = hclge_handle_hw_ras_error,
11200 .get_hw_reset_stat = hclge_get_hw_reset_stat,
11201 .ae_dev_resetting = hclge_ae_dev_resetting,
11202 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11203 .set_gro_en = hclge_gro_en,
11204 .get_global_queue_id = hclge_covert_handle_qid_global,
11205 .set_timer_task = hclge_set_timer_task,
11206 .mac_connect_phy = hclge_mac_connect_phy,
11207 .mac_disconnect_phy = hclge_mac_disconnect_phy,
11208 .get_vf_config = hclge_get_vf_config,
11209 .set_vf_link_state = hclge_set_vf_link_state,
11210 .set_vf_spoofchk = hclge_set_vf_spoofchk,
11211 .set_vf_trust = hclge_set_vf_trust,
11212 .set_vf_rate = hclge_set_vf_rate,
11213 .set_vf_mac = hclge_set_vf_mac,
11216 static struct hnae3_ae_algo ae_algo = {
11218 .pdev_id_table = ae_algo_pci_tbl,
11221 static int hclge_init(void)
11223 pr_info("%s is initializing\n", HCLGE_NAME);
11225 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11227 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11231 hnae3_register_ae_algo(&ae_algo);
11236 static void hclge_exit(void)
11238 hnae3_unregister_ae_algo(&ae_algo);
11239 destroy_workqueue(hclge_wq);
11241 module_init(hclge_init);
11242 module_exit(hclge_exit);
11244 MODULE_LICENSE("GPL");
11245 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11246 MODULE_DESCRIPTION("HCLGE Driver");
11247 MODULE_VERSION(HCLGE_MOD_VERSION);