// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
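/* Illustrative example (not from the original source): together these two
 * macros give a table-driven counter read, so
 *	HCLGE_STATS_READ(&hdev->mac_stats,
 *			 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num))
 * is equivalent to hdev->mac_stats.mac_tx_mac_pause_num.  This is what
 * lets g_mac_stats_string below store only name/offset pairs.
 */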
#define HCLGE_BUF_SIZE_UNIT 256U
#define HCLGE_BUF_MUL_BY 2
#define HCLGE_BUF_DIV_BY 2
#define NEED_RESERVE_TC_NUM 2
#define BUF_MAX_PERCENT 100
#define BUF_RESERVE_PERCENT 90

#define HCLGE_RESET_MAX_FAIL_CNT 5
#define HCLGE_RESET_SYNC_TIME 100
#define HCLGE_PF_RESET_SYNC_TIME 20
#define HCLGE_PF_RESET_SYNC_CNT 1500
/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET 1
#define HCLGE_DFX_SSU_0_BD_OFFSET 2
#define HCLGE_DFX_SSU_1_BD_OFFSET 3
#define HCLGE_DFX_IGU_BD_OFFSET 4
#define HCLGE_DFX_RPU_0_BD_OFFSET 5
#define HCLGE_DFX_RPU_1_BD_OFFSET 6
#define HCLGE_DFX_NCSI_BD_OFFSET 7
#define HCLGE_DFX_RTC_BD_OFFSET 8
#define HCLGE_DFX_PPP_BD_OFFSET 9
#define HCLGE_DFX_RCB_BD_OFFSET 10
#define HCLGE_DFX_TQP_BD_OFFSET 11
#define HCLGE_DFX_SSU_2_BD_OFFSET 12

#define HCLGE_LINK_STATUS_MS 10

#define HCLGE_VF_VPORT_START_NUM 1
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
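/* This 40-byte pattern appears to be the well-known default Toeplitz RSS
 * hash key from Microsoft's RSS specification, so out of the box the
 * hardware should hash flows the same way most other NICs do.
 */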
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};
static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
};
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
};
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}
		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	int i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}
		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
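/* Worked example: the formula above charges 3 registers to the first
 * descriptor and 4 to each following one.  For reg_num = 83:
 *	1 + ((83 - 3) >> 2) + (((83 - 3) & 0x3) ? 1 : 0) = 1 + 20 + 0 = 21
 * which matches the 21 descriptors (HCLGE_MAC_CMD_NUM) used by the
 * defective fallback path above.
 */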
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);
	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has one TX queue and one RX queue */
	return kinfo->num_tqps * 2;
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
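/* The resulting ethtool string table is one fixed-width record per queue
 * and direction, TX block first; e.g. for two TQPs:
 *	"txq0_pktnum_rcd", "txq1_pktnum_rcd",
 *	"rxq0_pktnum_rcd", "rxq1_pktnum_rcd"
 * in exactly the order hclge_tqps_get_stats() emits the values.
 */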
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all MAC modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK 0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Record whether this PF is the main PF */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT 5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check whether PF reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* the number of NIC MSI-X vectors always equals the RoCE's */
		hdev->num_nic_msi = hdev->num_roce_msi;

		/* PF should have NIC vectors and RoCE vectors;
		 * NIC vectors are queued before RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		hdev->num_nic_msi = hdev->num_msi;
	}

	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to supporting all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static u32 hclge_get_max_speed(u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
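/* The ladder above runs from fastest to slowest, so the highest set
 * ability bit wins: e.g. speed_ability = 25G|10G|1G reports 25G, and a
 * value with no recognised bit falls back to 1G.
 */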
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
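	/* Example: param[2] = 0x44332211 (low 32 bits) with a high field of
	 * 0x6655 combines to 0x665544332211; the byte loop below unpacks it
	 * LSB-first into the address 11:22:33:44:55:66.  The "<< 31 << 1" is
	 * a 32-bit shift split in two, presumably so the expression stays
	 * defined even for a 32-bit operand.
	 */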
	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be obtained
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* len is counted in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC 64
#define HCLGE_MIN_RX_DESC 64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equal the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if (hdev->tc_max > HNAE3_MAX_TC ||
	    hdev->tc_max < 1) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
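/* Both MSS limits are written through HCLGE_TSO_MSS_MIN_M/S above: the
 * min and max fields evidently share one layout, so the MIN mask/shift
 * pair is reused for the max value.  E.g. hclge_config_tso(hdev, 256, 9668)
 * would send cpu_to_le16(256) and cpu_to_le16(9668) (illustrative values,
 * not from the original source).
 */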
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure a one-to-one mapping between irq and queue by default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}
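/* Example of the two clamps above: rss_size_max = 16, 8 allocated TQPs
 * and one TC gives rss_size = min(16, 8) = 8; with num_nic_msi = 5 it is
 * then cut to (5 - 1) / 1 = 4, keeping one vector per queue after one
 * vector is set aside (apparently for miscellaneous interrupts).
 */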
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
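	/* Example: 64 TQPs shared by 1 PF vport + 5 VF vports gives
	 * tqp_per_vport = 64 / 6 = 10 and tqp_main_vport = 10 + 64 % 6 = 14,
	 * i.e. the PF vport absorbs the remainder.
	 */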
	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)

	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of PFC-enabled TCs that have a private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of PFC-disabled TCs that have a private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
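/* Worked example for the DCB branch (illustrative numbers): mps = 1500
 * aligns to 1536, so with dv_buf_size = 0x2000
 *	shared_buf_min = 2 * 1536 + 0x2000 = 11264
 *	shared_buf_tc  = (tc_num + 1) * 1536
 * and shared_std is the larger of the two rounded up to 256 bytes; any
 * rx_all below rx_priv + shared_std makes this function return false and
 * triggers the fallbacks in hclge_rx_buffer_calc().
 */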
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear the last TC first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear the last TC first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER 0x3C00
#define COMPENSATE_HALF_MPS_NUM 5
#define PRIV_WL_GAP 0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}
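/* Example: rx_priv = 128K split over 4 TCs gives 32K each; since
 * 4 > NEED_RESERVE_TC_NUM the 90% reserve is skipped, and the layout is
 * accepted only if 32K (rounded down to 256 bytes) still covers
 * dv_buf_size + COMPENSATE_BUFFER + 5 * (mps / 2).
 */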
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
2174 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2175 struct hclge_pkt_buf_alloc *buf_alloc)
2177 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2178 struct hclge_rx_com_thrd *req;
2179 struct hclge_desc desc[2];
2180 struct hclge_tc_thrd *tc;
2184 for (i = 0; i < 2; i++) {
2185 hclge_cmd_setup_basic_desc(&desc[i],
2186 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2187 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2189 /* The first descriptor sets the NEXT bit to 1 */
2191 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2193 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2195 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2196 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2198 req->com_thrd[j].high =
2199 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2200 req->com_thrd[j].high |=
2201 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2202 req->com_thrd[j].low =
2203 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2204 req->com_thrd[j].low |=
2205 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2209 /* Send 2 descriptors at one time */
2210 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2212 dev_err(&hdev->pdev->dev,
2213 "common threshold config cmd failed %d\n", ret);
2217 static int hclge_common_wl_config(struct hclge_dev *hdev,
2218 struct hclge_pkt_buf_alloc *buf_alloc)
2220 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2221 struct hclge_rx_com_wl *req;
2222 struct hclge_desc desc;
2225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2227 req = (struct hclge_rx_com_wl *)desc.data;
2228 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2229 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2231 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2232 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2234 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2236 dev_err(&hdev->pdev->dev,
2237 "common waterline config cmd failed %d\n", ret);
2242 int hclge_buffer_alloc(struct hclge_dev *hdev)
2244 struct hclge_pkt_buf_alloc *pkt_buf;
2247 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2251 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2253 dev_err(&hdev->pdev->dev,
2254 "could not calc tx buffer size for all TCs %d\n", ret);
2258 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2260 dev_err(&hdev->pdev->dev,
2261 "could not alloc tx buffers %d\n", ret);
2265 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2267 dev_err(&hdev->pdev->dev,
2268 "could not calc rx priv buffer size for all TCs %d\n",
2273 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2275 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2280 if (hnae3_dev_dcb_supported(hdev)) {
2281 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2283 dev_err(&hdev->pdev->dev,
2284 "could not configure rx private waterline %d\n",
2289 ret = hclge_common_thrd_config(hdev, pkt_buf);
2291 dev_err(&hdev->pdev->dev,
2292 "could not configure common threshold %d\n",
2298 ret = hclge_common_wl_config(hdev, pkt_buf);
2300 dev_err(&hdev->pdev->dev,
2301 "could not configure common waterline %d\n", ret);
2308 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2310 struct hnae3_handle *roce = &vport->roce;
2311 struct hnae3_handle *nic = &vport->nic;
2313 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2315 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2316 vport->back->num_msi_left == 0)
2319 roce->rinfo.base_vector = vport->back->roce_base_vector;
2321 roce->rinfo.netdev = nic->kinfo.netdev;
2322 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2324 roce->pdev = nic->pdev;
2325 roce->ae_algo = nic->ae_algo;
2326 roce->numa_node_mask = nic->numa_node_mask;
2331 static int hclge_init_msi(struct hclge_dev *hdev)
2333 struct pci_dev *pdev = hdev->pdev;
2337 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2339 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2342 "failed(%d) to allocate MSI/MSI-X vectors\n",
2346 if (vectors < hdev->num_msi)
2347 dev_warn(&hdev->pdev->dev,
2348 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2349 hdev->num_msi, vectors);
2351 hdev->num_msi = vectors;
2352 hdev->num_msi_left = vectors;
2354 hdev->base_msi_vector = pdev->irq;
2355 hdev->roce_base_vector = hdev->base_msi_vector +
2356 hdev->roce_base_msix_offset;
2358 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2359 sizeof(u16), GFP_KERNEL);
2360 if (!hdev->vector_status) {
2361 pci_free_irq_vectors(pdev);
2365 for (i = 0; i < hdev->num_msi; i++)
2366 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2368 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2369 sizeof(int), GFP_KERNEL);
2370 if (!hdev->vector_irq) {
2371 pci_free_irq_vectors(pdev);
2378 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2380 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2381 duplex = HCLGE_MAC_FULL;
2386 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2389 struct hclge_config_mac_speed_dup_cmd *req;
2390 struct hclge_desc desc;
2393 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2395 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2398 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2401 case HCLGE_MAC_SPEED_10M:
2402 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403 HCLGE_CFG_SPEED_S, 6);
2405 case HCLGE_MAC_SPEED_100M:
2406 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407 HCLGE_CFG_SPEED_S, 7);
2409 case HCLGE_MAC_SPEED_1G:
2410 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411 HCLGE_CFG_SPEED_S, 0);
2413 case HCLGE_MAC_SPEED_10G:
2414 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415 HCLGE_CFG_SPEED_S, 1);
2417 case HCLGE_MAC_SPEED_25G:
2418 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419 HCLGE_CFG_SPEED_S, 2);
2421 case HCLGE_MAC_SPEED_40G:
2422 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423 HCLGE_CFG_SPEED_S, 3);
2425 case HCLGE_MAC_SPEED_50G:
2426 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2427 HCLGE_CFG_SPEED_S, 4);
2429 case HCLGE_MAC_SPEED_100G:
2430 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2431 HCLGE_CFG_SPEED_S, 5);
2434 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2438 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2441 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2443 dev_err(&hdev->pdev->dev,
2444 "mac speed/duplex config cmd failed %d.\n", ret);
2451 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2453 struct hclge_mac *mac = &hdev->hw.mac;
2456 duplex = hclge_check_speed_dup(duplex, speed);
2457 if (!mac->support_autoneg && mac->speed == speed &&
2458 mac->duplex == duplex)
2461 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2465 hdev->hw.mac.speed = speed;
2466 hdev->hw.mac.duplex = duplex;
2471 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2474 struct hclge_vport *vport = hclge_get_vport(handle);
2475 struct hclge_dev *hdev = vport->back;
2477 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2480 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2482 struct hclge_config_auto_neg_cmd *req;
2483 struct hclge_desc desc;
2487 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2489 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2491 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2492 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2494 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2496 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2502 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2504 struct hclge_vport *vport = hclge_get_vport(handle);
2505 struct hclge_dev *hdev = vport->back;
2507 if (!hdev->hw.mac.support_autoneg) {
2509 dev_err(&hdev->pdev->dev,
2510 "autoneg is not supported by current port\n");
2517 return hclge_set_autoneg_en(hdev, enable);
2520 static int hclge_get_autoneg(struct hnae3_handle *handle)
2522 struct hclge_vport *vport = hclge_get_vport(handle);
2523 struct hclge_dev *hdev = vport->back;
2524 struct phy_device *phydev = hdev->hw.mac.phydev;
2527 return phydev->autoneg;
2529 return hdev->hw.mac.autoneg;
2532 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2534 struct hclge_vport *vport = hclge_get_vport(handle);
2535 struct hclge_dev *hdev = vport->back;
2538 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2540 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2543 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2546 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2548 struct hclge_vport *vport = hclge_get_vport(handle);
2549 struct hclge_dev *hdev = vport->back;
2551 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2552 return hclge_set_autoneg_en(hdev, !halt);
2557 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2559 struct hclge_config_fec_cmd *req;
2560 struct hclge_desc desc;
2563 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2565 req = (struct hclge_config_fec_cmd *)desc.data;
2566 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2567 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2568 if (fec_mode & BIT(HNAE3_FEC_RS))
2569 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2570 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2571 if (fec_mode & BIT(HNAE3_FEC_BASER))
2572 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2573 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2575 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2577 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2582 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2584 struct hclge_vport *vport = hclge_get_vport(handle);
2585 struct hclge_dev *hdev = vport->back;
2586 struct hclge_mac *mac = &hdev->hw.mac;
2589 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2590 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2594 ret = hclge_set_fec_hw(hdev, fec_mode);
2598 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2602 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2605 struct hclge_vport *vport = hclge_get_vport(handle);
2606 struct hclge_dev *hdev = vport->back;
2607 struct hclge_mac *mac = &hdev->hw.mac;
2610 *fec_ability = mac->fec_ability;
2612 *fec_mode = mac->fec_mode;
2615 static int hclge_mac_init(struct hclge_dev *hdev)
2617 struct hclge_mac *mac = &hdev->hw.mac;
2620 hdev->support_sfp_query = true;
2621 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2622 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2623 hdev->hw.mac.duplex);
2627 if (hdev->hw.mac.support_autoneg) {
2628 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2635 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2636 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2641 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2643 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2647 ret = hclge_set_default_loopback(hdev);
2651 ret = hclge_buffer_alloc(hdev);
2653 dev_err(&hdev->pdev->dev,
2654 "allocate buffer fail, ret=%d\n", ret);
2659 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2661 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2662 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2663 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2664 hclge_wq, &hdev->service_task, 0);
2667 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2669 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2670 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2671 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2672 hclge_wq, &hdev->service_task, 0);
2675 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2677 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2678 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2679 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2680 hclge_wq, &hdev->service_task,
2684 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2686 struct hclge_link_status_cmd *req;
2687 struct hclge_desc desc;
2691 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2692 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2694 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2699 req = (struct hclge_link_status_cmd *)desc.data;
2700 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2702 return !!link_status;
2705 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2707 unsigned int mac_state;
2710 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2713 mac_state = hclge_get_mac_link_status(hdev);
2715 if (hdev->hw.mac.phydev) {
2716 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2717 link_stat = mac_state &
2718 hdev->hw.mac.phydev->link;
2723 link_stat = mac_state;
2729 static void hclge_update_link_status(struct hclge_dev *hdev)
2731 struct hnae3_client *rclient = hdev->roce_client;
2732 struct hnae3_client *client = hdev->nic_client;
2733 struct hnae3_handle *rhandle;
2734 struct hnae3_handle *handle;
2741 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2744 state = hclge_get_mac_phy_link(hdev);
2745 if (state != hdev->hw.mac.link) {
2746 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2747 handle = &hdev->vport[i].nic;
2748 client->ops->link_status_change(handle, state);
2749 hclge_config_mac_tnl_int(hdev, state);
2750 rhandle = &hdev->vport[i].roce;
2751 if (rclient && rclient->ops->link_status_change)
2752 rclient->ops->link_status_change(rhandle,
2755 hdev->hw.mac.link = state;
2758 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2761 static void hclge_update_port_capability(struct hclge_mac *mac)
2763 /* update fec ability by speed */
2764 hclge_convert_setting_fec(mac);
2766 /* firmware can not identify the backplane type, the media type
2767 * read from the configuration can help to deal with it
2769 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2770 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2771 mac->module_type = HNAE3_MODULE_TYPE_KR;
2772 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2773 mac->module_type = HNAE3_MODULE_TYPE_TP;
2775 if (mac->support_autoneg) {
2776 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2777 linkmode_copy(mac->advertising, mac->supported);
2779 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2781 linkmode_zero(mac->advertising);
2785 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2787 struct hclge_sfp_info_cmd *resp;
2788 struct hclge_desc desc;
2791 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2792 resp = (struct hclge_sfp_info_cmd *)desc.data;
2793 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2794 if (ret == -EOPNOTSUPP) {
2795 dev_warn(&hdev->pdev->dev,
2796 "IMP do not support get SFP speed %d\n", ret);
2799 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2803 *speed = le32_to_cpu(resp->speed);
2808 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2810 struct hclge_sfp_info_cmd *resp;
2811 struct hclge_desc desc;
2814 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2815 resp = (struct hclge_sfp_info_cmd *)desc.data;
2817 resp->query_type = QUERY_ACTIVE_SPEED;
2819 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2820 if (ret == -EOPNOTSUPP) {
2821 dev_warn(&hdev->pdev->dev,
2822 "IMP does not support get SFP info %d\n", ret);
2825 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2829 /* In some cases, the mac speed got from IMP may be 0, it shouldn't
2830 * be set to mac->speed.
2832 if (!le32_to_cpu(resp->speed))
2835 mac->speed = le32_to_cpu(resp->speed);
2836 /* if resp->speed_ability is 0, it means the firmware is an old
2837 * version, so do not update these params
2839 if (resp->speed_ability) {
2840 mac->module_type = le32_to_cpu(resp->module_type);
2841 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2842 mac->autoneg = resp->autoneg;
2843 mac->support_autoneg = resp->autoneg_ability;
2844 mac->speed_type = QUERY_ACTIVE_SPEED;
2845 if (!resp->active_fec)
2848 mac->fec_mode = BIT(resp->active_fec);
2850 mac->speed_type = QUERY_SFP_SPEED;
2856 static int hclge_update_port_info(struct hclge_dev *hdev)
2858 struct hclge_mac *mac = &hdev->hw.mac;
2859 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2862 /* get the port info from SFP cmd if not copper port */
2863 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2866 /* if IMP does not support getting SFP/qSFP info, return directly */
2867 if (!hdev->support_sfp_query)
2870 if (hdev->pdev->revision >= 0x21)
2871 ret = hclge_get_sfp_info(hdev, mac);
2873 ret = hclge_get_sfp_speed(hdev, &speed);
2875 if (ret == -EOPNOTSUPP) {
2876 hdev->support_sfp_query = false;
2882 if (hdev->pdev->revision >= 0x21) {
2883 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2884 hclge_update_port_capability(mac);
2887 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2890 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2891 return 0; /* do nothing if no SFP */
2893 /* must configure full duplex for SFP */
2894 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2898 static int hclge_get_status(struct hnae3_handle *handle)
2900 struct hclge_vport *vport = hclge_get_vport(handle);
2901 struct hclge_dev *hdev = vport->back;
2903 hclge_update_link_status(hdev);
2905 return hdev->hw.mac.link;
2908 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2910 if (!pci_num_vf(hdev->pdev)) {
2911 dev_err(&hdev->pdev->dev,
2912 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2916 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2917 dev_err(&hdev->pdev->dev,
2918 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2919 vf, pci_num_vf(hdev->pdev));
2923 /* VFs start from 1 in vport */
2924 vf += HCLGE_VF_VPORT_START_NUM;
2925 return &hdev->vport[vf];
2928 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2929 struct ifla_vf_info *ivf)
2931 struct hclge_vport *vport = hclge_get_vport(handle);
2932 struct hclge_dev *hdev = vport->back;
2934 vport = hclge_get_vf_vport(hdev, vf);
2939 ivf->linkstate = vport->vf_info.link_state;
2940 ivf->spoofchk = vport->vf_info.spoofchk;
2941 ivf->trusted = vport->vf_info.trusted;
2942 ivf->min_tx_rate = 0;
2943 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2944 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2945 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2946 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2947 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2952 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2955 struct hclge_vport *vport = hclge_get_vport(handle);
2956 struct hclge_dev *hdev = vport->back;
2958 vport = hclge_get_vf_vport(hdev, vf);
2962 vport->vf_info.link_state = link_state;
2967 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2969 u32 cmdq_src_reg, msix_src_reg;
2971 /* fetch the events from their corresponding regs */
2972 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2973 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2975 /* Assumption: If by any chance reset and mailbox events are reported
2976 * together then we will only process reset event in this go and will
2977 * defer the processing of the mailbox events. Since we would not have
2978 * cleared the RX CMDQ event this time, we would receive another
2979 * interrupt from H/W just for the mailbox.
2981 * check for vector0 reset event sources
2983 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
2984 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2985 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2986 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2987 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2988 hdev->rst_stats.imp_rst_cnt++;
2989 return HCLGE_VECTOR0_EVENT_RST;
2992 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
2993 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2994 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2995 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2996 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2997 hdev->rst_stats.global_rst_cnt++;
2998 return HCLGE_VECTOR0_EVENT_RST;
3001 /* check for vector0 msix event source */
3002 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3003 *clearval = msix_src_reg;
3004 return HCLGE_VECTOR0_EVENT_ERR;
3007 /* check for vector0 mailbox(=CMDQ RX) event source */
3008 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3009 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3010 *clearval = cmdq_src_reg;
3011 return HCLGE_VECTOR0_EVENT_MBX;
3014 /* print other vector0 event source */
3015 dev_info(&hdev->pdev->dev,
3016 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3017 cmdq_src_reg, msix_src_reg);
3018 *clearval = msix_src_reg;
3020 return HCLGE_VECTOR0_EVENT_OTHER;
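/* Editor's note: the decode above is strictly prioritized: IMP/global reset
 * first, then MSI-X error, then mailbox, then "other". The same ordering,
 * shown compactly (a sketch; the mask values are parameters here, not the
 * real register layout):
 */
#include <stdint.h>

enum example_v0_event { EX_EV_RST, EX_EV_ERR, EX_EV_MBX, EX_EV_OTHER };

static enum example_v0_event example_decode_vector0(uint32_t msix_src,
						    uint32_t cmdq_src,
						    uint32_t rst_mask,
						    uint32_t err_mask,
						    uint32_t mbx_mask)
{
	if (msix_src & rst_mask)	/* reset beats everything else */
		return EX_EV_RST;
	if (msix_src & err_mask)	/* then hardware error */
		return EX_EV_ERR;
	if (cmdq_src & mbx_mask)	/* then mailbox (CMDQ RX) */
		return EX_EV_MBX;
	return EX_EV_OTHER;
}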
3023 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3026 switch (event_type) {
3027 case HCLGE_VECTOR0_EVENT_RST:
3028 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3030 case HCLGE_VECTOR0_EVENT_MBX:
3031 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3038 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3040 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3041 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3042 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3043 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3044 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3047 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3049 writel(enable ? 1 : 0, vector->addr);
3052 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3054 struct hclge_dev *hdev = data;
3058 hclge_enable_vector(&hdev->misc_vector, false);
3059 event_cause = hclge_check_event_cause(hdev, &clearval);
3061 /* vector 0 interrupt is shared with reset and mailbox source events. */
3062 switch (event_cause) {
3063 case HCLGE_VECTOR0_EVENT_ERR:
3064 /* we do not know what type of reset is required now. This could
3065 * only be decided after we fetch the type of errors which
3066 * caused this event. Therefore, we will do below for now:
3067 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3068 * have deferred the type of reset to be used.
3069 * 2. Schedule the reset service task.
3070 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3071 * will fetch the correct type of reset. This would be done
3072 * by first decoding the types of errors.
3074 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3076 case HCLGE_VECTOR0_EVENT_RST:
3077 hclge_reset_task_schedule(hdev);
3079 case HCLGE_VECTOR0_EVENT_MBX:
3080 /* If we are here then,
3081 * 1. Either we are not handling any mbx task and we are not
3082 * scheduled as well
3084 * 2. We could be handling a mbx task but nothing more is
3085 * scheduled.
3086 * In both cases, we should schedule mbx task as there are more
3087 * mbx messages reported by this interrupt.
3088 */
3089 hclge_mbx_task_schedule(hdev);
3092 dev_warn(&hdev->pdev->dev,
3093 "received unknown or unhandled event of vector0\n");
3097 hclge_clear_event_cause(hdev, event_cause, clearval);
3099 /* Enable interrupt if it is not caused by reset. And when
3100 * clearval equals 0, it means the interrupt status may be
3101 * cleared by hardware before the driver reads the status register.
3102 * In this case, the vector0 interrupt should also be enabled.
3103 */
3104 if (!clearval ||
3105 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3106 hclge_enable_vector(&hdev->misc_vector, true);
3112 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3114 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3115 dev_warn(&hdev->pdev->dev,
3116 "vector(vector_id %d) has been freed.\n", vector_id);
3120 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3121 hdev->num_msi_left += 1;
3122 hdev->num_msi_used -= 1;
3125 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3127 struct hclge_misc_vector *vector = &hdev->misc_vector;
3129 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3131 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3132 hdev->vector_status[0] = 0;
3134 hdev->num_msi_left -= 1;
3135 hdev->num_msi_used += 1;
3138 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3139 const cpumask_t *mask)
3141 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3144 cpumask_copy(&hdev->affinity_mask, mask);
3147 static void hclge_irq_affinity_release(struct kref *ref)
3151 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3153 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3154 &hdev->affinity_mask);
3156 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3157 hdev->affinity_notify.release = hclge_irq_affinity_release;
3158 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3159 &hdev->affinity_notify);
3162 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3164 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3165 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3168 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3172 hclge_get_misc_vector(hdev);
3174 /* this would be explicitly freed in the end */
3175 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3176 HCLGE_NAME, pci_name(hdev->pdev));
3177 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3178 0, hdev->misc_vector.name, hdev);
3180 hclge_free_vector(hdev, 0);
3181 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3182 hdev->misc_vector.vector_irq);
3188 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3190 free_irq(hdev->misc_vector.vector_irq, hdev);
3191 hclge_free_vector(hdev, 0);
3194 int hclge_notify_client(struct hclge_dev *hdev,
3195 enum hnae3_reset_notify_type type)
3197 struct hnae3_client *client = hdev->nic_client;
3200 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3203 if (!client->ops->reset_notify)
3206 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3207 struct hnae3_handle *handle = &hdev->vport[i].nic;
3210 ret = client->ops->reset_notify(handle, type);
3212 dev_err(&hdev->pdev->dev,
3213 "notify nic client failed %d(%d)\n", type, ret);
3221 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3222 enum hnae3_reset_notify_type type)
3224 struct hnae3_client *client = hdev->roce_client;
3228 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3231 if (!client->ops->reset_notify)
3234 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3235 struct hnae3_handle *handle = &hdev->vport[i].roce;
3237 ret = client->ops->reset_notify(handle, type);
3239 dev_err(&hdev->pdev->dev,
3240 "notify roce client failed %d(%d)",
3249 static int hclge_reset_wait(struct hclge_dev *hdev)
3251 #define HCLGE_RESET_WAIT_MS 100
3252 #define HCLGE_RESET_WAIT_CNT 350
3254 u32 val, reg, reg_bit;
3257 switch (hdev->reset_type) {
3258 case HNAE3_IMP_RESET:
3259 reg = HCLGE_GLOBAL_RESET_REG;
3260 reg_bit = HCLGE_IMP_RESET_BIT;
3262 case HNAE3_GLOBAL_RESET:
3263 reg = HCLGE_GLOBAL_RESET_REG;
3264 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3266 case HNAE3_FUNC_RESET:
3267 reg = HCLGE_FUN_RST_ING;
3268 reg_bit = HCLGE_FUN_RST_ING_B;
3271 dev_err(&hdev->pdev->dev,
3272 "Wait for unsupported reset type: %d\n",
3277 val = hclge_read_dev(&hdev->hw, reg);
3278 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3279 msleep(HCLGE_RESET_WAIT_MS);
3280 val = hclge_read_dev(&hdev->hw, reg);
3284 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3285 dev_warn(&hdev->pdev->dev,
3286 "Wait for reset timeout: %d\n", hdev->reset_type);
3293 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3295 struct hclge_vf_rst_cmd *req;
3296 struct hclge_desc desc;
3298 req = (struct hclge_vf_rst_cmd *)desc.data;
3299 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3300 req->dest_vfid = func_id;
3305 return hclge_cmd_send(&hdev->hw, &desc, 1);
3308 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3312 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3313 struct hclge_vport *vport = &hdev->vport[i];
3316 /* Send cmd to set/clear VF's FUNC_RST_ING */
3317 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3319 dev_err(&hdev->pdev->dev,
3320 "set vf(%u) rst failed %d!\n",
3321 vport->vport_id, ret);
3325 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3328 /* Inform VF to process the reset.
3329 * hclge_inform_reset_assert_to_vf may fail if VF
3330 * driver is not loaded.
3332 ret = hclge_inform_reset_assert_to_vf(vport);
3334 dev_warn(&hdev->pdev->dev,
3335 "inform reset to vf(%u) failed %d!\n",
3336 vport->vport_id, ret);
3342 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3344 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3345 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3346 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3349 hclge_mbx_handler(hdev);
3351 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3354 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3356 struct hclge_pf_rst_sync_cmd *req;
3357 struct hclge_desc desc;
3361 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3362 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3365 /* vf needs to down the netdev by mbx during PF or FLR reset */
3366 hclge_mailbox_service_task(hdev);
3368 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3369 /* for compatibility with old firmware, wait
3370 * 100 ms for VF to stop IO
3372 if (ret == -EOPNOTSUPP) {
3373 msleep(HCLGE_RESET_SYNC_TIME);
3376 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3379 } else if (req->all_vf_ready) {
3382 msleep(HCLGE_PF_RESET_SYNC_TIME);
3383 hclge_cmd_reuse_desc(&desc, true);
3384 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3386 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
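/* Editor's note: the sync loop above polls the VFs' ready flag up to
 * HCLGE_PF_RESET_SYNC_CNT (1500) times, sleeping HCLGE_PF_RESET_SYNC_TIME
 * (20 ms) between tries, and treats -EOPNOTSUPP as "old firmware: just wait
 * a fixed 100 ms". The control flow restated as a callback-based sketch
 * (hypothetical names):
 */
static void example_reset_sync(int (*query_all_vf_ready)(int *ready),
			       void (*sleep_ms)(unsigned int))
{
	int cnt = 0;

	do {
		int ready = 0;
		int ret = query_all_vf_ready(&ready);

		if (ret == -95) {	/* -EOPNOTSUPP: old firmware */
			sleep_ms(100);	/* HCLGE_RESET_SYNC_TIME */
			return;
		}
		if (ret || ready)	/* hard failure, or all VFs ready */
			return;

		sleep_ms(20);		/* HCLGE_PF_RESET_SYNC_TIME */
	} while (cnt++ < 1500);		/* HCLGE_PF_RESET_SYNC_CNT */
}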
3389 void hclge_report_hw_error(struct hclge_dev *hdev,
3390 enum hnae3_hw_error_type type)
3392 struct hnae3_client *client = hdev->nic_client;
3395 if (!client || !client->ops->process_hw_error ||
3396 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3399 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3400 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3403 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3407 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3408 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3409 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3410 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3411 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3414 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3415 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3416 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3417 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3421 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3423 struct hclge_desc desc;
3424 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3427 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3428 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3429 req->fun_reset_vfid = func_id;
3431 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3433 dev_err(&hdev->pdev->dev,
3434 "send function reset cmd fail, status =%d\n", ret);
3439 static void hclge_do_reset(struct hclge_dev *hdev)
3441 struct hnae3_handle *handle = &hdev->vport[0].nic;
3442 struct pci_dev *pdev = hdev->pdev;
3445 if (hclge_get_hw_reset_stat(handle)) {
3446 dev_info(&pdev->dev, "hardware reset not finished\n");
3447 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3448 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3449 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3453 switch (hdev->reset_type) {
3454 case HNAE3_GLOBAL_RESET:
3455 dev_info(&pdev->dev, "global reset requested\n");
3456 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3457 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3458 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3460 case HNAE3_FUNC_RESET:
3461 dev_info(&pdev->dev, "PF reset requested\n");
3462 /* schedule again to check later */
3463 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3464 hclge_reset_task_schedule(hdev);
3467 dev_warn(&pdev->dev,
3468 "unsupported reset type: %d\n", hdev->reset_type);
3473 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3474 unsigned long *addr)
3476 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3477 struct hclge_dev *hdev = ae_dev->priv;
3479 /* first, resolve any unknown reset type to the known type(s) */
3480 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3481 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3482 HCLGE_MISC_VECTOR_INT_STS);
3483 /* we will intentionally ignore any errors from this function
3484 * as we will end up in *some* reset request in any case
3486 if (hclge_handle_hw_msix_error(hdev, addr))
3487 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3490 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3491 /* We deferred the clearing of the error event which caused
3492 * the interrupt since it was not possible to do that in
3493 * interrupt context (and this is the reason we introduced
3494 * new UNKNOWN reset type). Now, the errors have been
3495 * handled and cleared in hardware we can safely enable
3496 * interrupts. This is an exception to the norm.
3498 hclge_enable_vector(&hdev->misc_vector, true);
3501 /* return the highest priority reset level amongst all */
3502 if (test_bit(HNAE3_IMP_RESET, addr)) {
3503 rst_level = HNAE3_IMP_RESET;
3504 clear_bit(HNAE3_IMP_RESET, addr);
3505 clear_bit(HNAE3_GLOBAL_RESET, addr);
3506 clear_bit(HNAE3_FUNC_RESET, addr);
3507 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3508 rst_level = HNAE3_GLOBAL_RESET;
3509 clear_bit(HNAE3_GLOBAL_RESET, addr);
3510 clear_bit(HNAE3_FUNC_RESET, addr);
3511 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3512 rst_level = HNAE3_FUNC_RESET;
3513 clear_bit(HNAE3_FUNC_RESET, addr);
3514 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3515 rst_level = HNAE3_FLR_RESET;
3516 clear_bit(HNAE3_FLR_RESET, addr);
3519 if (hdev->reset_type != HNAE3_NONE_RESET &&
3520 rst_level < hdev->reset_type)
3521 return HNAE3_NONE_RESET;
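/* Editor's note: reset levels are strictly ordered (IMP > GLOBAL > FUNC,
 * with FLR handled on its own), and taking a level also clears the lower
 * levels it subsumes, exactly as in the if/else chain above. A standalone
 * sketch of the same resolution (the enum values are hypothetical but keep
 * the ordering):
 */
enum example_rst {
	EX_RST_NONE, EX_RST_FLR, EX_RST_FUNC, EX_RST_GLOBAL, EX_RST_IMP
};

static enum example_rst example_highest_pending(unsigned long *pending)
{
	if (*pending & (1UL << EX_RST_IMP)) {
		/* IMP reset subsumes global and function resets */
		*pending &= ~((1UL << EX_RST_IMP) | (1UL << EX_RST_GLOBAL) |
			      (1UL << EX_RST_FUNC));
		return EX_RST_IMP;
	}
	if (*pending & (1UL << EX_RST_GLOBAL)) {
		*pending &= ~((1UL << EX_RST_GLOBAL) | (1UL << EX_RST_FUNC));
		return EX_RST_GLOBAL;
	}
	if (*pending & (1UL << EX_RST_FUNC)) {
		*pending &= ~(1UL << EX_RST_FUNC);
		return EX_RST_FUNC;
	}
	if (*pending & (1UL << EX_RST_FLR)) {
		*pending &= ~(1UL << EX_RST_FLR);
		return EX_RST_FLR;
	}
	return EX_RST_NONE;
}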
3526 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3530 switch (hdev->reset_type) {
3531 case HNAE3_IMP_RESET:
3532 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3534 case HNAE3_GLOBAL_RESET:
3535 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3544 /* For revision 0x20, the reset interrupt source
3545 * can only be cleared after the hardware reset is done
3547 if (hdev->pdev->revision == 0x20)
3548 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3551 hclge_enable_vector(&hdev->misc_vector, true);
3554 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3558 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3560 reg_val |= HCLGE_NIC_SW_RST_RDY;
3562 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3564 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3567 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3571 ret = hclge_set_all_vf_rst(hdev, true);
3575 hclge_func_reset_sync_vf(hdev);
3580 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3585 switch (hdev->reset_type) {
3586 case HNAE3_FUNC_RESET:
3587 ret = hclge_func_reset_notify_vf(hdev);
3591 ret = hclge_func_reset_cmd(hdev, 0);
3593 dev_err(&hdev->pdev->dev,
3594 "asserting function reset fail %d!\n", ret);
3598 /* After performing pf reset, it is not necessary to do the
3599 * mailbox handling or send any command to firmware, because
3600 * any mailbox handling or command to firmware is only valid
3601 * after hclge_cmd_init is called.
3603 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3604 hdev->rst_stats.pf_rst_cnt++;
3606 case HNAE3_FLR_RESET:
3607 ret = hclge_func_reset_notify_vf(hdev);
3611 case HNAE3_IMP_RESET:
3612 hclge_handle_imp_error(hdev);
3613 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3614 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3615 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3621 /* inform hardware that preparatory work is done */
3622 msleep(HCLGE_RESET_SYNC_TIME);
3623 hclge_reset_handshake(hdev, true);
3624 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3629 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3631 #define MAX_RESET_FAIL_CNT 5
3633 if (hdev->reset_pending) {
3634 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3635 hdev->reset_pending);
3637 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3638 HCLGE_RESET_INT_M) {
3639 dev_info(&hdev->pdev->dev,
3640 "reset failed because new reset interrupt\n");
3641 hclge_clear_reset_cause(hdev);
3643 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3644 hdev->rst_stats.reset_fail_cnt++;
3645 set_bit(hdev->reset_type, &hdev->reset_pending);
3646 dev_info(&hdev->pdev->dev,
3647 "re-schedule reset task(%u)\n",
3648 hdev->rst_stats.reset_fail_cnt);
3652 hclge_clear_reset_cause(hdev);
3654 /* recover the handshake status when reset fails */
3655 hclge_reset_handshake(hdev, true);
3657 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3659 hclge_dbg_dump_rst_info(hdev);
3661 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3666 static int hclge_set_rst_done(struct hclge_dev *hdev)
3668 struct hclge_pf_rst_done_cmd *req;
3669 struct hclge_desc desc;
3672 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3673 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3674 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3676 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3677 /* To be compatible with the old firmware, which does not support
3678 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3679 * return success
3680 */
3681 if (ret == -EOPNOTSUPP) {
3682 dev_warn(&hdev->pdev->dev,
3683 "current firmware does not support command(0x%x)!\n",
3684 HCLGE_OPC_PF_RST_DONE);
3687 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3694 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3698 switch (hdev->reset_type) {
3699 case HNAE3_FUNC_RESET:
3701 case HNAE3_FLR_RESET:
3702 ret = hclge_set_all_vf_rst(hdev, false);
3704 case HNAE3_GLOBAL_RESET:
3706 case HNAE3_IMP_RESET:
3707 ret = hclge_set_rst_done(hdev);
3713 /* clear up the handshake status after re-initialization is done */
3714 hclge_reset_handshake(hdev, false);
3719 static int hclge_reset_stack(struct hclge_dev *hdev)
3723 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3727 ret = hclge_reset_ae_dev(hdev->ae_dev);
3731 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3734 static int hclge_reset_prepare(struct hclge_dev *hdev)
3738 hdev->rst_stats.reset_cnt++;
3739 /* perform reset of the stack & ae device for a client */
3740 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3745 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3750 return hclge_reset_prepare_wait(hdev);
3753 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3755 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3756 enum hnae3_reset_type reset_level;
3759 hdev->rst_stats.hw_reset_done_cnt++;
3761 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3766 ret = hclge_reset_stack(hdev);
3771 hclge_clear_reset_cause(hdev);
3773 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3774 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3778 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3781 ret = hclge_reset_prepare_up(hdev);
3786 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3791 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3795 hdev->last_reset_time = jiffies;
3796 hdev->rst_stats.reset_fail_cnt = 0;
3797 hdev->rst_stats.reset_done_cnt++;
3798 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3800 /* if default_reset_request has a higher level reset request,
3801 * it should be handled as soon as possible, since some errors
3802 * need this kind of reset to be fixed.
3803 */
3804 reset_level = hclge_get_reset_level(ae_dev,
3805 &hdev->default_reset_request);
3806 if (reset_level != HNAE3_NONE_RESET)
3807 set_bit(reset_level, &hdev->reset_request);
3812 static void hclge_reset(struct hclge_dev *hdev)
3814 if (hclge_reset_prepare(hdev))
3817 if (hclge_reset_wait(hdev))
3820 if (hclge_reset_rebuild(hdev))
3826 if (hclge_reset_err_handle(hdev))
3827 hclge_reset_task_schedule(hdev);
3830 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3832 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3833 struct hclge_dev *hdev = ae_dev->priv;
3835 /* We might end up getting called broadly because of 2 below cases:
3836 * 1. Recoverable error was conveyed through APEI and only way to bring
3837 * normalcy is to reset.
3838 * 2. A new reset request from the stack due to timeout
3840 * For the first case, the error event might not have an ae handle available.
3841 * Check if this is a new reset request and we are not here just because
3842 * last reset attempt did not succeed and watchdog hit us again. We will
3843 * know this if last reset request did not occur very recently (watchdog
3844 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ)
3845 * In case of new request we reset the "reset level" to PF reset.
3846 * And if it is a repeat reset request of the most recent one then we
3847 * want to make sure we throttle the reset request. Therefore, we will
3848 * not allow it again before 3*HZ times.
3851 handle = &hdev->vport[0].nic;
3853 if (time_before(jiffies, (hdev->last_reset_time +
3854 HCLGE_RESET_INTERVAL))) {
3855 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3857 } else if (hdev->default_reset_request) {
3859 hclge_get_reset_level(ae_dev,
3860 &hdev->default_reset_request);
3861 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3862 hdev->reset_level = HNAE3_FUNC_RESET;
3865 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3868 /* request reset & schedule reset task */
3869 set_bit(hdev->reset_level, &hdev->reset_request);
3870 hclge_reset_task_schedule(hdev);
3872 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3873 hdev->reset_level++;
3876 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3877 enum hnae3_reset_type rst_type)
3879 struct hclge_dev *hdev = ae_dev->priv;
3881 set_bit(rst_type, &hdev->default_reset_request);
3884 static void hclge_reset_timer(struct timer_list *t)
3886 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3888 /* if default_reset_request has no value, it means that this reset
3889 * request has already been handled, so just return here
3891 if (!hdev->default_reset_request)
3894 dev_info(&hdev->pdev->dev,
3895 "triggering reset in reset timer\n");
3896 hclge_reset_event(hdev->pdev, NULL);
3899 static void hclge_reset_subtask(struct hclge_dev *hdev)
3901 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3903 /* check if there is any ongoing reset in the hardware. This status can
3904 * be checked from reset_pending. If there is then, we need to wait for
3905 * hardware to complete reset.
3906 * a. If we are able to figure out in reasonable time that hardware
3907 * has fully reset then, we can proceed with driver, client
3908 * reset.
3909 * b. else, we can come back later to check this status so re-sched
3910 * now.
3911 */
3912 hdev->last_reset_time = jiffies;
3913 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3914 if (hdev->reset_type != HNAE3_NONE_RESET)
3917 /* check if we got any *new* reset requests to be honored */
3918 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3919 if (hdev->reset_type != HNAE3_NONE_RESET)
3920 hclge_do_reset(hdev);
3922 hdev->reset_type = HNAE3_NONE_RESET;
3925 static void hclge_reset_service_task(struct hclge_dev *hdev)
3927 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3930 down(&hdev->reset_sem);
3931 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3933 hclge_reset_subtask(hdev);
3935 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3936 up(&hdev->reset_sem);
3939 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3943 /* start from vport 1 since the PF is always alive */
3944 for (i = 1; i < hdev->num_alloc_vport; i++) {
3945 struct hclge_vport *vport = &hdev->vport[i];
3947 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3948 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3950 /* If vf is not alive, set to default value */
3951 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3952 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3956 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3958 unsigned long delta = round_jiffies_relative(HZ);
3960 /* Always handle the link updating to make sure link state is
3961 * updated when it is triggered by mbx.
3963 hclge_update_link_status(hdev);
3964 hclge_sync_mac_table(hdev);
3965 hclge_sync_promisc_mode(hdev);
3967 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3968 delta = jiffies - hdev->last_serv_processed;
3970 if (delta < round_jiffies_relative(HZ)) {
3971 delta = round_jiffies_relative(HZ) - delta;
3976 hdev->serv_processed_cnt++;
3977 hclge_update_vport_alive(hdev);
3979 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3980 hdev->last_serv_processed = jiffies;
3984 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3985 hclge_update_stats_for_all(hdev);
3987 hclge_update_port_info(hdev);
3988 hclge_sync_vlan_filter(hdev);
3990 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3991 hclge_rfs_filter_expire(hdev);
3993 hdev->last_serv_processed = jiffies;
3996 hclge_task_schedule(hdev, delta);
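/* Editor's note: the scheduling above aims at roughly one full service pass
 * per second; if the previous pass finished less than a second ago, only the
 * remaining fraction of the second is slept. A jiffies-free sketch of the
 * delay computation (times in ms, a 1000 ms period standing in for HZ):
 */
static unsigned int example_next_delay_ms(unsigned int now_ms,
					  unsigned int last_processed_ms)
{
	const unsigned int period_ms = 1000;	/* one second, i.e. HZ */
	unsigned int delta = now_ms - last_processed_ms;

	/* last pass was recent: sleep only the remainder of the period */
	if (delta < period_ms)
		return period_ms - delta;

	return period_ms;	/* otherwise a full period */
}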
3999 static void hclge_service_task(struct work_struct *work)
4001 struct hclge_dev *hdev =
4002 container_of(work, struct hclge_dev, service_task.work);
4004 hclge_reset_service_task(hdev);
4005 hclge_mailbox_service_task(hdev);
4006 hclge_periodic_service_task(hdev);
4008 /* Handle reset and mbx again in case the periodic task delays the
4009 * handling by calling hclge_task_schedule() in
4010 * hclge_periodic_service_task().
4012 hclge_reset_service_task(hdev);
4013 hclge_mailbox_service_task(hdev);
4016 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4018 /* VF handle has no client */
4019 if (!handle->client)
4020 return container_of(handle, struct hclge_vport, nic);
4021 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4022 return container_of(handle, struct hclge_vport, roce);
4024 return container_of(handle, struct hclge_vport, nic);
4027 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4028 struct hnae3_vector_info *vector_info)
4030 struct hclge_vport *vport = hclge_get_vport(handle);
4031 struct hnae3_vector_info *vector = vector_info;
4032 struct hclge_dev *hdev = vport->back;
4036 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4037 vector_num = min(hdev->num_msi_left, vector_num);
4039 for (j = 0; j < vector_num; j++) {
4040 for (i = 1; i < hdev->num_msi; i++) {
4041 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4042 vector->vector = pci_irq_vector(hdev->pdev, i);
4043 vector->io_addr = hdev->hw.io_base +
4044 HCLGE_VECTOR_REG_BASE +
4045 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4047 HCLGE_VECTOR_VF_OFFSET;
4048 hdev->vector_status[i] = vport->vport_id;
4049 hdev->vector_irq[i] = vector->vector;
4058 hdev->num_msi_left -= alloc;
4059 hdev->num_msi_used += alloc;
4064 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4068 for (i = 0; i < hdev->num_msi; i++)
4069 if (vector == hdev->vector_irq[i])
4075 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4077 struct hclge_vport *vport = hclge_get_vport(handle);
4078 struct hclge_dev *hdev = vport->back;
4081 vector_id = hclge_get_vector_index(hdev, vector);
4082 if (vector_id < 0) {
4083 dev_err(&hdev->pdev->dev,
4084 "Get vector index fail. vector = %d\n", vector);
4088 hclge_free_vector(hdev, vector_id);
4093 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4095 return HCLGE_RSS_KEY_SIZE;
4098 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4100 return HCLGE_RSS_IND_TBL_SIZE;
4103 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4104 const u8 hfunc, const u8 *key)
4106 struct hclge_rss_config_cmd *req;
4107 unsigned int key_offset = 0;
4108 struct hclge_desc desc;
4113 key_counts = HCLGE_RSS_KEY_SIZE;
4114 req = (struct hclge_rss_config_cmd *)desc.data;
4116 while (key_counts) {
4117 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4120 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4121 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4123 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4124 memcpy(req->hash_key,
4125 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4127 key_counts -= key_size;
4129 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4131 dev_err(&hdev->pdev->dev,
4132 "Configure RSS config fail, status = %d\n",
4140 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4142 struct hclge_rss_indirection_table_cmd *req;
4143 struct hclge_desc desc;
4147 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4149 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4150 hclge_cmd_setup_basic_desc
4151 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4153 req->start_table_index =
4154 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4155 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4157 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4158 req->rss_result[j] =
4159 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4161 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4163 dev_err(&hdev->pdev->dev,
4164 "Configure rss indir table fail,status = %d\n",
4172 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4173 u16 *tc_size, u16 *tc_offset)
4175 struct hclge_rss_tc_mode_cmd *req;
4176 struct hclge_desc desc;
4180 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4181 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4183 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4186 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4187 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4188 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4189 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4190 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4192 req->rss_tc_mode[i] = cpu_to_le16(mode);
4195 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4197 dev_err(&hdev->pdev->dev,
4198 "Configure rss tc mode fail, status = %d\n", ret);
4203 static void hclge_get_rss_type(struct hclge_vport *vport)
4205 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4206 vport->rss_tuple_sets.ipv4_udp_en ||
4207 vport->rss_tuple_sets.ipv4_sctp_en ||
4208 vport->rss_tuple_sets.ipv6_tcp_en ||
4209 vport->rss_tuple_sets.ipv6_udp_en ||
4210 vport->rss_tuple_sets.ipv6_sctp_en)
4211 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4212 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4213 vport->rss_tuple_sets.ipv6_fragment_en)
4214 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4216 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4219 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4221 struct hclge_rss_input_tuple_cmd *req;
4222 struct hclge_desc desc;
4225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4227 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4229 /* Get the tuple cfg from pf */
4230 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4231 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4232 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4233 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4234 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4235 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4236 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4237 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4238 hclge_get_rss_type(&hdev->vport[0]);
4239 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4241 dev_err(&hdev->pdev->dev,
4242 "Configure rss input fail, status = %d\n", ret);
4246 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4249 struct hclge_vport *vport = hclge_get_vport(handle);
4252 /* Get hash algorithm */
4254 switch (vport->rss_algo) {
4255 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4256 *hfunc = ETH_RSS_HASH_TOP;
4258 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4259 *hfunc = ETH_RSS_HASH_XOR;
4262 *hfunc = ETH_RSS_HASH_UNKNOWN;
4267 /* Get the RSS Key required by the user */
4269 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4271 /* Get indirection table */
4273 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4274 indir[i] = vport->rss_indirection_tbl[i];
4279 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4280 const u8 *key, const u8 hfunc)
4282 struct hclge_vport *vport = hclge_get_vport(handle);
4283 struct hclge_dev *hdev = vport->back;
4287 /* Set the RSS Hash Key if specified by the user */
4290 case ETH_RSS_HASH_TOP:
4291 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4293 case ETH_RSS_HASH_XOR:
4294 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4296 case ETH_RSS_HASH_NO_CHANGE:
4297 hash_algo = vport->rss_algo;
4303 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4307 /* Update the shadow RSS key with the user specified key */
4308 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4309 vport->rss_algo = hash_algo;
4312 /* Update the shadow RSS table with user specified qids */
4313 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4314 vport->rss_indirection_tbl[i] = indir[i];
4316 /* Update the hardware */
4317 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4320 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4322 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4324 if (nfc->data & RXH_L4_B_2_3)
4325 hash_sets |= HCLGE_D_PORT_BIT;
4327 hash_sets &= ~HCLGE_D_PORT_BIT;
4329 if (nfc->data & RXH_IP_SRC)
4330 hash_sets |= HCLGE_S_IP_BIT;
4332 hash_sets &= ~HCLGE_S_IP_BIT;
4334 if (nfc->data & RXH_IP_DST)
4335 hash_sets |= HCLGE_D_IP_BIT;
4337 hash_sets &= ~HCLGE_D_IP_BIT;
4339 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4340 hash_sets |= HCLGE_V_TAG_BIT;
4345 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4346 struct ethtool_rxnfc *nfc)
4348 struct hclge_vport *vport = hclge_get_vport(handle);
4349 struct hclge_dev *hdev = vport->back;
4350 struct hclge_rss_input_tuple_cmd *req;
4351 struct hclge_desc desc;
4355 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4356 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4359 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4360 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4362 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4363 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4364 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4365 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4366 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4367 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4368 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4369 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4371 tuple_sets = hclge_get_rss_hash_bits(nfc);
4372 switch (nfc->flow_type) {
4374 req->ipv4_tcp_en = tuple_sets;
4377 req->ipv6_tcp_en = tuple_sets;
4380 req->ipv4_udp_en = tuple_sets;
4383 req->ipv6_udp_en = tuple_sets;
4386 req->ipv4_sctp_en = tuple_sets;
4389 if ((nfc->data & RXH_L4_B_0_1) ||
4390 (nfc->data & RXH_L4_B_2_3))
4393 req->ipv6_sctp_en = tuple_sets;
4396 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4399 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4405 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4407 dev_err(&hdev->pdev->dev,
4408 "Set rss tuple fail, status = %d\n", ret);
4412 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4413 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4414 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4415 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4416 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4417 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4418 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4419 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4420 hclge_get_rss_type(vport);
4424 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4425 struct ethtool_rxnfc *nfc)
4427 struct hclge_vport *vport = hclge_get_vport(handle);
4432 switch (nfc->flow_type) {
4434 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4437 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4440 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4443 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4446 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4449 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4453 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4462 if (tuple_sets & HCLGE_D_PORT_BIT)
4463 nfc->data |= RXH_L4_B_2_3;
4464 if (tuple_sets & HCLGE_S_PORT_BIT)
4465 nfc->data |= RXH_L4_B_0_1;
4466 if (tuple_sets & HCLGE_D_IP_BIT)
4467 nfc->data |= RXH_IP_DST;
4468 if (tuple_sets & HCLGE_S_IP_BIT)
4469 nfc->data |= RXH_IP_SRC;
4474 static int hclge_get_tc_size(struct hnae3_handle *handle)
4476 struct hclge_vport *vport = hclge_get_vport(handle);
4477 struct hclge_dev *hdev = vport->back;
4479 return hdev->rss_size_max;
4482 int hclge_rss_init_hw(struct hclge_dev *hdev)
4484 struct hclge_vport *vport = hdev->vport;
4485 u8 *rss_indir = vport[0].rss_indirection_tbl;
4486 u16 rss_size = vport[0].alloc_rss_size;
4487 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4488 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4489 u8 *key = vport[0].rss_hash_key;
4490 u8 hfunc = vport[0].rss_algo;
4491 u16 tc_valid[HCLGE_MAX_TC_NUM];
4496 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4500 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4504 ret = hclge_set_rss_input_tuple(hdev);
4508 /* Each TC has the same queue size, and the tc_size set to hardware is
4509 * the log2 of the roundup power of two of rss_size; the actual queue
4510 * size is limited by the indirection table.
4511 */
4512 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4513 dev_err(&hdev->pdev->dev,
4514 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4519 roundup_size = roundup_pow_of_two(rss_size);
4520 roundup_size = ilog2(roundup_size);
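/* Worked example: with rss_size = 24, roundup_pow_of_two() yields 32, so
 * the tc_size written to hardware is ilog2(32) = 5, while tc_offset for
 * TC i in the loop below is 24 * i.
 */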
4522 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4523 tc_valid[i] = 0;
4525 if (!(hdev->hw_tc_map & BIT(i)))
4526 continue;
4528 tc_valid[i] = 1;
4529 tc_size[i] = roundup_size;
4530 tc_offset[i] = rss_size * i;
4531 }
4533 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4536 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4538 struct hclge_vport *vport = hdev->vport;
4541 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4542 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4543 vport[j].rss_indirection_tbl[i] =
4544 i % vport[j].alloc_rss_size;
4548 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4550 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4551 struct hclge_vport *vport = hdev->vport;
4553 if (hdev->pdev->revision >= 0x21)
4554 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4556 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4557 vport[i].rss_tuple_sets.ipv4_tcp_en =
4558 HCLGE_RSS_INPUT_TUPLE_OTHER;
4559 vport[i].rss_tuple_sets.ipv4_udp_en =
4560 HCLGE_RSS_INPUT_TUPLE_OTHER;
4561 vport[i].rss_tuple_sets.ipv4_sctp_en =
4562 HCLGE_RSS_INPUT_TUPLE_SCTP;
4563 vport[i].rss_tuple_sets.ipv4_fragment_en =
4564 HCLGE_RSS_INPUT_TUPLE_OTHER;
4565 vport[i].rss_tuple_sets.ipv6_tcp_en =
4566 HCLGE_RSS_INPUT_TUPLE_OTHER;
4567 vport[i].rss_tuple_sets.ipv6_udp_en =
4568 HCLGE_RSS_INPUT_TUPLE_OTHER;
4569 vport[i].rss_tuple_sets.ipv6_sctp_en =
4570 HCLGE_RSS_INPUT_TUPLE_SCTP;
4571 vport[i].rss_tuple_sets.ipv6_fragment_en =
4572 HCLGE_RSS_INPUT_TUPLE_OTHER;
4574 vport[i].rss_algo = rss_algo;
4576 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4577 HCLGE_RSS_KEY_SIZE);
4580 hclge_rss_indir_init_cfg(hdev);
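/* Bind (or unbind) every ring in the chain to the given vector. A single
 * command descriptor holds at most HCLGE_VECTOR_ELEMENTS_PER_CMD
 * (ring type, tqp id, GL index) entries, so the loop below sends a full
 * descriptor and starts a fresh one whenever that limit is reached,
 * letting chains of any length be mapped with bounded-size commands.
 */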
4583 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4584 int vector_id, bool en,
4585 struct hnae3_ring_chain_node *ring_chain)
4587 struct hclge_dev *hdev = vport->back;
4588 struct hnae3_ring_chain_node *node;
4589 struct hclge_desc desc;
4590 struct hclge_ctrl_vector_chain_cmd *req =
4591 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4592 enum hclge_cmd_status status;
4593 enum hclge_opcode_type op;
4594 u16 tqp_type_and_id;
4597 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4598 hclge_cmd_setup_basic_desc(&desc, op, false);
4599 req->int_vector_id = vector_id;
4602 for (node = ring_chain; node; node = node->next) {
4603 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4604 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4606 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4607 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4608 HCLGE_TQP_ID_S, node->tqp_index);
4609 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4611 hnae3_get_field(node->int_gl_idx,
4612 HNAE3_RING_GL_IDX_M,
4613 HNAE3_RING_GL_IDX_S));
4614 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4615 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4616 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4617 req->vfid = vport->vport_id;
4619 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4621 dev_err(&hdev->pdev->dev,
4622 "Map TQP fail, status is %d.\n",
4628 hclge_cmd_setup_basic_desc(&desc,
4631 req->int_vector_id = vector_id;
4636 req->int_cause_num = i;
4637 req->vfid = vport->vport_id;
4638 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4640 dev_err(&hdev->pdev->dev,
4641 "Map TQP fail, status is %d.\n", status);
4649 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4650 struct hnae3_ring_chain_node *ring_chain)
4652 struct hclge_vport *vport = hclge_get_vport(handle);
4653 struct hclge_dev *hdev = vport->back;
4656 vector_id = hclge_get_vector_index(hdev, vector);
4657 if (vector_id < 0) {
4658 dev_err(&hdev->pdev->dev,
4659 "failed to get vector index. vector=%d\n", vector);
4663 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4666 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4667 struct hnae3_ring_chain_node *ring_chain)
4669 struct hclge_vport *vport = hclge_get_vport(handle);
4670 struct hclge_dev *hdev = vport->back;
4673 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4676 vector_id = hclge_get_vector_index(hdev, vector);
4677 if (vector_id < 0) {
4678 dev_err(&handle->pdev->dev,
4679 "Get vector index fail. ret =%d\n", vector_id);
4683 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4685 dev_err(&handle->pdev->dev,
4686 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4692 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4693 struct hclge_promisc_param *param)
4695 struct hclge_promisc_cfg_cmd *req;
4696 struct hclge_desc desc;
4699 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4701 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4702 req->vf_id = param->vf_id;
4704 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4705 * pdev revision 0x20; newer revisions support them. Setting these two
4706 * fields does not cause an error when the driver sends the command to
4707 * the firmware on revision 0x20.
4708 */
4709 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4710 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4712 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4714 dev_err(&hdev->pdev->dev,
4715 "failed to set vport %d promisc mode, ret = %d.\n",
4721 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4722 bool en_uc, bool en_mc, bool en_bc,
4728 memset(param, 0, sizeof(struct hclge_promisc_param));
4729 if (en_uc)
4730 param->enable = HCLGE_PROMISC_EN_UC;
4731 if (en_mc)
4732 param->enable |= HCLGE_PROMISC_EN_MC;
4733 if (en_bc)
4734 param->enable |= HCLGE_PROMISC_EN_BC;
4735 param->vf_id = vport_id;
4738 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4739 bool en_mc_pmc, bool en_bc_pmc)
4741 struct hclge_dev *hdev = vport->back;
4742 struct hclge_promisc_param param;
4744 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4745 vport->vport_id);
4746 return hclge_cmd_set_promisc_mode(hdev, &param);
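/* Illustrative trigger (the interface name is only an example): a command
 * such as
 *   ip link set eth0 promisc on
 * makes the stack sync the rx mode, which reaches hclge_set_promisc_mode()
 * below through the hns3 netdev ops.
 */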
4749 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4750 bool en_mc_pmc)
4752 struct hclge_vport *vport = hclge_get_vport(handle);
4753 bool en_bc_pmc = true;
4755 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4756 * is always bypassed, so broadcast promisc should stay disabled until
4757 * the user enables promisc mode.
4758 */
4759 if (handle->pdev->revision == 0x20)
4760 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4762 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4763 en_bc_pmc);
4766 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4768 struct hclge_vport *vport = hclge_get_vport(handle);
4769 struct hclge_dev *hdev = vport->back;
4771 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4774 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4776 struct hclge_get_fd_mode_cmd *req;
4777 struct hclge_desc desc;
4780 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4782 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4784 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4786 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4790 *fd_mode = req->mode;
4795 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4796 u32 *stage1_entry_num,
4797 u32 *stage2_entry_num,
4798 u16 *stage1_counter_num,
4799 u16 *stage2_counter_num)
4801 struct hclge_get_fd_allocation_cmd *req;
4802 struct hclge_desc desc;
4805 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4807 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4809 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4811 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4816 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4817 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4818 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4819 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4824 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4825 enum HCLGE_FD_STAGE stage_num)
4827 struct hclge_set_fd_key_config_cmd *req;
4828 struct hclge_fd_key_cfg *stage;
4829 struct hclge_desc desc;
4832 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4834 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4835 stage = &hdev->fd_cfg.key_cfg[stage_num];
4836 req->stage = stage_num;
4837 req->key_select = stage->key_sel;
4838 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4839 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4840 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4841 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4842 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4843 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4845 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4847 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4852 static int hclge_init_fd_config(struct hclge_dev *hdev)
4854 #define LOW_2_WORDS 0x03
4855 struct hclge_fd_key_cfg *key_cfg;
4858 if (!hnae3_dev_fd_supported(hdev))
4861 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4865 switch (hdev->fd_cfg.fd_mode) {
4866 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4867 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4869 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4870 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4873 dev_err(&hdev->pdev->dev,
4874 "Unsupported flow director mode %u\n",
4875 hdev->fd_cfg.fd_mode);
4879 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4880 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4881 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4882 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4883 key_cfg->outer_sipv6_word_en = 0;
4884 key_cfg->outer_dipv6_word_en = 0;
4886 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4887 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4888 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4889 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4891 /* If the max 400-bit key is used, MAC address tuples can also be supported */
4892 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
4893 key_cfg->tuple_active |=
4894 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4896 /* roce_type is used to filter roce frames
4897 * dst_vport is used to specify the rule
4899 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4901 ret = hclge_get_fd_allocation(hdev,
4902 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4903 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4904 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4905 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4909 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4912 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4913 int loc, u8 *key, bool is_add)
4915 struct hclge_fd_tcam_config_1_cmd *req1;
4916 struct hclge_fd_tcam_config_2_cmd *req2;
4917 struct hclge_fd_tcam_config_3_cmd *req3;
4918 struct hclge_desc desc[3];
4921 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4922 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4923 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4924 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4925 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4927 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4928 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4929 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4931 req1->stage = stage;
4932 req1->xy_sel = sel_x ? 1 : 0;
4933 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4934 req1->index = cpu_to_le32(loc);
4935 req1->entry_vld = sel_x ? is_add : 0;
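/* when a key is supplied, its bytes are laid out consecutively across the
 * three descriptors' tcam_data areas
 */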
4937 if (key) {
4938 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4939 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4940 sizeof(req2->tcam_data));
4941 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4942 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4943 }
4945 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4947 dev_err(&hdev->pdev->dev,
4948 "config tcam key fail, ret=%d\n",
4954 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4955 struct hclge_fd_ad_data *action)
4957 struct hclge_fd_ad_config_cmd *req;
4958 struct hclge_desc desc;
4962 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4964 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4965 req->index = cpu_to_le32(loc);
4968 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4969 action->write_rule_id_to_bd);
4970 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4973 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4974 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4975 action->forward_to_direct_queue);
4976 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4978 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4979 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4980 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4981 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4982 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4983 action->next_input_key);
4985 req->ad_data = cpu_to_le64(ad_data);
4986 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4988 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
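/* key_x/key_y below form the value/mask pair written to the flow director
 * TCAM; both are derived from each tuple's value and user-supplied mask
 * by the calc_x()/calc_y() helper macros defined earlier in this driver.
 */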
4993 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4994 struct hclge_fd_rule *rule)
4996 u16 tmp_x_s, tmp_y_s;
4997 u32 tmp_x_l, tmp_y_l;
5000 if (rule->unused_tuple & tuple_bit)
5003 switch (tuple_bit) {
5004 case BIT(INNER_DST_MAC):
5005 for (i = 0; i < ETH_ALEN; i++) {
5006 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5007 rule->tuples_mask.dst_mac[i]);
5008 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5009 rule->tuples_mask.dst_mac[i]);
5013 case BIT(INNER_SRC_MAC):
5014 for (i = 0; i < ETH_ALEN; i++) {
5015 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5016 rule->tuples_mask.src_mac[i]);
5017 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5018 rule->tuples_mask.src_mac[i]);
5022 case BIT(INNER_VLAN_TAG_FST):
5023 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5024 rule->tuples_mask.vlan_tag1);
5025 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5026 rule->tuples_mask.vlan_tag1);
5027 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5028 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5031 case BIT(INNER_ETH_TYPE):
5032 calc_x(tmp_x_s, rule->tuples.ether_proto,
5033 rule->tuples_mask.ether_proto);
5034 calc_y(tmp_y_s, rule->tuples.ether_proto,
5035 rule->tuples_mask.ether_proto);
5036 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5037 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5040 case BIT(INNER_IP_TOS):
5041 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5042 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5045 case BIT(INNER_IP_PROTO):
5046 calc_x(*key_x, rule->tuples.ip_proto,
5047 rule->tuples_mask.ip_proto);
5048 calc_y(*key_y, rule->tuples.ip_proto,
5049 rule->tuples_mask.ip_proto);
5052 case BIT(INNER_SRC_IP):
5053 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5054 rule->tuples_mask.src_ip[IPV4_INDEX]);
5055 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5056 rule->tuples_mask.src_ip[IPV4_INDEX]);
5057 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5058 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5061 case BIT(INNER_DST_IP):
5062 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5063 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5064 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5065 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5066 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5067 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5070 case BIT(INNER_SRC_PORT):
5071 calc_x(tmp_x_s, rule->tuples.src_port,
5072 rule->tuples_mask.src_port);
5073 calc_y(tmp_y_s, rule->tuples.src_port,
5074 rule->tuples_mask.src_port);
5075 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5076 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5079 case BIT(INNER_DST_PORT):
5080 calc_x(tmp_x_s, rule->tuples.dst_port,
5081 rule->tuples_mask.dst_port);
5082 calc_y(tmp_y_s, rule->tuples.dst_port,
5083 rule->tuples_mask.dst_port);
5084 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5085 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5093 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5094 u8 vf_id, u8 network_port_id)
5096 u32 port_number = 0;
5098 if (port_type == HOST_PORT) {
5099 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5101 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5103 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5105 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5106 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5107 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5113 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5114 __le32 *key_x, __le32 *key_y,
5115 struct hclge_fd_rule *rule)
5117 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5118 u8 cur_pos = 0, tuple_size, shift_bits;
5121 for (i = 0; i < MAX_META_DATA; i++) {
5122 tuple_size = meta_data_key_info[i].key_length;
5123 tuple_bit = key_cfg->meta_data_active & BIT(i);
5125 switch (tuple_bit) {
5126 case BIT(ROCE_TYPE):
5127 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5128 cur_pos += tuple_size;
5130 case BIT(DST_VPORT):
5131 port_number = hclge_get_port_number(HOST_PORT, 0,
5133 hnae3_set_field(meta_data,
5134 GENMASK(cur_pos + tuple_size, cur_pos),
5135 cur_pos, port_number);
5136 cur_pos += tuple_size;
5143 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5144 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5145 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5147 *key_x = cpu_to_le32(tmp_x << shift_bits);
5148 *key_y = cpu_to_le32(tmp_y << shift_bits);
5151 /* A complete key is combined with meta data key and tuple key.
5152 * Meta data key is stored at the MSB region, and tuple key is stored at
5153 * the LSB region, unused bits will be filled 0.
5155 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5156 struct hclge_fd_rule *rule)
5158 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5159 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5160 u8 *cur_key_x, *cur_key_y;
5161 u8 meta_data_region;
5166 memset(key_x, 0, sizeof(key_x));
5167 memset(key_y, 0, sizeof(key_y));
5171 for (i = 0; i < MAX_TUPLE; i++) {
5175 tuple_size = tuple_key_info[i].key_length / 8;
5176 check_tuple = key_cfg->tuple_active & BIT(i);
5178 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5181 cur_key_x += tuple_size;
5182 cur_key_y += tuple_size;
5186 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5187 MAX_META_DATA_LENGTH / 8;
5189 hclge_fd_convert_meta_data(key_cfg,
5190 (__le32 *)(key_x + meta_data_region),
5191 (__le32 *)(key_y + meta_data_region),
5194 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5197 dev_err(&hdev->pdev->dev,
5198 "fd key_y config fail, loc=%u, ret=%d\n",
5199 rule->location, ret);
5203 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5206 dev_err(&hdev->pdev->dev,
5207 "fd key_x config fail, loc=%u, ret=%d\n",
5208 rule->location, ret);
5212 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5213 struct hclge_fd_rule *rule)
5215 struct hclge_fd_ad_data ad_data;
5217 ad_data.ad_id = rule->location;
5219 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5220 ad_data.drop_packet = true;
5221 ad_data.forward_to_direct_queue = false;
5222 ad_data.queue_id = 0;
5224 ad_data.drop_packet = false;
5225 ad_data.forward_to_direct_queue = true;
5226 ad_data.queue_id = rule->queue_id;
5229 ad_data.use_counter = false;
5230 ad_data.counter_id = 0;
5232 ad_data.use_next_stage = false;
5233 ad_data.next_input_key = 0;
5235 ad_data.write_rule_id_to_bd = true;
5236 ad_data.rule_id = rule->location;
5238 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
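/* The hclge_fd_check_*_tuple() helpers below each validate one member of
 * the ethtool flow-spec union and set a bit in *unused_tuple for every
 * field the user left unspecified, so the key builder can skip it later.
 */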
5241 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5244 if (!spec || !unused_tuple)
5247 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5250 *unused_tuple |= BIT(INNER_SRC_IP);
5253 *unused_tuple |= BIT(INNER_DST_IP);
5256 *unused_tuple |= BIT(INNER_SRC_PORT);
5259 *unused_tuple |= BIT(INNER_DST_PORT);
5262 *unused_tuple |= BIT(INNER_IP_TOS);
5267 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5270 if (!spec || !unused_tuple)
5273 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5274 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5277 *unused_tuple |= BIT(INNER_SRC_IP);
5280 *unused_tuple |= BIT(INNER_DST_IP);
5283 *unused_tuple |= BIT(INNER_IP_TOS);
5286 *unused_tuple |= BIT(INNER_IP_PROTO);
5288 if (spec->l4_4_bytes)
5291 if (spec->ip_ver != ETH_RX_NFC_IP4)
5297 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5300 if (!spec || !unused_tuple)
5303 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5306 /* check whether the src/dst ip addresses are in use */
5307 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5308 !spec->ip6src[2] && !spec->ip6src[3])
5309 *unused_tuple |= BIT(INNER_SRC_IP);
5311 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5312 !spec->ip6dst[2] && !spec->ip6dst[3])
5313 *unused_tuple |= BIT(INNER_DST_IP);
5316 *unused_tuple |= BIT(INNER_SRC_PORT);
5319 *unused_tuple |= BIT(INNER_DST_PORT);
5327 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5330 if (!spec || !unused_tuple)
5333 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5334 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5336 /* check whether the src/dst ip addresses are in use */
5337 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5338 !spec->ip6src[2] && !spec->ip6src[3])
5339 *unused_tuple |= BIT(INNER_SRC_IP);
5341 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5342 !spec->ip6dst[2] && !spec->ip6dst[3])
5343 *unused_tuple |= BIT(INNER_DST_IP);
5345 if (!spec->l4_proto)
5346 *unused_tuple |= BIT(INNER_IP_PROTO);
5351 if (spec->l4_4_bytes)
5357 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5359 if (!spec || !unused_tuple)
5362 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5363 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5364 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5366 if (is_zero_ether_addr(spec->h_source))
5367 *unused_tuple |= BIT(INNER_SRC_MAC);
5369 if (is_zero_ether_addr(spec->h_dest))
5370 *unused_tuple |= BIT(INNER_DST_MAC);
5373 *unused_tuple |= BIT(INNER_ETH_TYPE);
5378 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5379 struct ethtool_rx_flow_spec *fs,
5382 if (fs->flow_type & FLOW_EXT) {
5383 if (fs->h_ext.vlan_etype) {
5384 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5388 if (!fs->h_ext.vlan_tci)
5389 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5391 if (fs->m_ext.vlan_tci &&
5392 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5393 dev_err(&hdev->pdev->dev,
5394 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5395 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5399 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5402 if (fs->flow_type & FLOW_MAC_EXT) {
5403 if (hdev->fd_cfg.fd_mode !=
5404 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5405 dev_err(&hdev->pdev->dev,
5406 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5410 if (is_zero_ether_addr(fs->h_ext.h_dest))
5411 *unused_tuple |= BIT(INNER_DST_MAC);
5413 *unused_tuple &= ~BIT(INNER_DST_MAC);
5419 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5420 struct ethtool_rx_flow_spec *fs,
5426 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5427 dev_err(&hdev->pdev->dev,
5428 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5430 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5434 if ((fs->flow_type & FLOW_EXT) &&
5435 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5436 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5440 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5441 switch (flow_type) {
5445 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5449 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5455 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5458 case IPV6_USER_FLOW:
5459 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5463 if (hdev->fd_cfg.fd_mode !=
5464 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5465 dev_err(&hdev->pdev->dev,
5466 "ETHER_FLOW is not supported in current fd mode!\n");
5470 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5474 dev_err(&hdev->pdev->dev,
5475 "unsupported protocol type, protocol type = %#x\n",
5481 dev_err(&hdev->pdev->dev,
5482 "failed to check flow union tuple, ret = %d\n",
5487 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5490 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5492 struct hclge_fd_rule *rule = NULL;
5493 struct hlist_node *node2;
5495 spin_lock_bh(&hdev->fd_rule_lock);
5496 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5497 if (rule->location >= location)
5501 spin_unlock_bh(&hdev->fd_rule_lock);
5503 return rule && rule->location == location;
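/* The fd rule list is kept sorted in ascending rule->location order, so
 * the lookups and insertions below stop at the first node whose location
 * is not less than the target.
 */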
5506 /* must be called with hdev->fd_rule_lock held */
5507 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5508 struct hclge_fd_rule *new_rule,
5512 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5513 struct hlist_node *node2;
5515 if (is_add && !new_rule)
5518 hlist_for_each_entry_safe(rule, node2,
5519 &hdev->fd_rule_list, rule_node) {
5520 if (rule->location >= location)
5525 if (rule && rule->location == location) {
5526 hlist_del(&rule->rule_node);
5528 hdev->hclge_fd_rule_num--;
5531 if (!hdev->hclge_fd_rule_num)
5532 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5533 clear_bit(location, hdev->fd_bmap);
5537 } else if (!is_add) {
5538 dev_err(&hdev->pdev->dev,
5539 "delete fail, rule %u is inexistent\n",
5544 INIT_HLIST_NODE(&new_rule->rule_node);
5547 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5549 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5551 set_bit(location, hdev->fd_bmap);
5552 hdev->hclge_fd_rule_num++;
5553 hdev->fd_active_type = new_rule->rule_type;
5558 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5559 struct ethtool_rx_flow_spec *fs,
5560 struct hclge_fd_rule *rule)
5562 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5564 switch (flow_type) {
5568 rule->tuples.src_ip[IPV4_INDEX] =
5569 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5570 rule->tuples_mask.src_ip[IPV4_INDEX] =
5571 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5573 rule->tuples.dst_ip[IPV4_INDEX] =
5574 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5575 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5576 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5578 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5579 rule->tuples_mask.src_port =
5580 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5582 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5583 rule->tuples_mask.dst_port =
5584 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5586 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5587 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5589 rule->tuples.ether_proto = ETH_P_IP;
5590 rule->tuples_mask.ether_proto = 0xFFFF;
5594 rule->tuples.src_ip[IPV4_INDEX] =
5595 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5596 rule->tuples_mask.src_ip[IPV4_INDEX] =
5597 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5599 rule->tuples.dst_ip[IPV4_INDEX] =
5600 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5601 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5602 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5604 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5605 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5607 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5608 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5610 rule->tuples.ether_proto = ETH_P_IP;
5611 rule->tuples_mask.ether_proto = 0xFFFF;
5617 be32_to_cpu_array(rule->tuples.src_ip,
5618 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5619 be32_to_cpu_array(rule->tuples_mask.src_ip,
5620 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5622 be32_to_cpu_array(rule->tuples.dst_ip,
5623 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5624 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5625 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5627 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5628 rule->tuples_mask.src_port =
5629 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5631 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5632 rule->tuples_mask.dst_port =
5633 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5635 rule->tuples.ether_proto = ETH_P_IPV6;
5636 rule->tuples_mask.ether_proto = 0xFFFF;
5639 case IPV6_USER_FLOW:
5640 be32_to_cpu_array(rule->tuples.src_ip,
5641 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5642 be32_to_cpu_array(rule->tuples_mask.src_ip,
5643 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5645 be32_to_cpu_array(rule->tuples.dst_ip,
5646 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5647 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5648 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5650 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5651 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5653 rule->tuples.ether_proto = ETH_P_IPV6;
5654 rule->tuples_mask.ether_proto = 0xFFFF;
5658 ether_addr_copy(rule->tuples.src_mac,
5659 fs->h_u.ether_spec.h_source);
5660 ether_addr_copy(rule->tuples_mask.src_mac,
5661 fs->m_u.ether_spec.h_source);
5663 ether_addr_copy(rule->tuples.dst_mac,
5664 fs->h_u.ether_spec.h_dest);
5665 ether_addr_copy(rule->tuples_mask.dst_mac,
5666 fs->m_u.ether_spec.h_dest);
5668 rule->tuples.ether_proto =
5669 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5670 rule->tuples_mask.ether_proto =
5671 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5678 switch (flow_type) {
5681 rule->tuples.ip_proto = IPPROTO_SCTP;
5682 rule->tuples_mask.ip_proto = 0xFF;
5686 rule->tuples.ip_proto = IPPROTO_TCP;
5687 rule->tuples_mask.ip_proto = 0xFF;
5691 rule->tuples.ip_proto = IPPROTO_UDP;
5692 rule->tuples_mask.ip_proto = 0xFF;
5698 if (fs->flow_type & FLOW_EXT) {
5699 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5700 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5703 if (fs->flow_type & FLOW_MAC_EXT) {
5704 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5705 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5711 /* must be called with hdev->fd_rule_lock held */
5712 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5713 struct hclge_fd_rule *rule)
5718 dev_err(&hdev->pdev->dev,
5719 "The flow director rule is NULL\n");
5723 /* it will never fail here, so there is no need to check the return value */
5724 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5726 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5730 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5737 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
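/* Entry point for ethtool's ntuple interface. Illustrative usage (the
 * interface name and values are only examples):
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 3 loc 5
 * installs rule 5 steering TCP/IPv4 packets with destination port 80 to
 * queue 3; "action -1" would instead drop matching packets
 * (RX_CLS_FLOW_DISC).
 */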
5741 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5742 struct ethtool_rxnfc *cmd)
5744 struct hclge_vport *vport = hclge_get_vport(handle);
5745 struct hclge_dev *hdev = vport->back;
5746 u16 dst_vport_id = 0, q_index = 0;
5747 struct ethtool_rx_flow_spec *fs;
5748 struct hclge_fd_rule *rule;
5753 if (!hnae3_dev_fd_supported(hdev)) {
5754 dev_err(&hdev->pdev->dev,
5755 "flow table director is not supported\n");
5760 dev_err(&hdev->pdev->dev,
5761 "please enable flow director first\n");
5765 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5767 ret = hclge_fd_check_spec(hdev, fs, &unused);
5771 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5772 action = HCLGE_FD_ACTION_DROP_PACKET;
5774 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5775 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5778 if (vf > hdev->num_req_vfs) {
5779 dev_err(&hdev->pdev->dev,
5780 "Error: vf id (%u) > max vf num (%u)\n",
5781 vf, hdev->num_req_vfs);
5785 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5786 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5789 dev_err(&hdev->pdev->dev,
5790 "Error: queue id (%u) > max tqp num (%u)\n",
5795 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5799 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5803 ret = hclge_fd_get_tuple(hdev, fs, rule);
5809 rule->flow_type = fs->flow_type;
5810 rule->location = fs->location;
5811 rule->unused_tuple = unused;
5812 rule->vf_id = dst_vport_id;
5813 rule->queue_id = q_index;
5814 rule->action = action;
5815 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5817 /* to avoid rule conflicts, when the user configures rules via
5818 * ethtool, we need to clear all arfs rules first
5819 */
5820 hclge_clear_arfs_rules(handle);
5822 spin_lock_bh(&hdev->fd_rule_lock);
5823 ret = hclge_fd_config_rule(hdev, rule);
5825 spin_unlock_bh(&hdev->fd_rule_lock);
5830 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5831 struct ethtool_rxnfc *cmd)
5833 struct hclge_vport *vport = hclge_get_vport(handle);
5834 struct hclge_dev *hdev = vport->back;
5835 struct ethtool_rx_flow_spec *fs;
5838 if (!hnae3_dev_fd_supported(hdev))
5841 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5843 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5846 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5847 dev_err(&hdev->pdev->dev,
5848 "Delete fail, rule %u is inexistent\n", fs->location);
5852 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5857 spin_lock_bh(&hdev->fd_rule_lock);
5858 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5860 spin_unlock_bh(&hdev->fd_rule_lock);
5865 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5868 struct hclge_vport *vport = hclge_get_vport(handle);
5869 struct hclge_dev *hdev = vport->back;
5870 struct hclge_fd_rule *rule;
5871 struct hlist_node *node;
5874 if (!hnae3_dev_fd_supported(hdev))
5877 spin_lock_bh(&hdev->fd_rule_lock);
5878 for_each_set_bit(location, hdev->fd_bmap,
5879 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5880 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5884 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5886 hlist_del(&rule->rule_node);
5889 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5890 hdev->hclge_fd_rule_num = 0;
5891 bitmap_zero(hdev->fd_bmap,
5892 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5895 spin_unlock_bh(&hdev->fd_rule_lock);
5898 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5900 struct hclge_vport *vport = hclge_get_vport(handle);
5901 struct hclge_dev *hdev = vport->back;
5902 struct hclge_fd_rule *rule;
5903 struct hlist_node *node;
5906 /* Return ok here, because the reset error handling will check this
5907 * return value. If an error is returned here, the reset process will
5908 * fail.
5909 */
5910 if (!hnae3_dev_fd_supported(hdev))
5913 /* if fd is disabled, it should not be restored during reset */
5917 spin_lock_bh(&hdev->fd_rule_lock);
5918 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5919 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5921 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5924 dev_warn(&hdev->pdev->dev,
5925 "Restore rule %u failed, remove it\n",
5927 clear_bit(rule->location, hdev->fd_bmap);
5928 hlist_del(&rule->rule_node);
5930 hdev->hclge_fd_rule_num--;
5934 if (hdev->hclge_fd_rule_num)
5935 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5937 spin_unlock_bh(&hdev->fd_rule_lock);
5942 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5943 struct ethtool_rxnfc *cmd)
5945 struct hclge_vport *vport = hclge_get_vport(handle);
5946 struct hclge_dev *hdev = vport->back;
5948 if (!hnae3_dev_fd_supported(hdev))
5951 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5952 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
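/* The hclge_fd_get_*_info() helpers below convert a stored rule back into
 * its ethtool representation; any field whose bit is set in unused_tuple
 * reports an all-zero mask, i.e. "not matched on".
 */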
5957 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
5958 struct ethtool_tcpip4_spec *spec,
5959 struct ethtool_tcpip4_spec *spec_mask)
5961 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5962 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5963 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5965 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5966 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5967 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5969 spec->psrc = cpu_to_be16(rule->tuples.src_port);
5970 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5971 0 : cpu_to_be16(rule->tuples_mask.src_port);
5973 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
5974 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
5975 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5977 spec->tos = rule->tuples.ip_tos;
5978 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5979 0 : rule->tuples_mask.ip_tos;
5982 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
5983 struct ethtool_usrip4_spec *spec,
5984 struct ethtool_usrip4_spec *spec_mask)
5986 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5987 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5988 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5990 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5991 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5992 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5994 spec->tos = rule->tuples.ip_tos;
5995 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5996 0 : rule->tuples_mask.ip_tos;
5998 spec->proto = rule->tuples.ip_proto;
5999 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6000 0 : rule->tuples_mask.ip_proto;
6002 spec->ip_ver = ETH_RX_NFC_IP4;
6005 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6006 struct ethtool_tcpip6_spec *spec,
6007 struct ethtool_tcpip6_spec *spec_mask)
6009 cpu_to_be32_array(spec->ip6src,
6010 rule->tuples.src_ip, IPV6_SIZE);
6011 cpu_to_be32_array(spec->ip6dst,
6012 rule->tuples.dst_ip, IPV6_SIZE);
6013 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6014 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6016 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6019 if (rule->unused_tuple & BIT(INNER_DST_IP))
6020 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6022 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6025 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6026 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6027 0 : cpu_to_be16(rule->tuples_mask.src_port);
6029 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6030 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6031 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6034 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6035 struct ethtool_usrip6_spec *spec,
6036 struct ethtool_usrip6_spec *spec_mask)
6038 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6039 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6040 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6041 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6043 cpu_to_be32_array(spec_mask->ip6src,
6044 rule->tuples_mask.src_ip, IPV6_SIZE);
6046 if (rule->unused_tuple & BIT(INNER_DST_IP))
6047 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6049 cpu_to_be32_array(spec_mask->ip6dst,
6050 rule->tuples_mask.dst_ip, IPV6_SIZE);
6052 spec->l4_proto = rule->tuples.ip_proto;
6053 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6054 0 : rule->tuples_mask.ip_proto;
6057 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6058 struct ethhdr *spec,
6059 struct ethhdr *spec_mask)
6061 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6062 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6064 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6065 eth_zero_addr(spec_mask->h_source);
6067 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6069 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6070 eth_zero_addr(spec_mask->h_dest);
6072 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6074 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6075 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6076 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6079 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6080 struct hclge_fd_rule *rule)
6082 if (fs->flow_type & FLOW_EXT) {
6083 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6084 fs->m_ext.vlan_tci =
6085 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6086 cpu_to_be16(VLAN_VID_MASK) :
6087 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6090 if (fs->flow_type & FLOW_MAC_EXT) {
6091 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6092 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6093 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6095 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6096 rule->tuples_mask.dst_mac);
6100 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6101 struct ethtool_rxnfc *cmd)
6103 struct hclge_vport *vport = hclge_get_vport(handle);
6104 struct hclge_fd_rule *rule = NULL;
6105 struct hclge_dev *hdev = vport->back;
6106 struct ethtool_rx_flow_spec *fs;
6107 struct hlist_node *node2;
6109 if (!hnae3_dev_fd_supported(hdev))
6112 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6114 spin_lock_bh(&hdev->fd_rule_lock);
6116 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6117 if (rule->location >= fs->location)
6121 if (!rule || fs->location != rule->location) {
6122 spin_unlock_bh(&hdev->fd_rule_lock);
6127 fs->flow_type = rule->flow_type;
6128 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6132 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6133 &fs->m_u.tcp_ip4_spec);
6136 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6137 &fs->m_u.usr_ip4_spec);
6142 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6143 &fs->m_u.tcp_ip6_spec);
6145 case IPV6_USER_FLOW:
6146 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6147 &fs->m_u.usr_ip6_spec);
6149 /* The flow type of the fd rule has been checked before it was added
6150 * to the rule list. As all other flow types have been handled above,
6151 * the default case must be ETHER_FLOW.
6152 */
6153 default:
6154 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6155 &fs->m_u.ether_spec);
6159 hclge_fd_get_ext_info(fs, rule);
6161 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6162 fs->ring_cookie = RX_CLS_FLOW_DISC;
6166 fs->ring_cookie = rule->queue_id;
6167 vf_id = rule->vf_id;
6168 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6169 fs->ring_cookie |= vf_id;
6172 spin_unlock_bh(&hdev->fd_rule_lock);
6177 static int hclge_get_all_rules(struct hnae3_handle *handle,
6178 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6180 struct hclge_vport *vport = hclge_get_vport(handle);
6181 struct hclge_dev *hdev = vport->back;
6182 struct hclge_fd_rule *rule;
6183 struct hlist_node *node2;
6186 if (!hnae3_dev_fd_supported(hdev))
6189 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6191 spin_lock_bh(&hdev->fd_rule_lock);
6192 hlist_for_each_entry_safe(rule, node2,
6193 &hdev->fd_rule_list, rule_node) {
6194 if (cnt == cmd->rule_cnt) {
6195 spin_unlock_bh(&hdev->fd_rule_lock);
6199 rule_locs[cnt] = rule->location;
6203 spin_unlock_bh(&hdev->fd_rule_lock);
6205 cmd->rule_cnt = cnt;
6210 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6211 struct hclge_fd_rule_tuples *tuples)
6213 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6214 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6216 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6217 tuples->ip_proto = fkeys->basic.ip_proto;
6218 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6220 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
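/* an IPv4 address is stored in the last word of the 4-word IP
 * array (index 3), consistent with IPV4_INDEX used elsewhere
 */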
6221 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6222 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6226 for (i = 0; i < IPV6_SIZE; i++) {
6227 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6228 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6233 /* traverse all rules, check whether an existing rule has the same tuples */
6234 static struct hclge_fd_rule *
6235 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6236 const struct hclge_fd_rule_tuples *tuples)
6238 struct hclge_fd_rule *rule = NULL;
6239 struct hlist_node *node;
6241 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6242 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6249 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6250 struct hclge_fd_rule *rule)
6252 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6253 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6254 BIT(INNER_SRC_PORT);
6257 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6258 if (tuples->ether_proto == ETH_P_IP) {
6259 if (tuples->ip_proto == IPPROTO_TCP)
6260 rule->flow_type = TCP_V4_FLOW;
6262 rule->flow_type = UDP_V4_FLOW;
6264 if (tuples->ip_proto == IPPROTO_TCP)
6265 rule->flow_type = TCP_V6_FLOW;
6267 rule->flow_type = UDP_V6_FLOW;
6269 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
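/* an aRFS entry matches one flow exactly, hence the all-ones mask below */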
6270 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6273 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6274 u16 flow_id, struct flow_keys *fkeys)
6276 struct hclge_vport *vport = hclge_get_vport(handle);
6277 struct hclge_fd_rule_tuples new_tuples;
6278 struct hclge_dev *hdev = vport->back;
6279 struct hclge_fd_rule *rule;
6284 if (!hnae3_dev_fd_supported(hdev))
6287 memset(&new_tuples, 0, sizeof(new_tuples));
6288 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6290 spin_lock_bh(&hdev->fd_rule_lock);
6292 /* when an fd rule added by the user already exists, arfs
6293 * should not work
6294 */
6295 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6296 spin_unlock_bh(&hdev->fd_rule_lock);
6300 /* check whether a flow director filter exists for this flow; if not,
6301 * create a new one for it; if a filter exists with a different queue
6302 * id, modify it; if a filter exists with the same queue id, do
6303 * nothing
6304 */
6305 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6307 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6308 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6309 spin_unlock_bh(&hdev->fd_rule_lock);
6313 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6315 spin_unlock_bh(&hdev->fd_rule_lock);
6319 set_bit(bit_id, hdev->fd_bmap);
6320 rule->location = bit_id;
6321 rule->flow_id = flow_id;
6322 rule->queue_id = queue_id;
6323 hclge_fd_build_arfs_rule(&new_tuples, rule);
6324 ret = hclge_fd_config_rule(hdev, rule);
6326 spin_unlock_bh(&hdev->fd_rule_lock);
6331 return rule->location;
6334 spin_unlock_bh(&hdev->fd_rule_lock);
6336 if (rule->queue_id == queue_id)
6337 return rule->location;
6339 tmp_queue_id = rule->queue_id;
6340 rule->queue_id = queue_id;
6341 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6343 rule->queue_id = tmp_queue_id;
6347 return rule->location;
6350 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6352 #ifdef CONFIG_RFS_ACCEL
6353 struct hnae3_handle *handle = &hdev->vport[0].nic;
6354 struct hclge_fd_rule *rule;
6355 struct hlist_node *node;
6356 HLIST_HEAD(del_list);
6358 spin_lock_bh(&hdev->fd_rule_lock);
6359 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6360 spin_unlock_bh(&hdev->fd_rule_lock);
6363 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6364 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6365 rule->flow_id, rule->location)) {
6366 hlist_del_init(&rule->rule_node);
6367 hlist_add_head(&rule->rule_node, &del_list);
6368 hdev->hclge_fd_rule_num--;
6369 clear_bit(rule->location, hdev->fd_bmap);
6372 spin_unlock_bh(&hdev->fd_rule_lock);
6374 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6375 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6376 rule->location, NULL, false);
6382 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6384 #ifdef CONFIG_RFS_ACCEL
6385 struct hclge_vport *vport = hclge_get_vport(handle);
6386 struct hclge_dev *hdev = vport->back;
6388 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6389 hclge_del_all_fd_entries(handle, true);
6393 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6395 struct hclge_vport *vport = hclge_get_vport(handle);
6396 struct hclge_dev *hdev = vport->back;
6398 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6399 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6402 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6404 struct hclge_vport *vport = hclge_get_vport(handle);
6405 struct hclge_dev *hdev = vport->back;
6407 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6410 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6412 struct hclge_vport *vport = hclge_get_vport(handle);
6413 struct hclge_dev *hdev = vport->back;
6415 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6418 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6420 struct hclge_vport *vport = hclge_get_vport(handle);
6421 struct hclge_dev *hdev = vport->back;
6423 return hdev->rst_stats.hw_reset_done_cnt;
6426 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6428 struct hclge_vport *vport = hclge_get_vport(handle);
6429 struct hclge_dev *hdev = vport->back;
6432 hdev->fd_en = enable;
6433 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6434 if (!enable)
6435 hclge_del_all_fd_entries(handle, clear);
6436 else
6437 hclge_restore_fd_entries(handle);
6440 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6442 struct hclge_desc desc;
6443 struct hclge_config_mac_mode_cmd *req =
6444 (struct hclge_config_mac_mode_cmd *)desc.data;
6448 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6451 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6452 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6453 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6454 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6455 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6456 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6457 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6458 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6459 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6460 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6463 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6465 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6467 dev_err(&hdev->pdev->dev,
6468 "mac enable fail, ret =%d.\n", ret);
6471 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6472 u8 switch_param, u8 param_mask)
6474 struct hclge_mac_vlan_switch_cmd *req;
6475 struct hclge_desc desc;
6479 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6480 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6482 /* read current config parameter */
6483 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6485 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6486 req->func_id = cpu_to_le32(func_id);
6488 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6490 dev_err(&hdev->pdev->dev,
6491 "read mac vlan switch parameter fail, ret = %d\n", ret);
6495 /* modify and write new config parameter */
6496 hclge_cmd_reuse_desc(&desc, false);
6497 req->switch_param = (req->switch_param & param_mask) | switch_param;
6498 req->param_mask = param_mask;
6500 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6502 dev_err(&hdev->pdev->dev,
6503 "set mac vlan switch parameter fail, ret = %d\n", ret);
6507 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6510 #define HCLGE_PHY_LINK_STATUS_NUM 200
6512 struct phy_device *phydev = hdev->hw.mac.phydev;
6517 ret = phy_read_status(phydev);
6519 dev_err(&hdev->pdev->dev,
6520 "phy update link status fail, ret = %d\n", ret);
6524 if (phydev->link == link_ret)
6527 msleep(HCLGE_LINK_STATUS_MS);
6528 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6531 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6533 #define HCLGE_MAC_LINK_STATUS_NUM 100
6539 ret = hclge_get_mac_link_status(hdev);
6542 else if (ret == link_ret)
6545 msleep(HCLGE_LINK_STATUS_MS);
6546 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6550 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6553 #define HCLGE_LINK_STATUS_DOWN 0
6554 #define HCLGE_LINK_STATUS_UP 1
6558 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6561 hclge_phy_link_status_wait(hdev, link_ret);
6563 return hclge_mac_link_status_wait(hdev, link_ret);
6566 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6568 struct hclge_config_mac_mode_cmd *req;
6569 struct hclge_desc desc;
6573 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6574 /* 1 Read out the MAC mode config at first */
6575 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6576 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6578 dev_err(&hdev->pdev->dev,
6579 "mac loopback get fail, ret =%d.\n", ret);
6583 /* 2 Then setup the loopback flag */
6584 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6585 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6587 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6589 /* 3 Config mac work mode with loopback flag
6590 * and its original configure parameters
6592 hclge_cmd_reuse_desc(&desc, false);
6593 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6595 dev_err(&hdev->pdev->dev,
6596 "mac loopback set fail, ret =%d.\n", ret);
6600 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6601 enum hnae3_loop loop_mode)
6603 #define HCLGE_SERDES_RETRY_MS 10
6604 #define HCLGE_SERDES_RETRY_NUM 100
6606 struct hclge_serdes_lb_cmd *req;
6607 struct hclge_desc desc;
6611 req = (struct hclge_serdes_lb_cmd *)desc.data;
6612 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6614 switch (loop_mode) {
6615 case HNAE3_LOOP_SERIAL_SERDES:
6616 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6618 case HNAE3_LOOP_PARALLEL_SERDES:
6619 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6622 dev_err(&hdev->pdev->dev,
6623 "unsupported serdes loopback mode %d\n", loop_mode);
6628 req->enable = loop_mode_b;
6629 req->mask = loop_mode_b;
6631 req->mask = loop_mode_b;
6634 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6636 dev_err(&hdev->pdev->dev,
6637 "serdes loopback set fail, ret = %d\n", ret);
6642 msleep(HCLGE_SERDES_RETRY_MS);
6643 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6645 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6647 dev_err(&hdev->pdev->dev,
6648 "serdes loopback get, ret = %d\n", ret);
6651 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6652 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6654 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6655 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6657 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6658 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6664 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6665 enum hnae3_loop loop_mode)
6669 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6673 hclge_cfg_mac_mode(hdev, en);
6675 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6677 dev_err(&hdev->pdev->dev,
6678 "serdes loopback config mac mode timeout\n");
6683 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6684 struct phy_device *phydev)
6688 if (!phydev->suspended) {
6689 ret = phy_suspend(phydev);
6690 if (ret)
6691 return ret;
6692 }
6694 ret = phy_resume(phydev);
6695 if (ret)
6696 return ret;
6698 return phy_loopback(phydev, true);
6701 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6702 struct phy_device *phydev)
6706 ret = phy_loopback(phydev, false);
6707 if (ret)
6708 return ret;
6710 return phy_suspend(phydev);
static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int ret;

	if (!phydev)
		return -ENOTSUPP;

	if (en)
		ret = hclge_enable_phy_loopback(hdev, phydev);
	else
		ret = hclge_disable_phy_loopback(hdev, phydev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set phy loopback fail, ret = %d\n", ret);
		return ret;
	}

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"phy loopback config mac mode timeout\n");

	return ret;
}

static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
	 * the same, the packets are looped back in the SSU. If SSU loopback
	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
	 */
	if (hdev->pdev->revision >= 0x21) {
		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);

		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
						HCLGE_SWITCH_ALW_LPBK_MASK);
		if (ret)
			return ret;
	}

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
		break;
	case HNAE3_LOOP_PHY:
		ret = hclge_set_phy_loopback(hdev, en);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}

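/* Note: a loopback test only passes traffic if the per-queue scheduling is
 * also active, so after switching the loopback mode every TQP of the vport
 * is explicitly enabled or disabled to match "en".
 */
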
static int hclge_set_default_loopback(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_app_loopback(hdev, false);
	if (ret)
		return ret;

	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
	if (ret)
		return ret;

	return hclge_cfg_serdes_loopback(hdev, false,
					 HNAE3_LOOP_PARALLEL_SERDES);
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}

static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev, 0);
	} else {
		/* Set the DOWN flag here to disable link updating */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclge_flush_link_update(hdev);
	}
}

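/* Note: disabling the timer task does not cancel a service task that is
 * already running; the DOWN bit plus the barrier above make the flag
 * visible, and hclge_flush_link_update() then waits until any in-flight
 * link update has finished or the flush times out.
 */
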
static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_clear_arfs_rules(handle);

	/* If it is not PF reset, the firmware will disable the MAC,
	 * so it only needs to stop the phy here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		hclge_update_link_status(hdev);
		return;
	}

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclge_reset_tqp(handle, i);

	hclge_config_mac_tnl_int(hdev, false);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}

int hclge_vport_start(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;

	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
		if (vport->vport_id) {
			hclge_restore_mac_table_common(vport);
			hclge_restore_vport_vlan_table(vport);
		} else {
			hclge_restore_hw_table(hdev);
		}
	}

	clear_bit(vport->vport_id, hdev->vport_config_block);

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}

static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if (!resp_code || resp_code == 1)
			return 0;
		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
			 resp_code == HCLGE_ADD_MC_OVERFLOW)
			return -ENOSPC;

		dev_err(&hdev->pdev->dev,
			"add mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"remove mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	}

	dev_err(&hdev->pdev->dev,
		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);

	return -EINVAL;
}

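/* Note: resp_code conventions encoded above: 0 means success for all three
 * opcodes, 1 means "already exists" for ADD but "entry missed" for
 * REMOVE/LOOKUP, and the overflow codes map to -ENOSPC so callers can tell
 * a full table apart from a real I/O failure.
 */
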
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

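/* Example of the vfid -> descriptor mapping above: the first 192 function
 * ids live in desc[1] (six 32-bit words), the rest in desc[2]. vfid 200
 * therefore lands in desc[2], word (200 - 192) / 32 = 0, bit 200 % 32 = 8.
 */
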
static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}

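/* Example: for MAC 00:11:22:33:44:55 the packing above yields
 * mac_addr_hi32 = 0x33221100 (bytes 0-3, little-endian) and
 * mac_addr_lo16 = 0x5544 (bytes 4-5), matching the layout the
 * mac_vlan table expects.
 */
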
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}

static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
			ret);
		return ret;
	}

	*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}

static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "failed to alloc umv space, want %u, get %u\n",
			 hdev->wanted_umv_size, allocated_size);

	hdev->max_umv_size = allocated_size;
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);

	return 0;
}

static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->vport_lock);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
	mutex_unlock(&hdev->vport_lock);
}

static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	if (need_lock)
		mutex_lock(&hdev->vport_lock);

	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);

	if (need_lock)
		mutex_unlock(&hdev->vport_lock);

	return is_full;
}

static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
}

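/* Note: UMV (unicast mac vlan) entries are accounted against a private
 * per-vport quota first; only once used_umv_num passes priv_umv_size does a
 * vport consume from (or release back to) the shared pool, so one busy
 * function cannot starve the others below their guaranteed share.
 */
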
static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
						  const u8 *mac_addr)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}

static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
				  enum HCLGE_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGE_MAC_TO_ADD:
		if (mac_node->state == HCLGE_MAC_TO_DEL)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGE_MAC_TO_DEL:
		if (mac_node->state == HCLGE_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGE_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * ACTIVE.
	 */
	case HCLGE_MAC_ACTIVE:
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			mac_node->state = HCLGE_MAC_ACTIVE;

		break;
	}
}

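/* State machine summary for the transitions above:
 *   TO_ADD + del request -> node freed (never reached hardware)
 *   TO_DEL + add request -> ACTIVE (still present in hardware)
 *   ACTIVE + del request -> TO_DEL (delete on next sync)
 *   TO_ADD + sync done   -> ACTIVE
 */
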
int hclge_update_mac_list(struct hclge_vport *vport,
			  enum HCLGE_MAC_NODE_STATE state,
			  enum HCLGE_MAC_ADDR_TYPE mac_type,
			  const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclge_find_mac_node(list, addr);
	if (mac_node) {
		hclge_update_mac_node(mac_node, state);
		spin_unlock_bh(&vport->mac_list_lock);
		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
		return 0;
	}

	/* if this address is never added, unnecessary to delete */
	if (state == HCLGE_MAC_TO_DEL) {
		spin_unlock_bh(&vport->mac_list_lock);
		dev_err(&hdev->pdev->dev,
			"failed to delete address %pM from mac list\n",
			addr);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&vport->mac_list_lock);
		return -ENOMEM;
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&vport->mac_list_lock);

	return 0;
}

static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr, is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is inexistent. Repeated unicast entry
	 * is not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		mutex_lock(&hdev->vport_lock);
		if (!hclge_is_umv_space_full(vport, false)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			mutex_unlock(&hdev->vport_lock);
			return ret;
		}
		mutex_unlock(&hdev->vport_lock);

		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
				hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}

static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret) {
		mutex_lock(&hdev->vport_lock);
		hclge_update_umv_space(vport, true);
		mutex_unlock(&hdev->vport_lock);
	} else if (ret == -ENOENT) {
		ret = 0;
	}

	return ret;
}

static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		/* This mac addr does not exist, add new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	/* if already overflow, not to print each time */
	if (status == -ENOSPC &&
	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}

static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (status)
			return status;

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfid is zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfid is zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else if (status == -ENOENT) {
		status = 0;
	}

	return status;
}

static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
				      struct list_head *list,
				      int (*sync)(struct hclge_vport *,
						  const unsigned char *))
{
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = sync(vport, mac_node->mac_addr);
		if (!ret) {
			mac_node->state = HCLGE_MAC_ACTIVE;
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}

static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
					struct list_head *list,
					int (*unsync)(struct hclge_vport *,
						      const unsigned char *))
{
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = unsync(vport, mac_node->mac_addr);
		if (!ret || ret == -ENOENT) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}

static bool hclge_sync_from_add_list(struct list_head *add_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	bool all_added = true;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			all_added = false;

		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means we have received a TO_DEL request
		 * during the time window of adding the mac address into the
		 * mac table. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so it will be removed next time; otherwise it must
		 * be TO_ADD, meaning this address has not been written to the
		 * mac table yet, so just remove the mac node.
		 */
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclge_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_DEL;
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}

	return all_added;
}

static void hclge_sync_from_del_list(struct list_head *del_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * received a new TO_ADD request during the time window
			 * of configuring the mac address. The mac node state
			 * is TO_ADD, and the address is already in the
			 * hardware (due to delete fail), so we just need to
			 * change the mac node state to ACTIVE.
			 */
			new_node->state = HCLGE_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclge_update_overflow_flags(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type,
					bool is_all_added)
{
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
	} else {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
	}
}

static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
				       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;
	bool all_added;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addr to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addr outside the spin lock
	 */
	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;
			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&vport->mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_uc_addr_common);
		hclge_sync_vport_mac_list(vport, &tmp_add_list,
					  hclge_add_uc_addr_common);
	} else {
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_mc_addr_common);
		hclge_sync_vport_mac_list(vport, &tmp_add_list,
					  hclge_add_mc_addr_common);
	}

	/* if some mac addresses were added/deleted fail, move back to the
	 * mac_list, and retry at next time.
	 */
	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);
	all_added = hclge_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_update_overflow_flags(vport, mac_type, all_added);
}

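/* Note: hclge_sync_vport_mac_table() snapshots the list under the spinlock,
 * programs hardware without holding it, then merges the results back under
 * the lock. TO_DEL nodes are moved off the vport list while TO_ADD nodes
 * are duplicated, so a concurrent set_rx_mode still sees (and can re-flag)
 * an address whose add is in flight.
 */
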
static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(vport->vport_id, hdev->vport_config_block))
		return false;

	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
		return true;

	return false;
}

static void hclge_sync_mac_table(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (!hclge_need_sync_mac_table(vport))
			continue;

		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
	}
}

void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_mac_node *mac_cfg, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;
	int ret;

	if (mac_type == HCLGE_MAC_ADDR_UC) {
		list = &vport->uc_mac_list;
		unsync = hclge_rm_uc_addr_common;
	} else {
		list = &vport->mc_mac_list;
		unsync = hclge_rm_mc_addr_common;
	}

	INIT_LIST_HEAD(&tmp_del_list);

	if (!is_del_list)
		set_bit(vport->vport_id, hdev->vport_config_block);

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		switch (mac_cfg->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_del(&mac_cfg->node);
			list_add_tail(&mac_cfg->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			if (is_del_list) {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
		ret = unsync(vport, mac_cfg->mac_addr);
		if (!ret || ret == -ENOENT) {
			/* clear all mac addr from hardware, but remain these
			 * mac addr in the mac list, and restore them after
			 * vf reset finished.
			 */
			if (!is_del_list &&
			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
				mac_cfg->state = HCLGE_MAC_TO_ADD;
			} else {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
		} else if (is_del_list) {
			mac_cfg->state = HCLGE_MAC_TO_DEL;
		}
	}

	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);

	spin_unlock_bh(&vport->mac_list_lock);
}

/* remove all mac addresses when uninitializing */
static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	INIT_LIST_HEAD(&tmp_del_list);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			list_del(&mac_node->node);
			kfree(mac_node);
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	if (mac_type == HCLGE_MAC_ADDR_UC)
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_uc_addr_common);
	else
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_mc_addr_common);

	if (!list_empty(&tmp_del_list))
		dev_warn(&hdev->pdev->dev,
			 "uninit %s mac list for vport %u not completely.\n",
			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
			 vport->vport_id);

	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

static void hclge_uninit_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
	}
}

static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%u.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}

static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
				     u8 *mac_addr)
{
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int i;

	if (is_zero_ether_addr(mac_addr))
		return false;

	memset(&req, 0, sizeof(req));
	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
	req.egress_port = cpu_to_le16(egress_port);
	hclge_prepare_mac_addr(&req, mac_addr, false);

	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
		return true;

	vf_idx += HCLGE_VF_VPORT_START_NUM;
	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
		if (i != vf_idx &&
		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
			return true;

	return false;
}

static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
			    u8 *mac_addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
		dev_info(&hdev->pdev->dev,
			 "Specified MAC(=%pM) is same as before, no change committed!\n",
			 mac_addr);
		return 0;
	}

	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
			mac_addr);
		return -EEXIST;
	}

	ether_addr_copy(vport->vf_info.mac, mac_addr);

	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		dev_info(&hdev->pdev->dev,
			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
			 vf, mac_addr);
		return hclge_inform_reset_assert_to_vf(vport);
	}

	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
		 vf, mac_addr);
	return 0;
}

static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
				       const u8 *old_addr, const u8 *new_addr)
{
	struct list_head *list = &vport->uc_mac_list;
	struct hclge_mac_node *old_node, *new_node;

	new_node = hclge_find_mac_node(list, new_addr);
	if (!new_node) {
		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
		if (!new_node)
			return -ENOMEM;

		new_node->state = HCLGE_MAC_TO_ADD;
		ether_addr_copy(new_node->mac_addr, new_addr);
		list_add(&new_node->node, list);
	} else {
		if (new_node->state == HCLGE_MAC_TO_DEL)
			new_node->state = HCLGE_MAC_ACTIVE;

		/* make sure the new addr is in the list head, avoid dev
		 * addr may be not re-added into mac table for the umv space
		 * limitation after global/imp reset which will clear mac
		 * table by hardware.
		 */
		list_move(&new_node->node, list);
	}

	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
		old_node = hclge_find_mac_node(list, old_addr);
		if (old_node) {
			if (old_node->state == HCLGE_MAC_TO_ADD) {
				list_del(&old_node->node);
				kfree(old_node);
			} else {
				old_node->state = HCLGE_MAC_TO_DEL;
			}
		}
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	return 0;
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned char *old_addr = NULL;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"change uc mac err! invalid mac: %pM.\n",
			 new_addr);
		return -EINVAL;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to configure mac pause address, ret = %d\n",
			ret);
		return ret;
	}

	if (!is_first)
		old_addr = hdev->hw.mac.mac_addr;

	spin_lock_bh(&vport->mac_list_lock);
	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to change the mac addr:%pM, ret = %d\n",
			new_addr, ret);
		spin_unlock_bh(&vport->mac_list_lock);

		if (!is_first)
			hclge_pause_addr_cfg(hdev, old_addr);

		return ret;
	}
	/* we must update dev addr with spin lock protect, preventing dev addr
	 * being removed by set_rx_mode path.
	 */
	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
	spin_unlock_bh(&vport->mac_list_lock);

	hclge_task_schedule(hdev, 0);

	return 0;
}

static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}

static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	/* read current vlan filter parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vlan filter config, ret = %d.\n", ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_cmd_reuse_desc(&desc, false);
	req->vlan_fe = filter_en ?
			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
			ret);

	return ret;
}

#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)

static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->pdev->revision >= 0x21) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable, 0);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable, 0);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
					   0);
	}
	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}

static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
				    bool is_kill, u16 vlan,
				    __be16 proto)
{
	struct hclge_vport *vport = &hdev->vport[vfid];
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	/* if the vf vlan table is full, firmware will close the vf vlan
	 * filter, so it is unable and unnecessary to add a new vlan id to
	 * the vf vlan filter. If spoof check is enabled and the vf vlan is
	 * full, it shouldn't add a new vlan, because tx packets with these
	 * vlan ids will be dropped.
	 */
	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
		if (vport->vf_info.spoofchk && vlan) {
			dev_err(&hdev->pdev->dev,
				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
			return -EPERM;
		}
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			set_bit(vfid, hdev->vf_vlan_full);
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%u.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req0->resp_code)
			return 0;

		/* vf vlan filter is disabled when vf vlan table is full,
		 * then new vlan id will not be added into vf vlan table.
		 * Just return 0 without warning, avoid massive verbose
		 * print logs when unload.
		 */
		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%u.\n",
			req0->resp_code);
	}

	return -EIO;
}

static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
			   HCLGE_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}

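/* Example of the offset math above, assuming the values from hclge_main.h
 * (HCLGE_VLAN_ID_OFFSET_STEP == 160 and HCLGE_VLAN_BYTE_SIZE == 8):
 * vlan_id 170 selects vlan_offset 1, byte (170 % 160) / 8 = 1 within the
 * bitmap, and bit value 1 << (170 % 8) = 0x04.
 */
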
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %u vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %u is already in vlan %u\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %u is not in vlan %u\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}

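/* Note: hdev->vlan_table[vlan_id] tracks which vports reference each vlan,
 * so the port-level filter is only touched on the first add
 * (vport_num == 1) or the last kill (vport_num == 0); intermediate
 * adds and kills only update the per-vf filter.
 */
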
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
			HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
			HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		vport->txvlan_cfg.accept_tag1 = false;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20), new revision support them,
	 * these two fields cannot be configured by user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

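/* Note: with port based vlan enabled, tag1 carries the port vlan:
 * already-tagged packets from the stack are refused (accept_tag1 is false),
 * the default tag is inserted on tx, and on rx tag1 stripping follows
 * rx_vlan_offload_en while tag2 is always stripped, keeping the port vlan
 * invisible to the function's own traffic.
 */
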
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}

static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}

static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	clear_bit(vport->vport_id, hdev->vf_vlan_full);
}

void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}

void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	u16 vlan_proto;
	u16 vlan_id;
	u16 state;
	int ret;

	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
	state = vport->port_base_vlan_cfg.state;

	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
					 vport->vport_id, vlan_id,
					 false);
		return;
	}

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
					       vport->vport_id,
					       vlan->vlan_id, false);
		if (ret)
			break;
		vlan->hd_tbl_status = true;
	}
}

/* For global reset and imp reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, then they
 * can be restored in the service task after reset completes. Further,
 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
 * be restored after reset, so just remove these mac nodes from mac_list.
 */
static void hclge_mac_node_convert_for_reset(struct list_head *list)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_ADD;
		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

void hclge_restore_mac_table_common(struct hclge_vport *vport)
{
	spin_lock_bh(&vport->mac_list_lock);

	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	spin_unlock_bh(&vport->mac_list_lock);
}

static void hclge_restore_hw_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;

	hclge_restore_mac_table_common(vport);
	hclge_restore_vport_vlan_table(vport);
	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);

	hclge_restore_fd_entries(handle);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						false);
	}

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}

int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
	if (ret)
		return ret;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(old_vlan_info->vlan_proto),
					       vport->vport_id,
					       old_vlan_info->vlan_tag,
					       true);
		if (ret)
			return ret;

		goto update;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

	/* update state only when disable/enable port based VLAN */
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

update:
	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;

	return 0;
}

static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;

		return HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	if (!vlan)
		return HNAE3_PORT_BASE_VLAN_DISABLE;
	else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
		return HNAE3_PORT_BASE_VLAN_NOCHANGE;

	return HNAE3_PORT_BASE_VLAN_MODIFY;
}

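/* Decision table for hclge_get_port_base_vlan_state():
 *   disabled + vlan 0     -> NOCHANGE
 *   disabled + vlan N     -> ENABLE
 *   enabled  + vlan 0     -> DISABLE
 *   enabled  + same vlan  -> NOCHANGE
 *   enabled  + other vlan -> MODIFY
 */
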
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vfid);
	if (!vport)
		return -EINVAL;

	/* qos is a 3 bit value, so it can not be bigger than 7 */
	if (vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);
	} else {
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							vport->vport_id, state,
							vlan, qos,
							ntohs(proto));
		return ret;
	}
}

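/* This is the ndo_set_vf_vlan backend. A typical invocation (device name
 * hypothetical) would be:
 *
 *   ip link set eth0 vf 1 vlan 100 qos 3
 *
 * which arrives here as vfid = 1, vlan = 100, qos = 3 and
 * proto = htons(ETH_P_8021Q); anything other than 802.1Q is rejected above.
 */
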
static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
{
	struct hclge_vlan_info *vlan_info;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* clear port based vlan for all vf */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		vport = &hdev->vport[vf];
		vlan_info = &vport->port_base_vlan_cfg.vlan_info;

		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
					       vport->vport_id,
					       vlan_info->vlan_tag, true);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"failed to clear vf vlan for vf%d, ret = %d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}

int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When the device is resetting, the firmware is unable to handle the
	 * mailbox. Just record the vlan id, and remove it after the reset
	 * has finished.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
		return -EBUSY;
	}

	/* when port based vlan is enabled, we use the port based vlan as the
	 * vlan filter entry. In this case, we don't update the vlan filter
	 * table when the user adds a new vlan or removes an existing vlan,
	 * just update the vport vlan list. The vlan ids in the vlan list will
	 * not be written into the vlan filter table until port based vlan is
	 * disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, is_kill);
		writen_to_tbl = true;
	}

	if (!ret) {
		if (is_kill)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
		else
			hclge_add_vport_vlan_table(vport, vlan_id,
						   writen_to_tbl);
	} else if (is_kill) {
		/* when removing the hw vlan filter failed, record the vlan
		 * id, and try to remove it from hw later, to stay consistent
		 * with the stack.
		 */
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
	}
	return ret;
}

static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT	60

	int i, ret, sync_cnt = 0;
	u16 vlan_id;

	/* start from vport 1 for PF is always alive */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
					 VLAN_N_VID);
		while (vlan_id != VLAN_N_VID) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id, vlan_id,
						       true);
			if (ret && ret != -EINVAL)
				return;

			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
			hclge_rm_vport_vlan_table(vport, vlan_id, false);

			sync_cnt++;
			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
				return;

			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
						 VLAN_N_VID);
		}
	}
}

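/* Note on the loop above: vlan_del_fail_bmap holds VLAN IDs whose hardware
 * delete could not be issued (e.g. while a reset was in flight). The
 * periodic service task retries them here; -EINVAL is treated like success,
 * presumably because it means the filter entry is already absent, and the
 * pass bails out after HCLGE_MAX_SYNC_COUNT deletions to bound its runtime.
 */
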
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}

int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret;

	/* HW supports 2 layers of vlan */
	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}

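/* Worked example for the frame-size math above: a standard MTU of 1500
 * yields max_frm_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
 * 2 * VLAN_HLEN (2 * 4) = 1526 bytes, leaving room for a double-tagged
 * frame.
 */
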
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	if (enable)
		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);

	return ret;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;

		/* Wait for tqp hw reset */
		usleep_range(1000, 1200);
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}

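/* The sequence above is: quiesce the queue, assert the per-queue soft
 * reset, poll the ready bit up to HCLGE_TQP_RESET_TRY_TIMES times with a
 * ~1ms sleep between polls, then deassert the reset. hclge_reset_vf_queue()
 * below follows the same sequence but only warns on failure, presumably
 * because it is driven by a VF request rather than a local ethtool
 * operation.
 */
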
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;

		/* Wait for tqp hw reset */
		usleep_range(1000, 1200);
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}

static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n", ret);

	return ret;
}

int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*rx_en = 0;
		*tx_en = 1;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
					 u32 rx_en, u32 tx_en)
{
	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	if (phydev) {
		fc_autoneg = hclge_get_autoneg(handle);
		if (auto_neg != fc_autoneg) {
			dev_info(&hdev->pdev->dev,
				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
			return -EOPNOTSUPP;
		}
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	hclge_record_user_pauseparam(hdev, rx_en, tx_en);

	if (!auto_neg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	return -EOPNOTSUPP;
}

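/* Userspace example for the handler above (device name hypothetical):
 *
 *   ethtool -A eth0 rx on tx off
 *
 * With autoneg disabled the MAC pause configuration is applied directly;
 * with autoneg enabled (and a PHY present) only the advertised pause bits
 * are updated and autonegotiation is restarted.
 */
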
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* When the nic is down, the service task is not running and does not
	 * update the port information every second. Query the port
	 * information before returning the media type, to ensure we report
	 * the correct media information.
	 */
	hclge_update_port_info(hdev);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, is_resolved;
	unsigned int retval;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}

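/* The accesses above use the usual paged-PHY pattern: select the MDI/MDI-X
 * page, read the control and status registers, then restore the copper
 * page so subsequent PHY accesses operate on the default register set.
 */
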
static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");

	dev_info(dev, "PF info end.\n");
}

static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->nic.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.reset_cnt;
	int ret;

	ret = client->ops->init_instance(&vport->nic);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_nic_err;
	}

	/* Enable nic hw error interrupts */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto init_nic_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->vport->nic))
		hclge_info_show(hdev);

	return ret;

init_nic_err:
	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	client->ops->uninit_instance(&vport->nic, 0);

	return ret;
}

static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hnae3_client *client;
	int rst_cnt;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_roce_err;
	}

	/* Enable roce ras interrupts */
	ret = hclge_config_rocee_ras_interrupt(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable roce ras interrupts\n", ret);
		goto init_roce_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;

init_roce_err:
	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

	return ret;
}

static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = hclge_init_nic_client_instance(ae_dev, vport);
			if (ret)
				goto clear_nic;

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}

static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
}

static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_RETRY_WAIT_MS	500
#define HCLGE_FLR_RETRY_CNT	5

	struct hclge_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

retry:
	down(&hdev->reset_sem);
	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	hdev->reset_type = HNAE3_FLR_RESET;
	ret = hclge_reset_prepare(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
			ret);
		if (hdev->reset_pending ||
		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
			dev_err(&hdev->pdev->dev,
				"reset_pending:0x%lx, retry_cnt:%d\n",
				hdev->reset_pending, retry_cnt);
			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
			up(&hdev->reset_sem);
			msleep(HCLGE_FLR_RETRY_WAIT_MS);
			goto retry;
		}
	}

	/* disable misc vector before FLR done */
	hclge_enable_vector(&hdev->misc_vector, false);
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
	hdev->rst_stats.flr_rst_cnt++;
}

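/* FLR notes: the prepare step above may be retried up to HCLGE_FLR_RETRY_CNT
 * times (500ms apart), dropping reset_sem between attempts so a concurrent
 * reset task can make progress. The misc vector and command queue stay
 * masked from here until hclge_flr_done() re-enables them and rebuilds the
 * device state.
 */
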
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	int ret;

	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_reset_rebuild(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vf(%u) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}

static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;

	/* HW supports 2 layers of vlan */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	spin_lock_init(&hdev->fd_rule_lock);
	sema_init(&hdev->reset_sem, 1);

	ret = hclge_pci_init(hdev);
	if (ret)
		goto out;

	/* Firmware command queue initialization */
	ret = hclge_cmd_queue_init(hdev);
	if (ret)
		goto err_pci_uninit;

	/* Firmware command initialization */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret)
		goto err_msi_uninit;

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	ret = hclge_map_tqp(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret)
			goto err_msi_irq_uninit;
	}

	ret = hclge_init_umv_space(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);

	/* Setup affinity after service timer setup because add_timer_on
	 * is called in affinity notify.
	 */
	hclge_misc_affinity_setup(hdev);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors that have already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* request a delayed reset for the error recovery, because an
	 * immediate global reset on this PF could affect the pending
	 * initialization of other PFs
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	hclge_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
}

static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_config_switch_param(hdev, vf, enable,
					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
}

static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					  HCLGE_FILTER_FE_NIC_INGRESS_B,
					  enable, vf);
}

static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
{
	int ret;

	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set vf %d mac spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);
		return ret;
	}

	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set vf %d vlan spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);

	return ret;
}

static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
				 bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_spoofchk = enable ? 1 : 0;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.spoofchk == new_spoofchk)
		return 0;

	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enabling spoof check may cause its packet send to fail\n",
			 vf);
	else if (enable && hclge_is_umv_space_full(vport, true))
		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enabling spoof check may cause its packet send to fail\n",
			 vf);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
	if (ret)
		return ret;

	vport->vf_info.spoofchk = new_spoofchk;
	return 0;
}

static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	int i;

	if (hdev->pdev->revision == 0x20)
		return 0;

	/* resume the vf spoof check state after reset */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
					       vport->vf_info.spoofchk);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_trusted = enable ? 1 : 0;
	bool en_bc_pmc;
	int ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.trusted == new_trusted)
		return 0;

	/* Disable promisc mode for VF if it is not trusted any more. */
	if (!enable && vport->vf_info.promisc_enable) {
		en_bc_pmc = hdev->pdev->revision != 0x20;
		ret = hclge_set_vport_promisc_mode(vport, false, false,
						   en_bc_pmc);
		if (ret)
			return ret;

		vport->vf_info.promisc_enable = 0;
		hclge_inform_vf_promisc_info(vport);
	}

	vport->vf_info.trusted = new_trusted;

	return 0;
}

static void hclge_reset_vf_rate(struct hclge_dev *hdev)
{
	int ret;
	int vf;

	/* reset vf rate to default value */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		struct hclge_vport *vport = &hdev->vport[vf];

		vport->vf_info.max_tx_rate = 0;
		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"vf%d failed to reset to default, ret=%d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}

static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
				     int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate != 0 ||
	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
		dev_err(&hdev->pdev->dev,
			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
		return -EINVAL;
	}

	return 0;
}

static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
			     int min_tx_rate, int max_tx_rate, bool force)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
	if (ret)
		return ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
		return 0;

	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
	if (ret)
		return ret;

	vport->vf_info.max_tx_rate = max_tx_rate;

	return 0;
}

static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* resume the vf max_tx_rate after reset */
	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
		vport = hclge_get_vf_vport(hdev, vf);
		if (!vport)
			return -EINVAL;

		/* zero means max rate; after reset, the firmware has already
		 * set it to max rate, so just continue.
		 */
		if (!vport->vf_info.max_tx_rate)
			continue;

		ret = hclge_set_vf_rate(handle, vf, 0,
					vport->vf_info.max_tx_rate, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%d failed to resume tx_rate:%u, ret=%d\n",
				vf, vport->vf_info.max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}

static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	/* NOTE: pf reset needn't clear or restore pf and vf table entries,
	 * so the tables in memory should not be cleaned here.
	 */
	if (hdev->reset_type == HNAE3_IMP_RESET ||
	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
		hclge_reset_umv_space(hdev);
	}

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}
	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}
	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}
	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}
	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;
	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}
	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}
	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to reinit manager table, ret = %d\n", ret);
		return ret;
	}
	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Log and clear the hw errors that have already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);
	ret = hclge_reset_vport_spoofchk(hdev);
	if (ret)
		return ret;

	ret = hclge_resume_vf_rate(hdev);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_reset_vf_rate(hdev);
	hclge_clear_vf_vlan(hdev);
	hclge_misc_affinity_teardown(hdev);
	hclge_state_uninit(hdev);
	hclge_uninit_mac_table(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_vlan_table(hdev);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

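/* Illustrative numbers for the calculation above: with rss_size_max = 64,
 * alloc_tqps = 8 and num_tc = 2, ethtool sees min(64, 8 / 2) = 4 as the
 * maximum combined channel count (values are examples, not hardware
 * limits).
 */
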
static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by the user */
	if (rxfh_configured)
		goto out;

	/* Reinitialize the rss indirection table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

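/* tc_size above is programmed as a power-of-two exponent: a kinfo->rss_size
 * of 6, for example, rounds up to 8 via roundup_pow_of_two() and is written
 * as tc_size = ilog2(8) = 3, while tc_offset keeps each TC's queues at
 * rss_size * tc within the vport's queue range.
 */
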
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3
#define BD_LIST_MAX_NUM		30

int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	/* prepare 4 commands to query DFX BD number */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, 4);
}

static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list,
				    u32 type_num)
{
	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}

static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	desc = desc_src;
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}

static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg_num + separator_num;
}

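/* Dump layout: every block of registers is emitted in lines of
 * REG_NUM_PER_LINE (4) u32 words and padded with SEPARATOR_VALUE
 * (0xFDFCFBFA) up to the next line boundary (a full separator line when
 * already aligned), so consumers of the ethtool register dump can split
 * the blocks back apart.
 */
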
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int data_len_per_desc, bd_num, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	u32 data_len;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	data_len_per_desc = sizeof_field(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		data_len = data_len_per_desc * bd_num;
		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
	}

	return ret;
}

static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	struct hclge_desc *desc_src;
	u32 *reg = data;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src)
		return -ENOMEM;

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			break;
		}

		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	kfree(desc_src);
	return ret;
}

static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num, separator_num;
	int data_num_sum;
	u32 *reg = data;

	/* fetching per-PF registers values from PF PCIe register space */
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum = reg_num + separator_num;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum += reg_num + separator_num;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

	return data_num_sum;
}

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int regs_lines_32_bit, regs_lines_64_bit;
	int dfx_regs_len;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, reg_num, separator_num, ret;
	u32 *reg = data;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_32_bit;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_64_bit * 2;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}

static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;
	u8 tmp_flags;
	int ret;

	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
		vport->last_promisc_flags = vport->overflow_promisc_flags;
	}

	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
					     tmp_flags & HNAE3_MPE);
		if (!ret) {
			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
			hclge_enable_vlan_filter(handle,
						 tmp_flags & HNAE3_VLAN_FLTR);
		}
	}
}

static bool hclge_module_existed(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u32 existed;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP exist state, ret = %d\n", ret);
		return false;
	}

	existed = le32_to_cpu(desc.data[0]);

	return existed != 0;
}

/* one read needs 6 BDs (140 bytes in total);
 * returns the number of bytes actually read, 0 means the read failed.
 */
static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
				     u32 len, u8 *data)
{
	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
	u16 read_len;
	u16 copy_len;
	int ret;
	int i;

	/* setup all 6 bds to read module eeprom info. */
	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
					   true);

		/* bd0~bd4 need the next flag */
		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	/* setup bd0, this bd contains the offset and read length. */
	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = cpu_to_le16(read_len);

	ret = hclge_cmd_send(&hdev->hw, desc, i);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP eeprom info, ret = %d\n", ret);
		return 0;
	}

	/* copy sfp info from bd0 to out buffer. */
	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	/* copy sfp info from bd1~bd5 to out buffer if needed. */
	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			break;

		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return read_len;
}

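/* Example: reading a module's 256-byte EEPROM page with the helper above
 * takes two firmware round trips, the first returning
 * HCLGE_SFP_INFO_MAX_LEN (140) bytes and the second the remaining 116;
 * hclge_get_module_eeprom() below simply loops until the requested length
 * has been gathered.
 */
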
static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}

module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);