// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_err.h"

#define HCLGE_NAME			"hclge"
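/* MAC statistics are exposed to ethtool as (name, offset) pairs; the
 * HCLGE_STATS_READ() macro below fetches a u64 counter located at a byte
 * offset inside struct hclge_mac_stats, so no per-field accessor code is
 * needed.
 */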
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT		256U
#define HCLGE_BUF_MUL_BY		2
#define HCLGE_BUF_DIV_BY		2
#define NEED_RESERVE_TC_NUM		2
#define BUF_MAX_PERCENT			100
#define BUF_RESERVE_PERCENT		90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500
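/* The reset sync times above are millisecond delays used elsewhere in the
 * driver when waiting for a reset to synchronize; HCLGE_PF_RESET_SYNC_CNT
 * bounds how many of those intervals are waited before giving up.
 */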
/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS		10

#define HCLGE_VF_VPORT_START_NUM	1
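/* vport 0 is the PF itself; VF vports are numbered starting from
 * HCLGE_VF_VPORT_START_NUM.
 */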
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
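/* The register address tables below are walked when the driver dumps device
 * registers (for example for an ethtool register dump); each array groups
 * the registers of one functional block (command queue, misc/common, ring,
 * TQP interrupt).
 */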
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
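/* Default 40-byte RSS hash key below. This is the widely used
 * Microsoft-recommended Toeplitz key, the same default many other NIC
 * drivers ship.
 */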
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};
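/* hclge_dfx_reg_opcode_list below is kept parallel to
 * hclge_dfx_bd_offset_list above: entry i of one corresponds to entry i of
 * the other, so the BD-number offset and the query opcode for a block are
 * always looked up with the same index.
 */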
static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
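/* Two MAC statistics query methods exist. Older firmware only supports the
 * fixed-size query (special opcode 0032) handled by
 * hclge_mac_update_stats_defective(); newer firmware reports how many stats
 * registers it has, and the full set is then read via opcode 0034 in
 * hclge_mac_update_stats_complete().
 */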
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);
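	/* The first BD of the reply carries the command head plus 3 u64
	 * counters; every following BD carries 4. The BD count is therefore
	 * 1 + ceil((reg_num - 3) / 4), computed below with a shift plus a
	 * remainder test.
	 */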
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);
	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each TQP has one TX queue and one RX queue */
	return kinfo->num_tqps * 2;
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode is supported
	 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		    hdev->hw.mac.phydev->drv->set_loopback) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and RoCE vectors,
		 * NIC vectors are queued before RoCE vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}
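/* hclge_parse_speed() below translates the raw speed code reported by
 * firmware into one of the HCLGE_MAC_SPEED_* values used throughout the
 * driver.
 */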
static int hclge_parse_speed(int speed_cmd, int *speed)
		*speed = HCLGE_MAC_SPEED_10M;
		*speed = HCLGE_MAC_SPEED_100M;
		*speed = HCLGE_MAC_SPEED_1G;
		*speed = HCLGE_MAC_SPEED_10G;
		*speed = HCLGE_MAC_SPEED_25G;
		*speed = HCLGE_MAC_SPEED_40G;
		*speed = HCLGE_MAC_SPEED_50G;
		*speed = HCLGE_MAC_SPEED_100G;
		*speed = HCLGE_MAC_SPEED_200G;
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	case HCLGE_MAC_SPEED_200G:
		speed_bit = HCLGE_SUPPORT_200G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			mac->supported);
}
static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 mac->supported);
}
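/* FEC capability follows the MAC speed: 10G/40G links use BASE-R FEC,
 * 25G/50G links can use either BASE-R or RS, and 100G/200G links use RS
 * only. hclge_convert_setting_fec() advertises the matching ethtool FEC
 * modes and records the ability mask.
 */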
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
	case HCLGE_MAC_SPEED_200G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to supporting all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
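/* Return the highest speed supported in speed_ability, falling back to 1G
 * when no recognised bit is set.
 */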
static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define SPEED_ABILITY_EXT_SHIFT	8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;
	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);
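	/* Splice the upper MAC-address bits from param[3] in above bit 31.
	 * The shift is written as (x << 31) << 1 rather than x << 32,
	 * presumably to sidestep shift-width warnings from static checkers.
	 */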
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* The PF max rss size field (HCLGE_CFG_PF_RSS_SIZE_M) holds the
	 * exponent of a power of 2 instead of the size itself, which is
	 * more flexible for future changes and expansions. A PF field of 0
	 * is not meaningful; in that case the PF shares the VF max rss size
	 * field (HCLGE_CFG_RSS_SIZE_S) as its maximum.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;
}
/* hclge_get_cfg: query static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length field is in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* the minimum number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the initial affinity based on the PCI function number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* by default, ensure a one-to-one mapping between irq and queue */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;
	int ret;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;
		vport++;
	}

	return 0;
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport; the main vport
	 * also gets any remainder.
	 */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	u32 i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of PFC-enabled TCs, which have a private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of PFC-disabled TCs, which have a private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
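/* hclge_is_rx_buf_ok() checks whether the rx buffer budget (rx_all) can hold
 * the already-assigned private buffers plus a legal shared buffer. The
 * shared buffer must satisfy both a fixed minimum (shared_buf_min, which
 * includes the DV headroom) and a per-TC minimum of one aligned MPS per
 * enabled TC plus one extra (shared_buf_tc); on success the shared buffer
 * size and its per-TC waterline thresholds are filled in.
 */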
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
				  / BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last TC be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
2102 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2103 struct hclge_pkt_buf_alloc *buf_alloc)
2105 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2106 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2109 /* let the last to be cleared first */
2110 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2111 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2112 unsigned int mask = BIT((unsigned int)i);
2114 if (hdev->hw_tc_map & mask &&
2115 hdev->tm_info.hw_pfc_map & mask) {
2116 /* Reduce the number of PFC TCs with a private buffer */
2124 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2129 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2132 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2133 struct hclge_pkt_buf_alloc *buf_alloc)
2135 #define COMPENSATE_BUFFER 0x3C00
2136 #define COMPENSATE_HALF_MPS_NUM 5
2137 #define PRIV_WL_GAP 0x1800
2139 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2140 u32 tc_num = hclge_get_tc_num(hdev);
2141 u32 half_mps = hdev->mps >> 1;
2146 rx_priv = rx_priv / tc_num;
2148 if (tc_num <= NEED_RESERVE_TC_NUM)
2149 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2151 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2152 COMPENSATE_HALF_MPS_NUM * half_mps;
2153 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2154 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2156 if (rx_priv < min_rx_priv)
2159 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2160 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2167 if (!(hdev->hw_tc_map & BIT(i)))
2171 priv->buf_size = rx_priv;
2172 priv->wl.high = rx_priv - hdev->dv_buf_size;
2173 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2176 buf_alloc->s_buf.buf_size = 0;
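/* Worked example for the minimum check above (illustrative numbers):
 * with mps = 1500 (half_mps = 750) and a hypothetical dv_buf_size of
 * 8192, min_rx_priv = 8192 + 0x3C00 (15360) + 5 * 750 = 27302, rounded
 * up to 27392. Each enabled TC must be able to get at least this much
 * private RX buffer, otherwise the private-only strategy is abandoned.
 */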
2181 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2182 * @hdev: pointer to struct hclge_dev
2183 * @buf_alloc: pointer to buffer calculation data
2184 * @return: 0: calculation successful, negative: failed
2186 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2187 struct hclge_pkt_buf_alloc *buf_alloc)
2189 /* When DCB is not supported, rx private buffer is not allocated. */
2190 if (!hnae3_dev_dcb_supported(hdev)) {
2191 u32 rx_all = hdev->pkt_buf_size;
2193 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2194 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2200 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2203 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2206 /* try to decrease the buffer size */
2207 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2210 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2213 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
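/* The strategies above are tried in decreasing order of buffer demand:
 * private buffers only, private plus shared buffer with "max"
 * waterlines, the same with "min" waterlines, then progressively
 * dropping the private buffers of non-PFC TCs and finally of PFC TCs.
 * The first layout accepted by hclge_is_rx_buf_ok() wins; if none
 * fits, the function returns an error.
 */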
2219 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2220 struct hclge_pkt_buf_alloc *buf_alloc)
2222 struct hclge_rx_priv_buff_cmd *req;
2223 struct hclge_desc desc;
2227 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2228 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2230 /* Alloc a private buffer for each TC */
2231 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2232 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2235 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2237 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2241 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2242 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2244 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2246 dev_err(&hdev->pdev->dev,
2247 "rx private buffer alloc cmd failed %d\n", ret);
2252 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2253 struct hclge_pkt_buf_alloc *buf_alloc)
2255 struct hclge_rx_priv_wl_buf *req;
2256 struct hclge_priv_buf *priv;
2257 struct hclge_desc desc[2];
2261 for (i = 0; i < 2; i++) {
2262 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2264 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2266 /* The first descriptor sets the NEXT bit to 1 */
2268 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2270 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2272 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2273 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2275 priv = &buf_alloc->priv_buf[idx];
2276 req->tc_wl[j].high =
2277 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2278 req->tc_wl[j].high |=
2279 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2281 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2282 req->tc_wl[j].low |=
2283 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2287 /* Send 2 descriptors at one time */
2288 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2290 dev_err(&hdev->pdev->dev,
2291 "rx private waterline config cmd failed %d\n",
2296 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2297 struct hclge_pkt_buf_alloc *buf_alloc)
2299 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2300 struct hclge_rx_com_thrd *req;
2301 struct hclge_desc desc[2];
2302 struct hclge_tc_thrd *tc;
2306 for (i = 0; i < 2; i++) {
2307 hclge_cmd_setup_basic_desc(&desc[i],
2308 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2309 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2311 /* The first descriptor sets the NEXT bit to 1 */
2313 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2315 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2317 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2318 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2320 req->com_thrd[j].high =
2321 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2322 req->com_thrd[j].high |=
2323 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2324 req->com_thrd[j].low =
2325 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2326 req->com_thrd[j].low |=
2327 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2331 /* Send 2 descriptors at one time */
2332 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2334 dev_err(&hdev->pdev->dev,
2335 "common threshold config cmd failed %d\n", ret);
2339 static int hclge_common_wl_config(struct hclge_dev *hdev,
2340 struct hclge_pkt_buf_alloc *buf_alloc)
2342 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2343 struct hclge_rx_com_wl *req;
2344 struct hclge_desc desc;
2347 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2349 req = (struct hclge_rx_com_wl *)desc.data;
2350 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2351 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2353 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2354 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2356 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2358 dev_err(&hdev->pdev->dev,
2359 "common waterline config cmd failed %d\n", ret);
2364 int hclge_buffer_alloc(struct hclge_dev *hdev)
2366 struct hclge_pkt_buf_alloc *pkt_buf;
2369 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2373 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2375 dev_err(&hdev->pdev->dev,
2376 "could not calc tx buffer size for all TCs %d\n", ret);
2380 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2382 dev_err(&hdev->pdev->dev,
2383 "could not alloc tx buffers %d\n", ret);
2387 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2389 dev_err(&hdev->pdev->dev,
2390 "could not calc rx priv buffer size for all TCs %d\n",
2395 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2397 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2402 if (hnae3_dev_dcb_supported(hdev)) {
2403 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2405 dev_err(&hdev->pdev->dev,
2406 "could not configure rx private waterline %d\n",
2411 ret = hclge_common_thrd_config(hdev, pkt_buf);
2413 dev_err(&hdev->pdev->dev,
2414 "could not configure common threshold %d\n",
2420 ret = hclge_common_wl_config(hdev, pkt_buf);
2422 dev_err(&hdev->pdev->dev,
2423 "could not configure common waterline %d\n", ret);
2430 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2432 struct hnae3_handle *roce = &vport->roce;
2433 struct hnae3_handle *nic = &vport->nic;
2434 struct hclge_dev *hdev = vport->back;
2436 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2438 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2441 roce->rinfo.base_vector = hdev->roce_base_vector;
2443 roce->rinfo.netdev = nic->kinfo.netdev;
2444 roce->rinfo.roce_io_base = hdev->hw.io_base;
2445 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2447 roce->pdev = nic->pdev;
2448 roce->ae_algo = nic->ae_algo;
2449 roce->numa_node_mask = nic->numa_node_mask;
2454 static int hclge_init_msi(struct hclge_dev *hdev)
2456 struct pci_dev *pdev = hdev->pdev;
2460 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2462 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2465 "failed(%d) to allocate MSI/MSI-X vectors\n",
2469 if (vectors < hdev->num_msi)
2470 dev_warn(&hdev->pdev->dev,
2471 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2472 hdev->num_msi, vectors);
2474 hdev->num_msi = vectors;
2475 hdev->num_msi_left = vectors;
2477 hdev->base_msi_vector = pdev->irq;
2478 hdev->roce_base_vector = hdev->base_msi_vector +
2481 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2482 sizeof(u16), GFP_KERNEL);
2483 if (!hdev->vector_status) {
2484 pci_free_irq_vectors(pdev);
2488 for (i = 0; i < hdev->num_msi; i++)
2489 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2491 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2492 sizeof(int), GFP_KERNEL);
2493 if (!hdev->vector_irq) {
2494 pci_free_irq_vectors(pdev);
2501 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2503 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2504 duplex = HCLGE_MAC_FULL;
2509 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2512 struct hclge_config_mac_speed_dup_cmd *req;
2513 struct hclge_desc desc;
2516 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2518 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2521 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2524 case HCLGE_MAC_SPEED_10M:
2525 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2526 HCLGE_CFG_SPEED_S, 6);
2528 case HCLGE_MAC_SPEED_100M:
2529 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2530 HCLGE_CFG_SPEED_S, 7);
2532 case HCLGE_MAC_SPEED_1G:
2533 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2534 HCLGE_CFG_SPEED_S, 0);
2536 case HCLGE_MAC_SPEED_10G:
2537 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2538 HCLGE_CFG_SPEED_S, 1);
2540 case HCLGE_MAC_SPEED_25G:
2541 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2542 HCLGE_CFG_SPEED_S, 2);
2544 case HCLGE_MAC_SPEED_40G:
2545 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2546 HCLGE_CFG_SPEED_S, 3);
2548 case HCLGE_MAC_SPEED_50G:
2549 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2550 HCLGE_CFG_SPEED_S, 4);
2552 case HCLGE_MAC_SPEED_100G:
2553 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2554 HCLGE_CFG_SPEED_S, 5);
2556 case HCLGE_MAC_SPEED_200G:
2557 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2558 HCLGE_CFG_SPEED_S, 8);
2561 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2565 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2568 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2570 dev_err(&hdev->pdev->dev,
2571 "mac speed/duplex config cmd failed %d.\n", ret);
2578 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2580 struct hclge_mac *mac = &hdev->hw.mac;
2583 duplex = hclge_check_speed_dup(duplex, speed);
2584 if (!mac->support_autoneg && mac->speed == speed &&
2585 mac->duplex == duplex)
2588 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2592 hdev->hw.mac.speed = speed;
2593 hdev->hw.mac.duplex = duplex;
2598 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2601 struct hclge_vport *vport = hclge_get_vport(handle);
2602 struct hclge_dev *hdev = vport->back;
2604 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2607 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2609 struct hclge_config_auto_neg_cmd *req;
2610 struct hclge_desc desc;
2614 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2616 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2618 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2619 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2621 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2623 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2629 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2631 struct hclge_vport *vport = hclge_get_vport(handle);
2632 struct hclge_dev *hdev = vport->back;
2634 if (!hdev->hw.mac.support_autoneg) {
2636 dev_err(&hdev->pdev->dev,
2637 "autoneg is not supported by current port\n");
2644 return hclge_set_autoneg_en(hdev, enable);
2647 static int hclge_get_autoneg(struct hnae3_handle *handle)
2649 struct hclge_vport *vport = hclge_get_vport(handle);
2650 struct hclge_dev *hdev = vport->back;
2651 struct phy_device *phydev = hdev->hw.mac.phydev;
2654 return phydev->autoneg;
2656 return hdev->hw.mac.autoneg;
2659 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2661 struct hclge_vport *vport = hclge_get_vport(handle);
2662 struct hclge_dev *hdev = vport->back;
2665 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2667 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2670 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2673 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2675 struct hclge_vport *vport = hclge_get_vport(handle);
2676 struct hclge_dev *hdev = vport->back;
2678 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2679 return hclge_set_autoneg_en(hdev, !halt);
2684 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2686 struct hclge_config_fec_cmd *req;
2687 struct hclge_desc desc;
2690 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2692 req = (struct hclge_config_fec_cmd *)desc.data;
2693 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2694 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2695 if (fec_mode & BIT(HNAE3_FEC_RS))
2696 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2697 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2698 if (fec_mode & BIT(HNAE3_FEC_BASER))
2699 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2700 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2702 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2704 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2709 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2711 struct hclge_vport *vport = hclge_get_vport(handle);
2712 struct hclge_dev *hdev = vport->back;
2713 struct hclge_mac *mac = &hdev->hw.mac;
2716 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2717 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2721 ret = hclge_set_fec_hw(hdev, fec_mode);
2725 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2729 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2732 struct hclge_vport *vport = hclge_get_vport(handle);
2733 struct hclge_dev *hdev = vport->back;
2734 struct hclge_mac *mac = &hdev->hw.mac;
2737 *fec_ability = mac->fec_ability;
2739 *fec_mode = mac->fec_mode;
2742 static int hclge_mac_init(struct hclge_dev *hdev)
2744 struct hclge_mac *mac = &hdev->hw.mac;
2747 hdev->support_sfp_query = true;
2748 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2749 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2750 hdev->hw.mac.duplex);
2754 if (hdev->hw.mac.support_autoneg) {
2755 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2762 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2763 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2768 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2770 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2774 ret = hclge_set_default_loopback(hdev);
2778 ret = hclge_buffer_alloc(hdev);
2780 dev_err(&hdev->pdev->dev,
2781 "allocate buffer fail, ret=%d\n", ret);
2786 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2788 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2789 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2790 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2791 hclge_wq, &hdev->service_task, 0);
2794 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2796 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2797 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2798 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2799 hclge_wq, &hdev->service_task, 0);
2802 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2804 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2805 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2806 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2807 hclge_wq, &hdev->service_task,
2811 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2813 struct hclge_link_status_cmd *req;
2814 struct hclge_desc desc;
2817 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2818 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2820 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2825 req = (struct hclge_link_status_cmd *)desc.data;
2826 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2827 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2832 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2834 struct phy_device *phydev = hdev->hw.mac.phydev;
2836 *link_status = HCLGE_LINK_STATUS_DOWN;
2838 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2841 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2844 return hclge_get_mac_link_status(hdev, link_status);
2847 static void hclge_update_link_status(struct hclge_dev *hdev)
2849 struct hnae3_client *rclient = hdev->roce_client;
2850 struct hnae3_client *client = hdev->nic_client;
2851 struct hnae3_handle *rhandle;
2852 struct hnae3_handle *handle;
2860 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2863 ret = hclge_get_mac_phy_link(hdev, &state);
2865 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2869 if (state != hdev->hw.mac.link) {
2870 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2871 handle = &hdev->vport[i].nic;
2872 client->ops->link_status_change(handle, state);
2873 hclge_config_mac_tnl_int(hdev, state);
2874 rhandle = &hdev->vport[i].roce;
2875 if (rclient && rclient->ops->link_status_change)
2876 rclient->ops->link_status_change(rhandle,
2879 hdev->hw.mac.link = state;
2882 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2885 static void hclge_update_port_capability(struct hclge_mac *mac)
2887 /* update fec ability by speed */
2888 hclge_convert_setting_fec(mac);
2890 /* firmware cannot identify the backplane type; the media type
2891 * read from the configuration can help to deal with it
2893 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2894 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2895 mac->module_type = HNAE3_MODULE_TYPE_KR;
2896 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2897 mac->module_type = HNAE3_MODULE_TYPE_TP;
2899 if (mac->support_autoneg) {
2900 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2901 linkmode_copy(mac->advertising, mac->supported);
2903 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2905 linkmode_zero(mac->advertising);
2909 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2911 struct hclge_sfp_info_cmd *resp;
2912 struct hclge_desc desc;
2915 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2916 resp = (struct hclge_sfp_info_cmd *)desc.data;
2917 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2918 if (ret == -EOPNOTSUPP) {
2919 dev_warn(&hdev->pdev->dev,
2920 "IMP do not support get SFP speed %d\n", ret);
2923 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2927 *speed = le32_to_cpu(resp->speed);
2932 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2934 struct hclge_sfp_info_cmd *resp;
2935 struct hclge_desc desc;
2938 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2939 resp = (struct hclge_sfp_info_cmd *)desc.data;
2941 resp->query_type = QUERY_ACTIVE_SPEED;
2943 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2944 if (ret == -EOPNOTSUPP) {
2945 dev_warn(&hdev->pdev->dev,
2946 "IMP does not support get SFP info %d\n", ret);
2949 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2953 /* In some cases, the MAC speed obtained from the IMP may be 0; it
2954 * should not be assigned to mac->speed.
2956 if (!le32_to_cpu(resp->speed))
2959 mac->speed = le32_to_cpu(resp->speed);
2960 /* if resp->speed_ability is 0, the firmware is an old version,
2961 * so do not update these parameters
2963 if (resp->speed_ability) {
2964 mac->module_type = le32_to_cpu(resp->module_type);
2965 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2966 mac->autoneg = resp->autoneg;
2967 mac->support_autoneg = resp->autoneg_ability;
2968 mac->speed_type = QUERY_ACTIVE_SPEED;
2969 if (!resp->active_fec)
2972 mac->fec_mode = BIT(resp->active_fec);
2974 mac->speed_type = QUERY_SFP_SPEED;
2980 static int hclge_update_port_info(struct hclge_dev *hdev)
2982 struct hclge_mac *mac = &hdev->hw.mac;
2983 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2986 /* get the port info from SFP cmd if not copper port */
2987 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2990 /* if the IMP does not support getting SFP/qSFP info, return directly */
2991 if (!hdev->support_sfp_query)
2994 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2995 ret = hclge_get_sfp_info(hdev, mac);
2997 ret = hclge_get_sfp_speed(hdev, &speed);
2999 if (ret == -EOPNOTSUPP) {
3000 hdev->support_sfp_query = false;
3006 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3007 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3008 hclge_update_port_capability(mac);
3011 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3014 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3015 return 0; /* do nothing if no SFP */
3017 /* must configure full duplex for SFP */
3018 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3022 static int hclge_get_status(struct hnae3_handle *handle)
3024 struct hclge_vport *vport = hclge_get_vport(handle);
3025 struct hclge_dev *hdev = vport->back;
3027 hclge_update_link_status(hdev);
3029 return hdev->hw.mac.link;
3032 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3034 if (!pci_num_vf(hdev->pdev)) {
3035 dev_err(&hdev->pdev->dev,
3036 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3040 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3041 dev_err(&hdev->pdev->dev,
3042 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3043 vf, pci_num_vf(hdev->pdev));
3047 /* VFs start from 1 in the vport array */
3048 vf += HCLGE_VF_VPORT_START_NUM;
3049 return &hdev->vport[vf];
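/* Example: a request for vf 0 (e.g. from "ip link set ... vf 0 ...")
 * resolves to hdev->vport[1], because vport[0] always belongs to the
 * PF and VF vports start at HCLGE_VF_VPORT_START_NUM.
 */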
3052 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3053 struct ifla_vf_info *ivf)
3055 struct hclge_vport *vport = hclge_get_vport(handle);
3056 struct hclge_dev *hdev = vport->back;
3058 vport = hclge_get_vf_vport(hdev, vf);
3063 ivf->linkstate = vport->vf_info.link_state;
3064 ivf->spoofchk = vport->vf_info.spoofchk;
3065 ivf->trusted = vport->vf_info.trusted;
3066 ivf->min_tx_rate = 0;
3067 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3068 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3069 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3070 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3071 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3076 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3079 struct hclge_vport *vport = hclge_get_vport(handle);
3080 struct hclge_dev *hdev = vport->back;
3082 vport = hclge_get_vf_vport(hdev, vf);
3086 vport->vf_info.link_state = link_state;
3091 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3093 u32 cmdq_src_reg, msix_src_reg;
3095 /* fetch the events from their corresponding regs */
3096 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3097 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3099 /* Assumption: If by any chance reset and mailbox events are reported
3100 * together then we will only process the reset event in this pass and
3101 * defer the processing of the mailbox events. Since we would not have
3102 * cleared the RX CMDQ event this time, we would receive another
3103 * interrupt from the H/W just for the mailbox.
3105 * check for vector0 reset event sources
3107 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3108 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3109 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3110 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3111 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3112 hdev->rst_stats.imp_rst_cnt++;
3113 return HCLGE_VECTOR0_EVENT_RST;
3116 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3117 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3118 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3119 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3120 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3121 hdev->rst_stats.global_rst_cnt++;
3122 return HCLGE_VECTOR0_EVENT_RST;
3125 /* check for vector0 msix event source */
3126 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3127 *clearval = msix_src_reg;
3128 return HCLGE_VECTOR0_EVENT_ERR;
3131 /* check for vector0 mailbox(=CMDQ RX) event source */
3132 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3133 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3134 *clearval = cmdq_src_reg;
3135 return HCLGE_VECTOR0_EVENT_MBX;
3138 /* print other vector0 event source */
3139 dev_info(&hdev->pdev->dev,
3140 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3141 cmdq_src_reg, msix_src_reg);
3142 *clearval = msix_src_reg;
3144 return HCLGE_VECTOR0_EVENT_OTHER;
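/* The checks above give reset events strict priority over MSI-X error
 * events, which in turn beat mailbox events: IMP reset first, then
 * global reset, then MSI-X sources, and only then CMDQ RX (mailbox).
 * A deferred lower-priority event retriggers the interrupt because its
 * source register is intentionally left uncleared.
 */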
3147 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3150 switch (event_type) {
3151 case HCLGE_VECTOR0_EVENT_RST:
3152 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3154 case HCLGE_VECTOR0_EVENT_MBX:
3155 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3162 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3164 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3165 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3166 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3167 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3168 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3171 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3173 writel(enable ? 1 : 0, vector->addr);
3176 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3178 struct hclge_dev *hdev = data;
3182 hclge_enable_vector(&hdev->misc_vector, false);
3183 event_cause = hclge_check_event_cause(hdev, &clearval);
3185 /* vector 0 interrupt is shared with reset and mailbox source events. */
3186 switch (event_cause) {
3187 case HCLGE_VECTOR0_EVENT_ERR:
3188 /* we do not know what type of reset is required now. This could
3189 * only be decided after we fetch the type of errors which
3190 * caused this event. Therefore, we will do the following for now:
3191 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3192 * have deferred the type of reset to be used.
3193 * 2. Schedule the reset service task.
3194 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3195 * will fetch the correct type of reset. This would be done
3196 * by first decoding the types of errors.
3198 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3200 case HCLGE_VECTOR0_EVENT_RST:
3201 hclge_reset_task_schedule(hdev);
3203 case HCLGE_VECTOR0_EVENT_MBX:
3204 /* If we are here then,
3205 * 1. Either we are not handling any mbx task and we are not
3206 * scheduled as well
3208 * 2. We could be handling a mbx task but nothing more is
3209 * scheduled.
3210 * In both cases, we should schedule mbx task as there are more
3211 * mbx messages reported by this interrupt.
3213 hclge_mbx_task_schedule(hdev);
3216 dev_warn(&hdev->pdev->dev,
3217 "received unknown or unhandled event of vector0\n");
3221 hclge_clear_event_cause(hdev, event_cause, clearval);
3223 /* Enable the interrupt if it is not caused by reset. And when
3224 * clearval equals 0, it means the interrupt status may have been
3225 * cleared by hardware before the driver reads the status register.
3226 * In this case, the vector0 interrupt should also be enabled.
3229 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3230 hclge_enable_vector(&hdev->misc_vector, true);
3236 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3238 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3239 dev_warn(&hdev->pdev->dev,
3240 "vector(vector_id %d) has been freed.\n", vector_id);
3244 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3245 hdev->num_msi_left += 1;
3246 hdev->num_msi_used -= 1;
3249 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3251 struct hclge_misc_vector *vector = &hdev->misc_vector;
3253 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3255 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3256 hdev->vector_status[0] = 0;
3258 hdev->num_msi_left -= 1;
3259 hdev->num_msi_used += 1;
3262 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3263 const cpumask_t *mask)
3265 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3268 cpumask_copy(&hdev->affinity_mask, mask);
3271 static void hclge_irq_affinity_release(struct kref *ref)
3275 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3277 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3278 &hdev->affinity_mask);
3280 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3281 hdev->affinity_notify.release = hclge_irq_affinity_release;
3282 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3283 &hdev->affinity_notify);
3286 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3288 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3289 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3292 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3296 hclge_get_misc_vector(hdev);
3298 /* this irq is explicitly freed in hclge_misc_irq_uninit() */
3299 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3300 HCLGE_NAME, pci_name(hdev->pdev));
3301 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3302 0, hdev->misc_vector.name, hdev);
3304 hclge_free_vector(hdev, 0);
3305 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3306 hdev->misc_vector.vector_irq);
3312 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3314 free_irq(hdev->misc_vector.vector_irq, hdev);
3315 hclge_free_vector(hdev, 0);
3318 int hclge_notify_client(struct hclge_dev *hdev,
3319 enum hnae3_reset_notify_type type)
3321 struct hnae3_client *client = hdev->nic_client;
3324 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3327 if (!client->ops->reset_notify)
3330 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3331 struct hnae3_handle *handle = &hdev->vport[i].nic;
3334 ret = client->ops->reset_notify(handle, type);
3336 dev_err(&hdev->pdev->dev,
3337 "notify nic client failed %d(%d)\n", type, ret);
3345 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3346 enum hnae3_reset_notify_type type)
3348 struct hnae3_client *client = hdev->roce_client;
3352 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3355 if (!client->ops->reset_notify)
3358 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3359 struct hnae3_handle *handle = &hdev->vport[i].roce;
3361 ret = client->ops->reset_notify(handle, type);
3363 dev_err(&hdev->pdev->dev,
3364 "notify roce client failed %d(%d)",
3373 static int hclge_reset_wait(struct hclge_dev *hdev)
3375 #define HCLGE_RESET_WAIT_MS 100
3376 #define HCLGE_RESET_WAIT_CNT 350
3378 u32 val, reg, reg_bit;
3381 switch (hdev->reset_type) {
3382 case HNAE3_IMP_RESET:
3383 reg = HCLGE_GLOBAL_RESET_REG;
3384 reg_bit = HCLGE_IMP_RESET_BIT;
3386 case HNAE3_GLOBAL_RESET:
3387 reg = HCLGE_GLOBAL_RESET_REG;
3388 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3390 case HNAE3_FUNC_RESET:
3391 reg = HCLGE_FUN_RST_ING;
3392 reg_bit = HCLGE_FUN_RST_ING_B;
3395 dev_err(&hdev->pdev->dev,
3396 "Wait for unsupported reset type: %d\n",
3401 val = hclge_read_dev(&hdev->hw, reg);
3402 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3403 msleep(HCLGE_RESET_WAIT_MS);
3404 val = hclge_read_dev(&hdev->hw, reg);
3408 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3409 dev_warn(&hdev->pdev->dev,
3410 "Wait for reset timeout: %d\n", hdev->reset_type);
3417 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3419 struct hclge_vf_rst_cmd *req;
3420 struct hclge_desc desc;
3422 req = (struct hclge_vf_rst_cmd *)desc.data;
3423 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3424 req->dest_vfid = func_id;
3429 return hclge_cmd_send(&hdev->hw, &desc, 1);
3432 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3436 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3437 struct hclge_vport *vport = &hdev->vport[i];
3440 /* Send cmd to set/clear VF's FUNC_RST_ING */
3441 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3443 dev_err(&hdev->pdev->dev,
3444 "set vf(%u) rst failed %d!\n",
3445 vport->vport_id, ret);
3449 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3452 /* Inform VF to process the reset.
3453 * hclge_inform_reset_assert_to_vf may fail if VF
3454 * driver is not loaded.
3456 ret = hclge_inform_reset_assert_to_vf(vport);
3458 dev_warn(&hdev->pdev->dev,
3459 "inform reset to vf(%u) failed %d!\n",
3460 vport->vport_id, ret);
3466 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3468 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3469 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3470 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3473 hclge_mbx_handler(hdev);
3475 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3478 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3480 struct hclge_pf_rst_sync_cmd *req;
3481 struct hclge_desc desc;
3485 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3486 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3489 /* VFs need to bring their netdev down via mailbox during PF or FLR reset */
3490 hclge_mailbox_service_task(hdev);
3492 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3493 /* for compatibility with old firmware, wait
3494 * 100 ms for the VF to stop IO
3496 if (ret == -EOPNOTSUPP) {
3497 msleep(HCLGE_RESET_SYNC_TIME);
3500 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3503 } else if (req->all_vf_ready) {
3506 msleep(HCLGE_PF_RESET_SYNC_TIME);
3507 hclge_cmd_reuse_desc(&desc, true);
3508 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3510 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3513 void hclge_report_hw_error(struct hclge_dev *hdev,
3514 enum hnae3_hw_error_type type)
3516 struct hnae3_client *client = hdev->nic_client;
3519 if (!client || !client->ops->process_hw_error ||
3520 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3523 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3524 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3527 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3531 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3532 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3533 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3534 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3535 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3538 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3539 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3540 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3541 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3545 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3547 struct hclge_desc desc;
3548 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3551 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3552 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3553 req->fun_reset_vfid = func_id;
3555 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3557 dev_err(&hdev->pdev->dev,
3558 "send function reset cmd fail, status =%d\n", ret);
3563 static void hclge_do_reset(struct hclge_dev *hdev)
3565 struct hnae3_handle *handle = &hdev->vport[0].nic;
3566 struct pci_dev *pdev = hdev->pdev;
3569 if (hclge_get_hw_reset_stat(handle)) {
3570 dev_info(&pdev->dev, "hardware reset not finish\n");
3571 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3572 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3573 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3577 switch (hdev->reset_type) {
3578 case HNAE3_GLOBAL_RESET:
3579 dev_info(&pdev->dev, "global reset requested\n");
3580 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3581 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3582 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3584 case HNAE3_FUNC_RESET:
3585 dev_info(&pdev->dev, "PF reset requested\n");
3586 /* schedule again to check later */
3587 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3588 hclge_reset_task_schedule(hdev);
3591 dev_warn(&pdev->dev,
3592 "unsupported reset type: %d\n", hdev->reset_type);
3597 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3598 unsigned long *addr)
3600 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3601 struct hclge_dev *hdev = ae_dev->priv;
3603 /* first, resolve any unknown reset type to the known type(s) */
3604 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3605 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3606 HCLGE_MISC_VECTOR_INT_STS);
3607 /* we will intentionally ignore any errors from this function
3608 * as we will end up in *some* reset request in any case
3610 if (hclge_handle_hw_msix_error(hdev, addr))
3611 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3614 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3615 /* We deferred the clearing of the error event which caused the
3616 * interrupt, since it was not possible to do that in interrupt
3617 * context (and this is the reason we introduced the new UNKNOWN
3618 * reset type). Now that the errors have been handled and cleared
3619 * in hardware, we can safely enable interrupts. This is an
3620 * exception to the norm.
3622 hclge_enable_vector(&hdev->misc_vector, true);
3625 /* return the highest priority reset level amongst all */
3626 if (test_bit(HNAE3_IMP_RESET, addr)) {
3627 rst_level = HNAE3_IMP_RESET;
3628 clear_bit(HNAE3_IMP_RESET, addr);
3629 clear_bit(HNAE3_GLOBAL_RESET, addr);
3630 clear_bit(HNAE3_FUNC_RESET, addr);
3631 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3632 rst_level = HNAE3_GLOBAL_RESET;
3633 clear_bit(HNAE3_GLOBAL_RESET, addr);
3634 clear_bit(HNAE3_FUNC_RESET, addr);
3635 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3636 rst_level = HNAE3_FUNC_RESET;
3637 clear_bit(HNAE3_FUNC_RESET, addr);
3638 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3639 rst_level = HNAE3_FLR_RESET;
3640 clear_bit(HNAE3_FLR_RESET, addr);
3643 if (hdev->reset_type != HNAE3_NONE_RESET &&
3644 rst_level < hdev->reset_type)
3645 return HNAE3_NONE_RESET;
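/* Example of the priority resolution above: if both HNAE3_FUNC_RESET
 * and HNAE3_GLOBAL_RESET are pending in @addr, the global reset is
 * returned and the function reset bit is cleared as well, since a
 * global reset implicitly covers it. The order is
 * IMP > GLOBAL > FUNC > FLR.
 */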
3650 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3654 switch (hdev->reset_type) {
3655 case HNAE3_IMP_RESET:
3656 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3658 case HNAE3_GLOBAL_RESET:
3659 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3668 /* For revision 0x20, the reset interrupt source
3669 * can only be cleared after the hardware reset is done
3671 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3672 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3675 hclge_enable_vector(&hdev->misc_vector, true);
3678 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3682 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3684 reg_val |= HCLGE_NIC_SW_RST_RDY;
3686 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3688 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3691 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3695 ret = hclge_set_all_vf_rst(hdev, true);
3699 hclge_func_reset_sync_vf(hdev);
3704 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3709 switch (hdev->reset_type) {
3710 case HNAE3_FUNC_RESET:
3711 ret = hclge_func_reset_notify_vf(hdev);
3715 ret = hclge_func_reset_cmd(hdev, 0);
3717 dev_err(&hdev->pdev->dev,
3718 "asserting function reset fail %d!\n", ret);
3722 /* After performing PF reset, it is not necessary to do the
3723 * mailbox handling or send any command to firmware, because
3724 * any mailbox handling or command to firmware is only valid
3725 * after hclge_cmd_init is called.
3727 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3728 hdev->rst_stats.pf_rst_cnt++;
3730 case HNAE3_FLR_RESET:
3731 ret = hclge_func_reset_notify_vf(hdev);
3735 case HNAE3_IMP_RESET:
3736 hclge_handle_imp_error(hdev);
3737 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3738 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3739 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3745 /* inform hardware that preparatory work is done */
3746 msleep(HCLGE_RESET_SYNC_TIME);
3747 hclge_reset_handshake(hdev, true);
3748 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3753 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3755 #define MAX_RESET_FAIL_CNT 5
3757 if (hdev->reset_pending) {
3758 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3759 hdev->reset_pending);
3761 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3762 HCLGE_RESET_INT_M) {
3763 dev_info(&hdev->pdev->dev,
3764 "reset failed because new reset interrupt\n");
3765 hclge_clear_reset_cause(hdev);
3767 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3768 hdev->rst_stats.reset_fail_cnt++;
3769 set_bit(hdev->reset_type, &hdev->reset_pending);
3770 dev_info(&hdev->pdev->dev,
3771 "re-schedule reset task(%u)\n",
3772 hdev->rst_stats.reset_fail_cnt);
3776 hclge_clear_reset_cause(hdev);
3778 /* recover the handshake status when reset fails */
3779 hclge_reset_handshake(hdev, true);
3781 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3783 hclge_dbg_dump_rst_info(hdev);
3785 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3790 static int hclge_set_rst_done(struct hclge_dev *hdev)
3792 struct hclge_pf_rst_done_cmd *req;
3793 struct hclge_desc desc;
3796 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3797 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3798 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3800 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3801 /* To be compatible with the old firmware, which does not support
3802 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3803 * return success
3804 */
3805 if (ret == -EOPNOTSUPP) {
3806 dev_warn(&hdev->pdev->dev,
3807 "current firmware does not support command(0x%x)!\n",
3808 HCLGE_OPC_PF_RST_DONE);
3811 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3818 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3822 switch (hdev->reset_type) {
3823 case HNAE3_FUNC_RESET:
3824 case HNAE3_FLR_RESET:
3825 ret = hclge_set_all_vf_rst(hdev, false);
3827 case HNAE3_GLOBAL_RESET:
3828 case HNAE3_IMP_RESET:
3829 ret = hclge_set_rst_done(hdev);
3835 /* clear up the handshake status after re-initialization is done */
3836 hclge_reset_handshake(hdev, false);
3841 static int hclge_reset_stack(struct hclge_dev *hdev)
3845 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3849 ret = hclge_reset_ae_dev(hdev->ae_dev);
3853 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3856 static int hclge_reset_prepare(struct hclge_dev *hdev)
3860 hdev->rst_stats.reset_cnt++;
3861 /* perform reset of the stack & ae device for a client */
3862 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3867 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3872 return hclge_reset_prepare_wait(hdev);
3875 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3877 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3878 enum hnae3_reset_type reset_level;
3881 hdev->rst_stats.hw_reset_done_cnt++;
3883 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3888 ret = hclge_reset_stack(hdev);
3893 hclge_clear_reset_cause(hdev);
3895 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3896 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3897 * times
3898 */
3899 if (ret &&
3900 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3903 ret = hclge_reset_prepare_up(hdev);
3908 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3913 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3917 hdev->last_reset_time = jiffies;
3918 hdev->rst_stats.reset_fail_cnt = 0;
3919 hdev->rst_stats.reset_done_cnt++;
3920 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3922 /* if default_reset_request has a higher level reset request,
3923 * it should be handled as soon as possible, since some errors
3924 * need this kind of reset to be fixed.
3926 reset_level = hclge_get_reset_level(ae_dev,
3927 &hdev->default_reset_request);
3928 if (reset_level != HNAE3_NONE_RESET)
3929 set_bit(reset_level, &hdev->reset_request);
3934 static void hclge_reset(struct hclge_dev *hdev)
3936 if (hclge_reset_prepare(hdev))
3939 if (hclge_reset_wait(hdev))
3942 if (hclge_reset_rebuild(hdev))
3948 if (hclge_reset_err_handle(hdev))
3949 hclge_reset_task_schedule(hdev);
3952 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3954 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3955 struct hclge_dev *hdev = ae_dev->priv;
3957 /* We might end up getting called broadly because of 2 below cases:
3958 * 1. Recoverable error was conveyed through APEI and only way to bring
3959 * normalcy is to reset.
3960 * 2. A new reset request from the stack due to timeout
3962 * For the first case, the error event might not have an ae handle available.
3963 * check if this is a new reset request and we are not here just because
3964 * last reset attempt did not succeed and watchdog hit us again. We will
3965 * know this if last reset request did not occur very recently (watchdog
3966 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ)
3967 * In case of new request we reset the "reset level" to PF reset.
3968 * And if it is a repeat reset request of the most recent one then we
3969 * want to make sure we throttle the reset request. Therefore, we will
3970 * not allow it again before 3*HZ times.
3973 handle = &hdev->vport[0].nic;
3975 if (time_before(jiffies, (hdev->last_reset_time +
3976 HCLGE_RESET_INTERVAL))) {
3977 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3979 } else if (hdev->default_reset_request) {
3981 hclge_get_reset_level(ae_dev,
3982 &hdev->default_reset_request);
3983 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3984 hdev->reset_level = HNAE3_FUNC_RESET;
3987 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3990 /* request reset & schedule reset task */
3991 set_bit(hdev->reset_level, &hdev->reset_request);
3992 hclge_reset_task_schedule(hdev);
3994 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3995 hdev->reset_level++;
3998 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3999 enum hnae3_reset_type rst_type)
4001 struct hclge_dev *hdev = ae_dev->priv;
4003 set_bit(rst_type, &hdev->default_reset_request);
4006 static void hclge_reset_timer(struct timer_list *t)
4008 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4010 /* if default_reset_request has no value, it means that this reset
4011 * request has already been handled, so just return here
4013 if (!hdev->default_reset_request)
4016 dev_info(&hdev->pdev->dev,
4017 "triggering reset in reset timer\n");
4018 hclge_reset_event(hdev->pdev, NULL);
4021 static void hclge_reset_subtask(struct hclge_dev *hdev)
4023 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4025 /* check if there is any ongoing reset in the hardware. This status can
4026 * be checked from reset_pending. If there is one, then we need to wait
4027 * for the hardware to complete the reset.
4028 * a. If we are able to figure out in reasonable time that the hardware
4029 * has fully reset, then we can proceed with the driver and client
4030 * reset-related logic.
4031 * b. else, we can come back later to check this status and re-schedule
4032 * now.
4034 hdev->last_reset_time = jiffies;
4035 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4036 if (hdev->reset_type != HNAE3_NONE_RESET)
4039 /* check if we got any *new* reset requests to be honored */
4040 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4041 if (hdev->reset_type != HNAE3_NONE_RESET)
4042 hclge_do_reset(hdev);
4044 hdev->reset_type = HNAE3_NONE_RESET;
4047 static void hclge_reset_service_task(struct hclge_dev *hdev)
4049 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4052 down(&hdev->reset_sem);
4053 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4055 hclge_reset_subtask(hdev);
4057 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4058 up(&hdev->reset_sem);
4061 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4065 /* start from vport 1, since the PF (vport 0) is always alive */
4066 for (i = 1; i < hdev->num_alloc_vport; i++) {
4067 struct hclge_vport *vport = &hdev->vport[i];
4069 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4070 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4072 /* If the VF is not alive, set its MPS to the default value */
4073 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4074 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4078 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4080 unsigned long delta = round_jiffies_relative(HZ);
4082 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4085 /* Always handle the link updating to make sure link state is
4086 * updated when it is triggered by mbx.
4088 hclge_update_link_status(hdev);
4089 hclge_sync_mac_table(hdev);
4090 hclge_sync_promisc_mode(hdev);
4092 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4093 delta = jiffies - hdev->last_serv_processed;
4095 if (delta < round_jiffies_relative(HZ)) {
4096 delta = round_jiffies_relative(HZ) - delta;
4101 hdev->serv_processed_cnt++;
4102 hclge_update_vport_alive(hdev);
4104 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4105 hdev->last_serv_processed = jiffies;
4109 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4110 hclge_update_stats_for_all(hdev);
4112 hclge_update_port_info(hdev);
4113 hclge_sync_vlan_filter(hdev);
4115 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4116 hclge_rfs_filter_expire(hdev);
4118 hdev->last_serv_processed = jiffies;
4121 hclge_task_schedule(hdev, delta);
4124 static void hclge_service_task(struct work_struct *work)
4126 struct hclge_dev *hdev =
4127 container_of(work, struct hclge_dev, service_task.work);
4129 hclge_reset_service_task(hdev);
4130 hclge_mailbox_service_task(hdev);
4131 hclge_periodic_service_task(hdev);
4133 /* Handle reset and mbx again in case the periodic task delays the
4134 * handling by calling hclge_task_schedule() in
4135 * hclge_periodic_service_task().
4137 hclge_reset_service_task(hdev);
4138 hclge_mailbox_service_task(hdev);
4141 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4143 /* VF handle has no client */
4144 if (!handle->client)
4145 return container_of(handle, struct hclge_vport, nic);
4146 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4147 return container_of(handle, struct hclge_vport, roce);
4149 return container_of(handle, struct hclge_vport, nic);
4152 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4153 struct hnae3_vector_info *vector_info)
4155 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4157 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4159 /* need an extended offset to configure vectors >= 64 */
4160 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4161 vector_info->io_addr = hdev->hw.io_base +
4162 HCLGE_VECTOR_REG_BASE +
4163 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4165 vector_info->io_addr = hdev->hw.io_base +
4166 HCLGE_VECTOR_EXT_REG_BASE +
4167 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4168 HCLGE_VECTOR_REG_OFFSET_H +
4169 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4170 HCLGE_VECTOR_REG_OFFSET;
4172 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4173 hdev->vector_irq[idx] = vector_info->vector;
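/* Address arithmetic illustrated: for a vector index below the
 * 64-vector boundary, e.g. idx = 3, the interrupt control register is
 * io_base + HCLGE_VECTOR_REG_BASE + 2 * HCLGE_VECTOR_REG_OFFSET
 * (idx - 1 = 2). At or above the boundary the extended base is used
 * and idx - 1 is split into a high-stride and a low-stride component
 * as computed above.
 */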
4176 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4177 struct hnae3_vector_info *vector_info)
4179 struct hclge_vport *vport = hclge_get_vport(handle);
4180 struct hnae3_vector_info *vector = vector_info;
4181 struct hclge_dev *hdev = vport->back;
4186 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4187 vector_num = min(hdev->num_msi_left, vector_num);
4189 for (j = 0; j < vector_num; j++) {
4190 while (++i < hdev->num_nic_msi) {
4191 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4192 hclge_get_vector_info(hdev, i, vector);
4200 hdev->num_msi_left -= alloc;
4201 hdev->num_msi_used += alloc;
4206 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4210 for (i = 0; i < hdev->num_msi; i++)
4211 if (vector == hdev->vector_irq[i])
4217 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4219 struct hclge_vport *vport = hclge_get_vport(handle);
4220 struct hclge_dev *hdev = vport->back;
4223 vector_id = hclge_get_vector_index(hdev, vector);
4224 if (vector_id < 0) {
4225 dev_err(&hdev->pdev->dev,
4226 "Get vector index fail. vector = %d\n", vector);
4230 hclge_free_vector(hdev, vector_id);
4235 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4237 return HCLGE_RSS_KEY_SIZE;
4240 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4242 return HCLGE_RSS_IND_TBL_SIZE;
4245 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4246 const u8 hfunc, const u8 *key)
4248 struct hclge_rss_config_cmd *req;
4249 unsigned int key_offset = 0;
4250 struct hclge_desc desc;
4255 key_counts = HCLGE_RSS_KEY_SIZE;
4256 req = (struct hclge_rss_config_cmd *)desc.data;
4258 while (key_counts) {
4259 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4262 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4263 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4265 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4266 memcpy(req->hash_key,
4267 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4269 key_counts -= key_size;
4271 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4273 dev_err(&hdev->pdev->dev,
4274 "Configure RSS config fail, status = %d\n",
4282 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4284 struct hclge_rss_indirection_table_cmd *req;
4285 struct hclge_desc desc;
4293 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4295 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4296 hclge_cmd_setup_basic_desc
4297 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4299 req->start_table_index =
4300 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4301 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4302 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4303 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4304 req->rss_qid_l[j] = qid & 0xff;
4306 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4307 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4308 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4309 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4311 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4313 dev_err(&hdev->pdev->dev,
4314 "Configure rss indir table fail,status = %d\n",
4322 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4323 u16 *tc_size, u16 *tc_offset)
4325 struct hclge_rss_tc_mode_cmd *req;
4326 struct hclge_desc desc;
4330 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4331 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4333 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4336 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4337 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4338 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4339 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4340 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4341 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4342 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4344 req->rss_tc_mode[i] = cpu_to_le16(mode);
4347 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4349 dev_err(&hdev->pdev->dev,
4350 "Configure rss tc mode fail, status = %d\n", ret);
4355 static void hclge_get_rss_type(struct hclge_vport *vport)
4357 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4358 vport->rss_tuple_sets.ipv4_udp_en ||
4359 vport->rss_tuple_sets.ipv4_sctp_en ||
4360 vport->rss_tuple_sets.ipv6_tcp_en ||
4361 vport->rss_tuple_sets.ipv6_udp_en ||
4362 vport->rss_tuple_sets.ipv6_sctp_en)
4363 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4364 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4365 vport->rss_tuple_sets.ipv6_fragment_en)
4366 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4368 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4371 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4373 struct hclge_rss_input_tuple_cmd *req;
4374 struct hclge_desc desc;
4377 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4379 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4381 /* Get the tuple config from the PF */
4382 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4383 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4384 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4385 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4386 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4387 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4388 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4389 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4390 hclge_get_rss_type(&hdev->vport[0]);
4391 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4393 dev_err(&hdev->pdev->dev,
4394 "Configure rss input fail, status = %d\n", ret);
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc) {
		switch (vport->rss_algo) {
		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	switch (hfunc) {
	case ETH_RSS_HASH_TOP:
		hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
		break;
	case ETH_RSS_HASH_XOR:
		hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
		break;
	case ETH_RSS_HASH_NO_CHANGE:
		hash_algo = vport->rss_algo;
		break;
	default:
		return -EOPNOTSUPP;
	}

	ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
	if (ret)
		return ret;

	/* Update the shadow RSS key with the user specified key */
	memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
	vport->rss_algo = hash_algo;

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}
static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}
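
/* Example mapping, going by the bit names above: an ethtool request of
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 on TCP_V4_FLOW
 * yields HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT, i.e. a classic 4-tuple hash.
 */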
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	hclge_get_rss_type(vport);
	return 0;
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}
static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->pf_rss_size_max;
}
static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vport *vport = hdev->vport;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	struct hnae3_tc_info *tc_info;
	u16 roundup_size;
	u16 rss_size;
	int i;

	tc_info = &vport->nic.kinfo.tc_info;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		rss_size = tc_info->tqp_count[i];

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		/* tc_size set to hardware is the log2 of roundup power of two
		 * of rss_size, the actual queue size is limited by indirection
		 * table.
		 */
		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
		    rss_size == 0) {
			dev_err(&hdev->pdev->dev,
				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
				rss_size);
			return -EINVAL;
		}

		roundup_size = roundup_pow_of_two(rss_size);
		roundup_size = ilog2(roundup_size);

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = tc_info->tqp_offset[i];
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}
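
/* Worked example for the tc_size computation above: a TC with
 * rss_size = 9 queues is rounded up to roundup_pow_of_two(9) = 16, so
 * tc_size = ilog2(16) = 4 is programmed; the hardware hashes over 2^4
 * slots while the indirection table maps them onto the real queues.
 */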
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 *rss_indir = vport[0].rss_indirection_tbl;
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	int ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	return hclge_init_rss_tc_mode(hdev);
}
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}
static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
			HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = rss_algo;

		memcpy(vport[i].rss_hash_key, hclge_hash_key,
		       HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req =
		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id_l = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_L_M,
					       HCLGE_VECTOR_ID_L_S);
	req->int_vector_id_h = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_H_M,
					       HCLGE_VECTOR_ID_H_S);

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc, op, false);
			req->int_vector_id_l =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_L_M,
						HCLGE_VECTOR_ID_L_S);
			req->int_vector_id_h =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_H_M,
						HCLGE_VECTOR_ID_H_S);
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}
static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"failed to get vector index. vector=%d\n", vector);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}
static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector id = %d, ret = %d\n",
			vector_id, ret);

	return ret;
}
static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
				      bool en_uc, bool en_mc, bool en_bc)
{
	struct hclge_vport *vport = &hdev->vport[vf_id];
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	bool uc_tx_en = en_uc;
	u8 promisc_cfg = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = vf_id;

	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
		uc_tx_en = false;

	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
	req->extend_promisc = promisc_cfg;

	/* to be compatible with DEVICE_VERSION_V1/2 */
	promisc_cfg = 0;
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
	req->promisc = promisc_cfg;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport %u promisc mode, ret = %d.\n",
			vf_id, ret);

	return ret;
}
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
				 bool en_mc_pmc, bool en_bc_pmc)
{
	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
}
static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool en_bc_pmc = true;

	/* For devices whose version is below V2, if broadcast promisc is
	 * enabled, the vlan filter is always bypassed. So broadcast promisc
	 * should be disabled until the user enables promisc mode.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}
static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
}
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return 0;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return 0;
}
static int hclge_set_fd_key_config(struct hclge_dev *hdev,
				   enum HCLGE_FD_STAGE stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}
static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS		0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %u\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* With the max 400-bit key, MAC address tuples can also be matched */
	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}
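
/* The two fd modes above trade key width for table depth: going by the
 * mode names, the 400-bit key supports more tuples (e.g. the MAC
 * addresses enabled above) but halves the number of TCAM entries
 * compared with the 200-bit mode (2K vs 4K entries).
 */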
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n", ret);

	return ret;
}
static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
			      action->override_tc);
		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
	}
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case BIT(INNER_DST_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}
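
/* The calc_x()/calc_y() helpers used above implement the usual TCAM
 * ternary encoding (a sketch, assuming x = ~data & mask and
 * y = data & mask): a bit cleared in both x and y is a "don't care".
 * For instance, data 0b1010 with mask 0b1100 gives x = 0b0100 and
 * y = 0b1000, so the low two bits match any value.
 */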
static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}
static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}
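
/* Example of the meta data packing above (assuming ROCE_TYPE occupies
 * one bit and DST_VPORT the bits right after it): for a NIC packet
 * destined to vf_id 2, the packet-type bit lands at cur_pos 0 and the
 * host-port number follows; the final shift left-aligns the packed bits
 * so they end up in the MSB region of the key.
 */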
/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region; unused bits are filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	u8 meta_data_region;
	u8 tuple_size;
	int ret;
	u32 i;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
	return ret;
}
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_fd_ad_data ad_data;

	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
		ad_data.override_tc = true;
		ad_data.queue_id =
			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
		ad_data.tc_size =
			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
	} else {
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
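
/* A minimal sketch of the resulting action data: a "drop" rule only
 * sets the drop bit, a "select tc" rule programs a queue base/size pair
 * covering the TC's TQPs, and the default ethtool case forwards to a
 * single direct queue id; in all cases the rule location is also
 * written back as the rule id so matched packets can be attributed to
 * the rule.
 */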
static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
				       u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

	if (!spec->ip4src)
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip4dst)
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->psrc)
		*unused_tuple |= BIT(INNER_SRC_PORT);

	if (!spec->pdst)
		*unused_tuple |= BIT(INNER_DST_PORT);

	if (!spec->tos)
		*unused_tuple |= BIT(INNER_IP_TOS);

	return 0;
}

static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
				    u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	if (!spec->ip4src)
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip4dst)
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->tos)
		*unused_tuple |= BIT(INNER_IP_TOS);

	if (!spec->proto)
		*unused_tuple |= BIT(INNER_IP_PROTO);

	if (spec->l4_4_bytes)
		return -EOPNOTSUPP;

	if (spec->ip_ver != ETH_RX_NFC_IP4)
		return -EOPNOTSUPP;

	return 0;
}
static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
				       u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_IP_TOS);

	/* check whether src/dst ip address is used */
	if (!spec->ip6src[0] && !spec->ip6src[1] &&
	    !spec->ip6src[2] && !spec->ip6src[3])
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
	    !spec->ip6dst[2] && !spec->ip6dst[3])
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->psrc)
		*unused_tuple |= BIT(INNER_SRC_PORT);

	if (!spec->pdst)
		*unused_tuple |= BIT(INNER_DST_PORT);

	if (spec->tclass)
		return -EOPNOTSUPP;

	return 0;
}

static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
				    u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* check whether src/dst ip address is used */
	if (!spec->ip6src[0] && !spec->ip6src[1] &&
	    !spec->ip6src[2] && !spec->ip6src[3])
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
	    !spec->ip6dst[2] && !spec->ip6dst[3])
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->l4_proto)
		*unused_tuple |= BIT(INNER_IP_PROTO);

	if (spec->tclass)
		return -EOPNOTSUPP;

	if (spec->l4_4_bytes)
		return -EOPNOTSUPP;

	return 0;
}
static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

	if (is_zero_ether_addr(spec->h_source))
		*unused_tuple |= BIT(INNER_SRC_MAC);

	if (is_zero_ether_addr(spec->h_dest))
		*unused_tuple |= BIT(INNER_DST_MAC);

	if (!spec->h_proto)
		*unused_tuple |= BIT(INNER_ETH_TYPE);

	return 0;
}
static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
				    struct ethtool_rx_flow_spec *fs,
				    u32 *unused_tuple)
{
	if (fs->flow_type & FLOW_EXT) {
		if (fs->h_ext.vlan_etype) {
			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
			return -EOPNOTSUPP;
		}

		if (!fs->h_ext.vlan_tci)
			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci &&
		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
			dev_err(&hdev->pdev->dev,
				"failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
			return -EINVAL;
		}
	} else {
		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (hdev->fd_cfg.fd_mode !=
		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
			dev_err(&hdev->pdev->dev,
				"FLOW_MAC_EXT is not supported in current fd mode!\n");
			return -EOPNOTSUPP;
		}

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused_tuple |= BIT(INNER_DST_MAC);
		else
			*unused_tuple &= ~BIT(INNER_DST_MAC);
	}

	return 0;
}
static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs,
			       u32 *unused_tuple)
{
	u32 flow_type;
	int ret;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
		dev_err(&hdev->pdev->dev,
			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
			fs->location,
			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
		return -EINVAL;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
						  unused_tuple);
		break;
	case IP_USER_FLOW:
		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
					       unused_tuple);
		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
						  unused_tuple);
		break;
	case IPV6_USER_FLOW:
		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
					       unused_tuple);
		break;
	case ETHER_FLOW:
		if (hdev->fd_cfg.fd_mode !=
		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
			dev_err(&hdev->pdev->dev,
				"ETHER_FLOW is not supported in current fd mode!\n");
			return -EOPNOTSUPP;
		}

		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
						 unused_tuple);
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported protocol type, protocol type = %#x\n",
			flow_type);
		return -EOPNOTSUPP;
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check flow union tuple, ret = %d\n",
			ret);
		return ret;
	}

	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
}
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return rule && rule->location == location;
}
/* the caller must hold fd_rule_lock */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add) {
			if (!hdev->hclge_fd_rule_num)
				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
			clear_bit(location, hdev->fd_bmap);

			return 0;
		}
	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %u does not exist\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	set_bit(location, hdev->fd_bmap);
	hdev->hclge_fd_rule_num++;
	hdev->fd_active_type = new_rule->rule_type;

	return 0;
}
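
/* Usage note: the rule list is kept sorted by location, so both lookup
 * and insert walk to the first rule with location >= the target, while
 * "parent" remembers the last node before that point so that
 * hlist_add_behind() can splice the new rule into place.
 */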
static int hclge_fd_get_tuple(struct hclge_dev *hdev,
			      struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IP_USER_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case ETHER_FLOW:
		ether_addr_copy(rule->tuples.src_mac,
				fs->h_u.ether_spec.h_source);
		ether_addr_copy(rule->tuples_mask.src_mac,
				fs->m_u.ether_spec.h_source);

		ether_addr_copy(rule->tuples.dst_mac,
				fs->h_u.ether_spec.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac,
				fs->m_u.ether_spec.h_dest);

		rule->tuples.ether_proto =
				be16_to_cpu(fs->h_u.ether_spec.h_proto);
		rule->tuples_mask.ether_proto =
				be16_to_cpu(fs->m_u.ether_spec.h_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_SCTP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_TCP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_UDP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	default:
		break;
	}

	if (fs->flow_type & FLOW_EXT) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}
/* the caller must hold fd_rule_lock */
static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	if (!rule) {
		dev_err(&hdev->pdev->dev,
			"The flow director rule is NULL\n");
		return -EINVAL;
	}

	/* it never fails here, so there is no need to check the return value */
	hclge_fd_update_rule_list(hdev, rule, rule->location, true);

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	return 0;

clear_rule:
	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
	return ret;
}
static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
}
static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev)) {
		dev_err(&hdev->pdev->dev,
			"flow table director is not supported\n");
		return -EOPNOTSUPP;
	}

	if (!hdev->fd_en) {
		dev_err(&hdev->pdev->dev,
			"please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	if (hclge_is_cls_flower_active(handle)) {
		dev_err(&hdev->pdev->dev,
			"please delete all existing cls flower rules first\n");
		return -EINVAL;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
	if (ret)
		return ret;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		u16 tqps;

		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%u) > max vf num (%u)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%u) > max tqp num (%u)\n",
				ring, tqps - 1);
			return -EINVAL;
		}

		action = HCLGE_FD_ACTION_SELECT_QUEUE;
		q_index = ring;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;
	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	/* to avoid rule conflicts, all arfs rules need to be cleared when
	 * the user configures rules via ethtool
	 */
	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(handle);

	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}
static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
	    !hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %u does not exist\n", fs->location);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
				   NULL, false);
	if (ret)
		return ret;

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}
/* the caller must hold fd_rule_lock */
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
				     bool clear_list)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	if (!hnae3_dev_fd_supported(hdev))
		return;

	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	/* Return ok here, because reset error handling will check this
	 * return value. If an error is returned here, the reset process
	 * will fail.
	 */
	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	/* if fd is disabled, it should not be restored on reset */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
		if (!ret)
			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);

		if (ret) {
			dev_warn(&hdev->pdev->dev,
				 "Restore rule %u failed, remove it\n",
				 rule->location);
			clear_bit(rule->location, hdev->fd_bmap);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	}

	if (hdev->hclge_fd_rule_num)
		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}
static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip4_spec *spec,
				     struct ethtool_tcpip4_spec *spec_mask)
{
	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.src_port);

	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.dst_port);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;
}
static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
				  struct ethtool_usrip4_spec *spec,
				  struct ethtool_usrip4_spec *spec_mask)
{
	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;

	spec->proto = rule->tuples.ip_proto;
	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
			0 : rule->tuples_mask.ip_proto;

	spec->ip_ver = ETH_RX_NFC_IP4;
}
static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip6_spec *spec,
				     struct ethtool_tcpip6_spec *spec_mask)
{
	cpu_to_be32_array(spec->ip6src,
			  rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst,
			  rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
	else
		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
				  IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
	else
		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
				  IPV6_SIZE);

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.src_port);

	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.dst_port);
}
static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
				  struct ethtool_usrip6_spec *spec,
				  struct ethtool_usrip6_spec *spec_mask)
{
	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
	else
		cpu_to_be32_array(spec_mask->ip6src,
				  rule->tuples_mask.src_ip, IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
	else
		cpu_to_be32_array(spec_mask->ip6dst,
				  rule->tuples_mask.dst_ip, IPV6_SIZE);

	spec->l4_proto = rule->tuples.ip_proto;
	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
			0 : rule->tuples_mask.ip_proto;
}
static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
				    struct ethhdr *spec,
				    struct ethhdr *spec_mask)
{
	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);

	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
		eth_zero_addr(spec_mask->h_source);
	else
		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);

	if (rule->unused_tuple & BIT(INNER_DST_MAC))
		eth_zero_addr(spec_mask->h_dest);
	else
		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);

	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
}
static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
				  struct hclge_fd_rule *rule)
{
	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
			rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
			cpu_to_be16(VLAN_VID_MASK) :
			cpu_to_be16(rule->tuples_mask.vlan_tag1);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);
	}
}
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
					 &fs->m_u.tcp_ip4_spec);
		break;
	case IP_USER_FLOW:
		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
				      &fs->m_u.usr_ip4_spec);
		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
					 &fs->m_u.tcp_ip6_spec);
		break;
	case IPV6_USER_FLOW:
		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
				      &fs->m_u.usr_ip6_spec);
		break;
	/* The flow type of the fd rule has been checked before being added
	 * to the rule list. As the other flow types have been handled, it
	 * must be ETHER_FLOW for the default case.
	 */
	default:
		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
					&fs->m_u.ether_spec);
		break;
	}

	hclge_fd_get_ext_info(fs, rule);

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}
static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32

	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		int i;

		for (i = 0; i < IPV6_SIZE; i++) {
			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
		}
	}
}
/* traverse all rules, checking whether an existing rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}
static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}
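
/* Design note: aRFS rules only match on the tuples the flow dissector
 * provides (ether type, ip proto, addresses and destination port), so
 * everything else is marked unused above and the tuple masks are set to
 * all-ones, i.e. the used fields must match exactly.
 */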
static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples = {};
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 tmp_queue_id;
	u16 bit_id;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	/* when there are already fd rules added by the user,
	 * arfs should not work
	 */
	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	/* check whether a flow director filter exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOSPC;
		}

		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -ENOMEM;
		}

		set_bit(bit_id, hdev->fd_bmap);
		rule->location = bit_id;
		rule->arfs.flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		ret = hclge_fd_config_rule(hdev, rule);

		spin_unlock_bh(&hdev->fd_rule_lock);

		if (ret)
			return ret;

		return rule->location;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (rule->queue_id == queue_id)
		return rule->location;

	tmp_queue_id = rule->queue_id;
	rule->queue_id = queue_id;
	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret) {
		rule->queue_id = tmp_queue_id;
		return ret;
	}

	return rule->location;
}
static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	HLIST_HEAD(del_list);

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->arfs.flow_id, rule->location)) {
			hlist_del_init(&rule->rule_node);
			hlist_add_head(&rule->rule_node, &del_list);
			hdev->hclge_fd_rule_num--;
			clear_bit(rule->location, hdev->fd_bmap);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				     rule->location, NULL, false);
		kfree(rule);
	}
#endif
}
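
/* The two-pass structure above is a deliberate design choice (a sketch
 * of the likely rationale): expired rules are first unlinked onto a
 * private del_list under fd_rule_lock, and the TCAM delete commands are
 * only issued after the lock is dropped, keeping the BH-disabled
 * critical section short while the firmware commands complete.
 */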
/* the caller must hold fd_rule_lock */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}
static void hclge_get_cls_key_basic(const struct flow_rule *flow,
				    struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(flow, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}
		rule->tuples.ether_proto = ethtype_key;
		rule->tuples_mask.ether_proto = ethtype_mask;
		rule->tuples.ip_proto = match.key->ip_proto;
		rule->tuples_mask.ip_proto = match.mask->ip_proto;
	} else {
		rule->unused_tuple |= BIT(INNER_IP_PROTO);
		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
	}
}
static void hclge_get_cls_key_mac(const struct flow_rule *flow,
				  struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(flow, &match);
		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
		ether_addr_copy(rule->tuples.src_mac, match.key->src);
		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
	} else {
		rule->unused_tuple |= BIT(INNER_DST_MAC);
		rule->unused_tuple |= BIT(INNER_SRC_MAC);
	}
}
static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(flow, &match);
		rule->tuples.vlan_tag1 = match.key->vlan_id |
				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
	} else {
		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}
}
static void hclge_get_cls_key_ip(const struct flow_rule *flow,
				 struct hclge_fd_rule *rule)
{
	u16 addr_type = 0;

	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(flow, &match);
		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(flow, &match);
		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(match.mask->src);
		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(match.mask->dst);
	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(flow, &match);
		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  match.mask->src.s6_addr32, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
				  IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  match.mask->dst.s6_addr32, IPV6_SIZE);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_IP);
		rule->unused_tuple |= BIT(INNER_DST_IP);
	}
}
static void hclge_get_cls_key_port(const struct flow_rule *flow,
				   struct hclge_fd_rule *rule)
{
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(flow, &match);

		rule->tuples.src_port = be16_to_cpu(match.key->src);
		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
	} else {
		rule->unused_tuple |= BIT(INNER_SRC_PORT);
		rule->unused_tuple |= BIT(INNER_DST_PORT);
	}
}
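/* Translate a tc flower match into rule->tuples/tuples_mask. Each helper
 * above fills one tuple group and marks the corresponding INNER_* bit in
 * rule->unused_tuple when the dissector key is absent, so the TCAM key
 * generation can skip it.
 */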
static int hclge_parse_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower,
				  struct hclge_fd_rule *rule)
{
	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
	struct flow_dissector *dissector = flow->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
			dissector->used_keys);
		return -EOPNOTSUPP;
	}

	hclge_get_cls_key_basic(flow, rule);
	hclge_get_cls_key_mac(flow, rule);
	hclge_get_cls_key_vlan(flow, rule);
	hclge_get_cls_key_ip(flow, rule);
	hclge_get_cls_key_port(flow, rule);

	return 0;
}
static int hclge_check_cls_flower(struct hclge_dev *hdev,
				  struct flow_cls_offload *cls_flower, int tc)
{
	u32 prio = cls_flower->common.prio;

	if (tc < 0 || tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
		return -EINVAL;
	}

	if (prio == 0 ||
	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
		dev_err(&hdev->pdev->dev,
			"prio %u should be in range[1, %u]\n",
			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
		return -EINVAL;
	}

	if (test_bit(prio - 1, hdev->fd_bmap)) {
		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
		return -EINVAL;
	}
	return 0;
}
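/* tc flower rules use (prio - 1) as the TCAM location, so the prio must
 * fall in [1, rule_num[HCLGE_FD_STAGE_1]] and is reserved via fd_bmap.
 * Flower rules and ethtool (EP_ACTIVE) rules are mutually exclusive, as
 * enforced at the top of hclge_add_cls_flower() below.
 */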
static int hclge_add_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower,
				int tc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		dev_err(&hdev->pdev->dev,
			"please remove all exist fd rules via ethtool first\n");
		return -EINVAL;
	}

	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check cls flower params, ret = %d\n", ret);
		return ret;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
	if (ret)
		goto err;

	rule->action = HCLGE_FD_ACTION_SELECT_TC;
	rule->cls_flower.tc = tc;
	rule->location = cls_flower->common.prio - 1;
	rule->vf_id = 0;
	rule->cls_flower.cookie = cls_flower->cookie;
	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;

	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(handle);

	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to add cls flower rule, ret = %d\n", ret);
		goto err;
	}

	return 0;
err:
	kfree(rule);
	return ret;
}
static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
						   unsigned long cookie)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rule->cls_flower.cookie == cookie)
			return rule;
	}

	return NULL;
}
static int hclge_del_cls_flower(struct hnae3_handle *handle,
				struct flow_cls_offload *cls_flower)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	int ret;

	spin_lock_bh(&hdev->fd_rule_lock);

	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
	if (!rule) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EINVAL;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
				   NULL, false);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to delete cls flower rule %u, ret = %d\n",
			rule->location, ret);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return ret;
	}

	ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to delete cls flower rule %u in list, ret = %d\n",
			rule->location, ret);
		spin_unlock_bh(&hdev->fd_rule_lock);
		return ret;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}
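/* Toggle the flow director. When disabling, the rule list is only cleared
 * if aRFS rules are active (clear == true); user-configured rules stay in
 * the list so they can be restored to hardware when fd is re-enabled via
 * hclge_restore_fd_entries().
 */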
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool clear;

	hdev->fd_en = enable;
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;

	if (!enable) {
		spin_lock_bh(&hdev->fd_rule_lock);
		hclge_del_all_fd_entries(handle, clear);
		spin_unlock_bh(&hdev->fd_rule_lock);
	} else {
		hclge_restore_fd_entries(handle);
	}
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);

	if (enable) {
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
	}

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
				     u8 switch_param, u8 param_mask)
{
	struct hclge_mac_vlan_switch_cmd *req;
	struct hclge_desc desc;
	u32 func_id;
	int ret;

	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;

	/* read current config parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
				   true);
	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
	req->func_id = cpu_to_le32(func_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"read mac vlan switch parameter fail, ret = %d\n", ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_cmd_reuse_desc(&desc, false);
	req->switch_param = (req->switch_param & param_mask) | switch_param;
	req->param_mask = param_mask;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"set mac vlan switch parameter fail, ret = %d\n", ret);
	return ret;
}
static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
				       int link_ret)
{
#define HCLGE_PHY_LINK_STATUS_NUM  200

	struct phy_device *phydev = hdev->hw.mac.phydev;
	int i = 0;
	int ret;

	do {
		ret = phy_read_status(phydev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"phy update link status fail, ret = %d\n", ret);
			return;
		}

		if (phydev->link == link_ret)
			break;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
}

static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
{
#define HCLGE_MAC_LINK_STATUS_NUM  100

	int link_status;
	int i = 0;
	int ret;

	do {
		ret = hclge_get_mac_link_status(hdev, &link_status);
		if (ret)
			return ret;
		if (link_status == link_ret)
			return 0;

		msleep(HCLGE_LINK_STATUS_MS);
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);

	return -EBUSY;
}
static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
					  bool is_phy)
{
	int link_ret;

	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;

	if (is_phy)
		hclge_phy_link_status_wait(hdev, link_ret);

	return hclge_mac_link_status_wait(hdev, link_ret);
}
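/* App (MAC-level) loopback is configured with a read-modify-write of the
 * MAC mode command: read the current config, flip HCLGE_MAC_APP_LP_B, and
 * send the same descriptor back via hclge_cmd_reuse_desc(), so all other
 * MAC mode bits keep their original values.
 */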
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100

	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_serdes_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported serdes loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
	} else {
		req->mask = loop_mode_b;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}

	return ret;
}
static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
	int ret;

	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
	if (ret)
		return ret;

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"serdes loopback config mac mode timeout\n");

	return ret;
}
static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
				     struct phy_device *phydev)
{
	int ret;

	if (!phydev->suspended) {
		ret = phy_suspend(phydev);
		if (ret)
			return ret;
	}

	ret = phy_resume(phydev);
	if (ret)
		return ret;

	return phy_loopback(phydev, true);
}

static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
				      struct phy_device *phydev)
{
	int ret;

	ret = phy_loopback(phydev, false);
	if (ret)
		return ret;

	return phy_suspend(phydev);
}

static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int ret;

	if (!phydev)
		return -ENOTSUPP;

	if (en)
		ret = hclge_enable_phy_loopback(hdev, phydev);
	else
		ret = hclge_disable_phy_loopback(hdev, phydev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set phy loopback fail, ret = %d\n", ret);
		return ret;
	}

	hclge_cfg_mac_mode(hdev, en);

	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"phy loopback config mac mode timeout\n");

	return ret;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}
static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
	 * the same, the packets are looped back in the SSU. If SSU loopback
	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
	 */
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);

		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
						HCLGE_SWITCH_ALW_LPBK_MASK);
		if (ret)
			return ret;
	}

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
		break;
	case HNAE3_LOOP_PHY:
		ret = hclge_set_phy_loopback(hdev, en);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}
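/* Disable every loopback mode the firmware may have left enabled: app
 * loopback first, then serial and parallel serdes loopback.
 */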
static int hclge_set_default_loopback(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_app_loopback(hdev, false);
	if (ret)
		return ret;

	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
	if (ret)
		return ret;

	return hclge_cfg_serdes_loopback(hdev, false,
					 HNAE3_LOOP_PARALLEL_SERDES);
}
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT	100000

	unsigned long last = hdev->serv_processed_cnt;
	int i = 0;

	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
	       last == hdev->serv_processed_cnt)
		usleep_range(1, 1);
}
static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		hclge_task_schedule(hdev, 0);
	} else {
		/* Set the DOWN flag here to disable link updating */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
		hclge_flush_link_update(hdev);
	}
}
static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}
static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(handle);
	spin_unlock_bh(&hdev->fd_rule_lock);

	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so we only need to stop the phy here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		hclge_update_link_status(hdev);
		return;
	}

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclge_reset_tqp(handle, i);

	hclge_config_mac_tnl_int(hdev, false);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}
int hclge_vport_start(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;

	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
		if (vport->vport_id) {
			hclge_restore_mac_table_common(vport);
			hclge_restore_vport_vlan_table(vport);
		} else {
			hclge_restore_hw_table(hdev);
		}
	}

	clear_bit(vport->vport_id, hdev->vport_config_block);

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}
static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if (!resp_code || resp_code == 1)
			return 0;
		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
			 resp_code == HCLGE_ADD_MC_OVERFLOW)
			return -ENOSPC;

		dev_err(&hdev->pdev->dev,
			"add mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"remove mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return 0;
		} else if (resp_code == 1) {
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
			return -ENOENT;
		}

		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for undefined, code=%u.\n",
			resp_code);
		return -EIO;
	}

	dev_err(&hdev->pdev->dev,
		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);

	return -EINVAL;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192

	unsigned int word_num;
	unsigned int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}
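/* Lookup reuses the MAC_VLAN_ADD opcode with the read bit set: a unicast
 * entry fits in one descriptor, while a multicast entry (with its VF
 * bitmap) needs a chain of three descriptors linked by
 * HCLGE_CMD_FLAG_NEXT.
 */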
static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
			ret);
		return ret;
	}

	*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}

static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "failed to alloc umv space, want %u, get %u\n",
			 hdev->wanted_umv_size, allocated_size);

	hdev->max_umv_size = allocated_size;
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);

	return 0;
}
static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->vport_lock);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
	mutex_unlock(&hdev->vport_lock);
}
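/* Unicast MAC (UMV) space accounting: max_umv_size is split into one
 * private quota of priv_umv_size entries per vport plus one extra quota
 * (and the division remainder) pooled in share_umv_size. For example,
 * following the formula above, max_umv_size == 256 with
 * num_alloc_vport == 7 gives priv_umv_size == 32 and an initial
 * share_umv_size of 32.
 */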
static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	if (need_lock)
		mutex_lock(&hdev->vport_lock);

	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);

	if (need_lock)
		mutex_unlock(&hdev->vport_lock);

	return is_full;
}

static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
}
static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
						  const u8 *mac_addr)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node)
		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
			return mac_node;

	return NULL;
}
static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
				  enum HCLGE_MAC_NODE_STATE state)
{
	switch (state) {
	/* from set_rx_mode or tmp_add_list */
	case HCLGE_MAC_TO_ADD:
		if (mac_node->state == HCLGE_MAC_TO_DEL)
			mac_node->state = HCLGE_MAC_ACTIVE;
		break;
	/* only from set_rx_mode */
	case HCLGE_MAC_TO_DEL:
		if (mac_node->state == HCLGE_MAC_TO_ADD) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			mac_node->state = HCLGE_MAC_TO_DEL;
		}
		break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * ACTIVE.
	 */
	case HCLGE_MAC_ACTIVE:
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			mac_node->state = HCLGE_MAC_ACTIVE;

		break;
	}
}
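/* MAC address bookkeeping uses a small per-node state machine: TO_ADD
 * means queued for hardware insertion, ACTIVE means present in hardware,
 * TO_DEL means queued for removal. hclge_update_mac_node() above folds a
 * new request into the node's current state; e.g. TO_ADD followed by
 * TO_DEL simply frees the node, since hardware was never touched.
 */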
int hclge_update_mac_list(struct hclge_vport *vport,
			  enum HCLGE_MAC_NODE_STATE state,
			  enum HCLGE_MAC_ADDR_TYPE mac_type,
			  const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_node *mac_node;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
	mac_node = hclge_find_mac_node(list, addr);
	if (mac_node) {
		hclge_update_mac_node(mac_node, state);
		spin_unlock_bh(&vport->mac_list_lock);
		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
		return 0;
	}

	/* if this address was never added, it is unnecessary to delete */
	if (state == HCLGE_MAC_TO_DEL) {
		spin_unlock_bh(&vport->mac_list_lock);
		dev_err(&hdev->pdev->dev,
			"failed to delete address %pM from mac list\n",
			addr);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&vport->mac_list_lock);
		return -ENOMEM;
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&vport->mac_list_lock);

	return 0;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr, is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		mutex_lock(&hdev->vport_lock);
		if (!hclge_is_umv_space_full(vport, false)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			mutex_unlock(&hdev->vport_lock);
			return ret;
		}
		mutex_unlock(&hdev->vport_lock);

		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
				hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret) {
		mutex_lock(&hdev->vport_lock);
		hclge_update_umv_space(vport, true);
		mutex_unlock(&hdev->vport_lock);
	} else if (ret == -ENOENT) {
		ret = 0;
	}

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);

	/* if the table has already overflowed, do not print every time */
	if (status == -ENOSPC &&
	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (status)
			return status;

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfids are zero, just update the vfids */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else if (status == -ENOENT) {
		status = 0;
	}

	return status;
}
static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
				      struct list_head *list,
				      int (*sync)(struct hclge_vport *,
						  const unsigned char *))
{
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = sync(vport, mac_node->mac_addr);
		if (!ret) {
			mac_node->state = HCLGE_MAC_ACTIVE;
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}

static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
					struct list_head *list,
					int (*unsync)(struct hclge_vport *,
						      const unsigned char *))
{
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = unsync(vport, mac_node->mac_addr);
		if (!ret || ret == -ENOENT) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}
static bool hclge_sync_from_add_list(struct list_head *add_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	bool all_added = true;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			all_added = false;

		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * during the time window of adding the mac address into the
		 * mac table. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so it will be removed next time; otherwise it must
		 * be TO_ADD, meaning this address was never added into the
		 * mac table, so just remove the mac node.
		 */
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclge_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_DEL;
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}

	return all_added;
}
static void hclge_sync_from_del_list(struct list_head *del_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * a new TO_ADD request was received during the time
			 * window of configuring the mac address. Since the
			 * mac node state is TO_DEL and the address is still
			 * in the hardware (the delete failed), we just need
			 * to change the mac node state to ACTIVE.
			 */
			new_node->state = HCLGE_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, mac_list);
		}
	}
}
static void hclge_update_overflow_flags(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type,
					bool is_all_added)
{
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
	} else {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
	}
}
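/* Sync one vport's MAC list to hardware in two phases: under
 * mac_list_lock the TO_DEL/TO_ADD nodes are moved or copied to temporary
 * lists, then the firmware commands run unlocked, and finally the results
 * are folded back into the list, deletes first so the adds get maximum
 * table space.
 */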
static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
				       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;
	bool all_added;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;
			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&vport->mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_uc_addr_common);
		hclge_sync_vport_mac_list(vport, &tmp_add_list,
					  hclge_add_uc_addr_common);
	} else {
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_mc_addr_common);
		hclge_sync_vport_mac_list(vport, &tmp_add_list,
					  hclge_add_mc_addr_common);
	}

	/* if adding/deleting some mac addresses failed, move them back to
	 * the mac_list and retry next time
	 */
	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);
	all_added = hclge_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_update_overflow_flags(vport, mac_type, all_added);
}
static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(vport->vport_id, hdev->vport_config_block))
		return false;

	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
		return true;

	return false;
}

static void hclge_sync_mac_table(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (!hclge_need_sync_mac_table(vport))
			continue;

		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
	}
}
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_mac_node *mac_cfg, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;
	int ret;

	if (mac_type == HCLGE_MAC_ADDR_UC) {
		list = &vport->uc_mac_list;
		unsync = hclge_rm_uc_addr_common;
	} else {
		list = &vport->mc_mac_list;
		unsync = hclge_rm_mc_addr_common;
	}

	INIT_LIST_HEAD(&tmp_del_list);

	if (!is_del_list)
		set_bit(vport->vport_id, hdev->vport_config_block);

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		switch (mac_cfg->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_del(&mac_cfg->node);
			list_add_tail(&mac_cfg->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			if (is_del_list) {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
		ret = unsync(vport, mac_cfg->mac_addr);
		if (!ret || ret == -ENOENT) {
			/* clear all mac addrs from hardware, but keep these
			 * mac addrs in the mac list, and restore them after
			 * the vf reset is finished.
			 */
			if (!is_del_list &&
			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
				mac_cfg->state = HCLGE_MAC_TO_ADD;
			} else {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
		} else if (is_del_list) {
			mac_cfg->state = HCLGE_MAC_TO_DEL;
		}
	}

	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);

	spin_unlock_bh(&vport->mac_list_lock);
}
/* remove all mac addresses when uninitializing */
static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	INIT_LIST_HEAD(&tmp_del_list);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_del(&mac_node->node);
			list_add_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			list_del(&mac_node->node);
			kfree(mac_node);
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	if (mac_type == HCLGE_MAC_ADDR_UC)
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_uc_addr_common);
	else
		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
					    hclge_rm_mc_addr_common);

	if (!list_empty(&tmp_del_list))
		dev_warn(&hdev->pdev->dev,
			 "uninit %s mac list for vport %u not completely.\n",
			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
			 vport->vport_id);

	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}

static void hclge_uninit_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
	}
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%u.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}
static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
				     u8 *mac_addr)
{
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int i;

	if (is_zero_ether_addr(mac_addr))
		return false;

	memset(&req, 0, sizeof(req));
	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
	req.egress_port = cpu_to_le16(egress_port);
	hclge_prepare_mac_addr(&req, mac_addr, false);

	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
		return true;

	vf_idx += HCLGE_VF_VPORT_START_NUM;
	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
		if (i != vf_idx &&
		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
			return true;

	return false;
}
static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
			    u8 *mac_addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
		dev_info(&hdev->pdev->dev,
			 "Specified MAC(=%pM) is same as before, no change committed!\n",
			 mac_addr);
		return 0;
	}

	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
			mac_addr);
		return -EEXIST;
	}

	ether_addr_copy(vport->vf_info.mac, mac_addr);

	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		dev_info(&hdev->pdev->dev,
			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
			 vf, mac_addr);
		return hclge_inform_reset_assert_to_vf(vport);
	}

	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
		 vf, mac_addr);
	return 0;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
				       const u8 *old_addr, const u8 *new_addr)
{
	struct list_head *list = &vport->uc_mac_list;
	struct hclge_mac_node *old_node, *new_node;

	new_node = hclge_find_mac_node(list, new_addr);
	if (!new_node) {
		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
		if (!new_node)
			return -ENOMEM;

		new_node->state = HCLGE_MAC_TO_ADD;
		ether_addr_copy(new_node->mac_addr, new_addr);
		list_add(&new_node->node, list);
	} else {
		if (new_node->state == HCLGE_MAC_TO_DEL)
			new_node->state = HCLGE_MAC_ACTIVE;

		/* make sure the new addr is at the list head, to avoid the
		 * dev addr not being re-added into the mac table due to the
		 * umv space limitation after a global/imp reset, which
		 * clears the mac table in hardware.
		 */
		list_move(&new_node->node, list);
	}

	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
		old_node = hclge_find_mac_node(list, old_addr);
		if (old_node) {
			if (old_node->state == HCLGE_MAC_TO_ADD) {
				list_del(&old_node->node);
				kfree(old_node);
			} else {
				old_node->state = HCLGE_MAC_TO_DEL;
			}
		}
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	return 0;
}
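/* Changing the device MAC first reprograms the pause address, then swaps
 * the old/new nodes in the unicast list under mac_list_lock; the actual
 * mac table write is left to the service task via hclge_task_schedule().
 */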
static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned char *old_addr = NULL;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"change uc mac err! invalid mac: %pM.\n",
			 new_addr);
		return -EINVAL;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to configure mac pause address, ret = %d\n",
			ret);
		return ret;
	}

	if (!is_first)
		old_addr = hdev->hw.mac.mac_addr;

	spin_lock_bh(&vport->mac_list_lock);
	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to change the mac addr:%pM, ret = %d\n",
			new_addr, ret);
		spin_unlock_bh(&vport->mac_list_lock);

		if (!is_first)
			hclge_pause_addr_cfg(hdev, old_addr);

		return ret;
	}
	/* we must update the dev addr under the spin lock, to prevent it
	 * being removed by the set_rx_mode path.
	 */
	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
	spin_unlock_bh(&vport->mac_list_lock);

	hclge_task_schedule(hdev, 0);

	return 0;
}
static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	/* read current vlan filter parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get vlan filter config, ret = %d.\n", ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_cmd_reuse_desc(&desc, false);
	req->vlan_fe = filter_en ?
		(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
			ret);

	return ret;
}
#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable, 0);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable, 0);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
					   0);
	}
	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
				    bool is_kill, u16 vlan,
				    __be16 proto)
{
	struct hclge_vport *vport = &hdev->vport[vfid];
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	/* if the vf vlan table is full, firmware will close the vf vlan
	 * filter, so it is pointless and unnecessary to add a new vlan id
	 * to the vf vlan filter. If spoof check is enabled and the vf vlan
	 * table is full, new vlans shouldn't be added, because tx packets
	 * with these vlan ids will be dropped.
	 */
	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
		if (vport->vf_info.spoofchk && vlan) {
			dev_err(&hdev->pdev->dev,
				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
			return -EPERM;
		}
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			set_bit(vfid, hdev->vf_vlan_full);
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%u.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req0->resp_code)
			return 0;

		/* the vf vlan filter is disabled when the vf vlan table is
		 * full, and new vlan ids are then no longer added into the
		 * vf vlan table. Just return 0 without warning here, to
		 * avoid massive verbose print logs at unload time.
		 */
		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%u.\n",
			req0->resp_code);
	}

	return -EIO;
}
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
			   HCLGE_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %u vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %u is already in vlan %u\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %u is not in vlan %u\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
8944 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8946 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8947 struct hclge_vport_vtag_tx_cfg_cmd *req;
8948 struct hclge_dev *hdev = vport->back;
8949 struct hclge_desc desc;
8953 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8955 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8956 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8957 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8958 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8959 vcfg->accept_tag1 ? 1 : 0);
8960 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8961 vcfg->accept_untag1 ? 1 : 0);
8962 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8963 vcfg->accept_tag2 ? 1 : 0);
8964 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8965 vcfg->accept_untag2 ? 1 : 0);
8966 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8967 vcfg->insert_tag1_en ? 1 : 0);
8968 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8969 vcfg->insert_tag2_en ? 1 : 0);
8970 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
8971 vcfg->tag_shift_mode_en ? 1 : 0);
8972 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8974 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8975 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8976 HCLGE_VF_NUM_PER_BYTE;
8977 req->vf_bitmap[bmap_index] =
8978 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8980 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8982 dev_err(&hdev->pdev->dev,
8983 "Send port txvlan cfg command fail, ret =%d\n",
8989 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8991 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8992 struct hclge_vport_vtag_rx_cfg_cmd *req;
8993 struct hclge_dev *hdev = vport->back;
8994 struct hclge_desc desc;
8998 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9000 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9001 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9002 vcfg->strip_tag1_en ? 1 : 0);
9003 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9004 vcfg->strip_tag2_en ? 1 : 0);
9005 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9006 vcfg->vlan1_vlan_prionly ? 1 : 0);
9007 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9008 vcfg->vlan2_vlan_prionly ? 1 : 0);
9009 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9010 vcfg->strip_tag1_discard_en ? 1 : 0);
9011 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9012 vcfg->strip_tag2_discard_en ? 1 : 0);
9014 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9015 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9016 HCLGE_VF_NUM_PER_BYTE;
9017 req->vf_bitmap[bmap_index] =
9018 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9020 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9022 dev_err(&hdev->pdev->dev,
9023 "Send port rxvlan cfg command fail, ret =%d\n",
9029 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9030 u16 port_base_vlan_state,
9035 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9036 vport->txvlan_cfg.accept_tag1 = true;
9037 vport->txvlan_cfg.insert_tag1_en = false;
9038 vport->txvlan_cfg.default_tag1 = 0;
9040 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9042 vport->txvlan_cfg.accept_tag1 =
9043 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9044 vport->txvlan_cfg.insert_tag1_en = true;
9045 vport->txvlan_cfg.default_tag1 = vlan_tag;
9048 vport->txvlan_cfg.accept_untag1 = true;
9050 /* accept_tag2 and accept_untag2 are not supported on
9051  * pdev revision 0x20; newer revisions support them, but
9052  * these two fields cannot be configured by the user.
9054 vport->txvlan_cfg.accept_tag2 = true;
9055 vport->txvlan_cfg.accept_untag2 = true;
9056 vport->txvlan_cfg.insert_tag2_en = false;
9057 vport->txvlan_cfg.default_tag2 = 0;
9058 vport->txvlan_cfg.tag_shift_mode_en = true;
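/* Tx vtag policy in short: with port based vlan disabled, tag1 from
 * the stack is accepted as-is and nothing is inserted; with it enabled,
 * the port vlan is inserted as tag1 and already-tagged packets are only
 * accepted on DEVICE_VERSION_V3 or later.
 */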
9060 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9061 vport->rxvlan_cfg.strip_tag1_en = false;
9062 vport->rxvlan_cfg.strip_tag2_en =
9063 vport->rxvlan_cfg.rx_vlan_offload_en;
9064 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9066 vport->rxvlan_cfg.strip_tag1_en =
9067 vport->rxvlan_cfg.rx_vlan_offload_en;
9068 vport->rxvlan_cfg.strip_tag2_en = true;
9069 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9072 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9073 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9074 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
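/* Rx side: with port based vlan disabled, tag2 stripping follows
 * rx_vlan_offload_en and tag1 is left alone; with it enabled, tag1
 * stripping follows rx_vlan_offload_en while tag2 is always stripped
 * and discarded.
 */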
9076 ret = hclge_set_vlan_tx_offload_cfg(vport);
9080 return hclge_set_vlan_rx_offload_cfg(vport);
9083 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9085 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9086 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9087 struct hclge_desc desc;
9090 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9091 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9092 rx_req->ot_fst_vlan_type =
9093 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9094 rx_req->ot_sec_vlan_type =
9095 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9096 rx_req->in_fst_vlan_type =
9097 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9098 rx_req->in_sec_vlan_type =
9099 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9101 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9103 dev_err(&hdev->pdev->dev,
9104 "Send rxvlan protocol type command fail, ret =%d\n",
9109 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9111 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9112 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9113 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9115 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9117 dev_err(&hdev->pdev->dev,
9118 "Send txvlan protocol type command fail, ret =%d\n",
9124 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9126 #define HCLGE_DEF_VLAN_TYPE 0x8100
9128 struct hnae3_handle *handle = &hdev->vport[0].nic;
9129 struct hclge_vport *vport;
9133 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9134 /* for revision 0x21, vf vlan filter is per function */
9135 for (i = 0; i < hdev->num_alloc_vport; i++) {
9136 vport = &hdev->vport[i];
9137 ret = hclge_set_vlan_filter_ctrl(hdev,
9138 HCLGE_FILTER_TYPE_VF,
9139 HCLGE_FILTER_FE_EGRESS,
9146 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9147 HCLGE_FILTER_FE_INGRESS, true,
9152 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9153 HCLGE_FILTER_FE_EGRESS_V1_B,
9159 handle->netdev_flags |= HNAE3_VLAN_FLTR;
9161 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9162 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9163 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9164 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9165 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9166 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9168 ret = hclge_set_vlan_protocol_type(hdev);
9172 for (i = 0; i < hdev->num_alloc_vport; i++) {
9175 vport = &hdev->vport[i];
9176 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9178 ret = hclge_vlan_offload_cfg(vport,
9179 vport->port_base_vlan_cfg.state,
9185 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9188 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9191 struct hclge_vport_vlan_cfg *vlan;
9193 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9197 vlan->hd_tbl_status = writen_to_tbl;
9198 vlan->vlan_id = vlan_id;
9200 list_add_tail(&vlan->node, &vport->vlan_list);
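/* Each vport keeps a software vlan_list mirroring its vlan filter:
 * hd_tbl_status records whether the vlan id is currently programmed
 * into hardware, so the list can be replayed after a reset or when
 * port based vlan is toggled.
 */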
9203 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9205 struct hclge_vport_vlan_cfg *vlan, *tmp;
9206 struct hclge_dev *hdev = vport->back;
9209 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9210 if (!vlan->hd_tbl_status) {
9211 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9213 vlan->vlan_id, false);
9215 dev_err(&hdev->pdev->dev,
9216 "restore vport vlan list failed, ret=%d\n",
9221 vlan->hd_tbl_status = true;
9227 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9230 struct hclge_vport_vlan_cfg *vlan, *tmp;
9231 struct hclge_dev *hdev = vport->back;
9233 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9234 if (vlan->vlan_id == vlan_id) {
9235 if (is_write_tbl && vlan->hd_tbl_status)
9236 hclge_set_vlan_filter_hw(hdev,
9242 list_del(&vlan->node);
9249 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9251 struct hclge_vport_vlan_cfg *vlan, *tmp;
9252 struct hclge_dev *hdev = vport->back;
9254 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9255 if (vlan->hd_tbl_status)
9256 hclge_set_vlan_filter_hw(hdev,
9262 vlan->hd_tbl_status = false;
9264 list_del(&vlan->node);
9268 clear_bit(vport->vport_id, hdev->vf_vlan_full);
9271 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9273 struct hclge_vport_vlan_cfg *vlan, *tmp;
9274 struct hclge_vport *vport;
9277 for (i = 0; i < hdev->num_alloc_vport; i++) {
9278 vport = &hdev->vport[i];
9279 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9280 list_del(&vlan->node);
9286 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9288 struct hclge_vport_vlan_cfg *vlan, *tmp;
9289 struct hclge_dev *hdev = vport->back;
9295 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9296 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9297 state = vport->port_base_vlan_cfg.state;
9299 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9300 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9301 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9302 vport->vport_id, vlan_id,
9307 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9308 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9310 vlan->vlan_id, false);
9313 vlan->hd_tbl_status = true;
9317 /* For global reset and imp reset, hardware will clear the mac table,
9318  * so we change the mac address state from ACTIVE to TO_ADD so that it
9319  * can be restored in the service task after the reset completes. Further,
9320  * mac addresses in state TO_DEL or DEL_FAIL do not need to be restored
9321  * after reset, so just remove those mac nodes from the mac_list.
9323 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9325 struct hclge_mac_node *mac_node, *tmp;
9327 list_for_each_entry_safe(mac_node, tmp, list, node) {
9328 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9329 mac_node->state = HCLGE_MAC_TO_ADD;
9330 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9331 list_del(&mac_node->node);
9337 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9339 spin_lock_bh(&vport->mac_list_lock);
9341 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9342 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9343 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9345 spin_unlock_bh(&vport->mac_list_lock);
9348 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9350 struct hclge_vport *vport = &hdev->vport[0];
9351 struct hnae3_handle *handle = &vport->nic;
9353 hclge_restore_mac_table_common(vport);
9354 hclge_restore_vport_vlan_table(vport);
9355 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9357 hclge_restore_fd_entries(handle);
9360 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9362 struct hclge_vport *vport = hclge_get_vport(handle);
9364 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9365 vport->rxvlan_cfg.strip_tag1_en = false;
9366 vport->rxvlan_cfg.strip_tag2_en = enable;
9367 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9369 vport->rxvlan_cfg.strip_tag1_en = enable;
9370 vport->rxvlan_cfg.strip_tag2_en = true;
9371 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9374 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9375 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9376 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9377 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9379 return hclge_set_vlan_rx_offload_cfg(vport);
9382 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9383 u16 port_base_vlan_state,
9384 struct hclge_vlan_info *new_info,
9385 struct hclge_vlan_info *old_info)
9387 struct hclge_dev *hdev = vport->back;
9390 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9391 hclge_rm_vport_all_vlan_table(vport, false);
9392 return hclge_set_vlan_filter_hw(hdev,
9393 htons(new_info->vlan_proto),
9399 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9400 vport->vport_id, old_info->vlan_tag,
9405 return hclge_add_vport_all_vlan_table(vport);
9408 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9409 struct hclge_vlan_info *vlan_info)
9411 struct hnae3_handle *nic = &vport->nic;
9412 struct hclge_vlan_info *old_vlan_info;
9413 struct hclge_dev *hdev = vport->back;
9416 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9418 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9422 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9423 /* add new VLAN tag */
9424 ret = hclge_set_vlan_filter_hw(hdev,
9425 htons(vlan_info->vlan_proto),
9427 vlan_info->vlan_tag,
9432 /* remove old VLAN tag */
9433 ret = hclge_set_vlan_filter_hw(hdev,
9434 htons(old_vlan_info->vlan_proto),
9436 old_vlan_info->vlan_tag,
9444 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9449 /* update state only when disabling/enabling port based VLAN */
9450 vport->port_base_vlan_cfg.state = state;
9451 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9452 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9454 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9457 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9458 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9459 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9464 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9465 enum hnae3_port_base_vlan_state state,
9468 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9470 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9472 return HNAE3_PORT_BASE_VLAN_ENABLE;
9475 return HNAE3_PORT_BASE_VLAN_DISABLE;
9476 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9477 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9479 return HNAE3_PORT_BASE_VLAN_MODIFY;
9483 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9484 u16 vlan, u8 qos, __be16 proto)
9486 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9487 struct hclge_vport *vport = hclge_get_vport(handle);
9488 struct hclge_dev *hdev = vport->back;
9489 struct hclge_vlan_info vlan_info;
9493 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9496 vport = hclge_get_vf_vport(hdev, vfid);
9500 /* qos is a 3-bit value, so it cannot be bigger than 7 */
9501 if (vlan > VLAN_N_VID - 1 || qos > 7)
9503 if (proto != htons(ETH_P_8021Q))
9504 return -EPROTONOSUPPORT;
9506 state = hclge_get_port_base_vlan_state(vport,
9507 vport->port_base_vlan_cfg.state,
9509 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9512 vlan_info.vlan_tag = vlan;
9513 vlan_info.qos = qos;
9514 vlan_info.vlan_proto = ntohs(proto);
9516 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9518 dev_err(&hdev->pdev->dev,
9519 "failed to update port base vlan for vf %d, ret = %d\n",
9524 /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
9527 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9528 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9529 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9530 vport->vport_id, state,
9537 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9539 struct hclge_vlan_info *vlan_info;
9540 struct hclge_vport *vport;
9544 /* clear port base vlan for all vfs */
9545 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9546 vport = &hdev->vport[vf];
9547 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9549 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9551 vlan_info->vlan_tag, true);
9553 dev_err(&hdev->pdev->dev,
9554 "failed to clear vf vlan for vf%d, ret = %d\n",
9555 vf - HCLGE_VF_VPORT_START_NUM, ret);
9559 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9560 u16 vlan_id, bool is_kill)
9562 struct hclge_vport *vport = hclge_get_vport(handle);
9563 struct hclge_dev *hdev = vport->back;
9564 bool writen_to_tbl = false;
9567 /* When the device is resetting or reset has failed, firmware is unable
9568  * to handle the mailbox. Just record the vlan id, and remove it after
9571 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9572 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9573 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9577 /* When port based vlan is enabled, we use the port based vlan as the
9578  * vlan filter entry. In this case, we don't update the vlan filter table
9579  * when the user adds a new vlan or removes an existing one, we just
9580  * update the vport vlan list. The vlan ids in the vlan list are not
9581  * written to the vlan filter table until port based vlan is disabled
9583 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9584 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9586 writen_to_tbl = true;
9591 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9593 hclge_add_vport_vlan_table(vport, vlan_id,
9595 } else if (is_kill) {
9596 /* when removing the hw vlan filter failed, record the vlan id,
9597  * and try to remove it from hw later, to be consistent
9600 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9605 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9607 #define HCLGE_MAX_SYNC_COUNT 60
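/* Retry the vlan deletions that failed earlier and were recorded in
 * vlan_del_fail_bmap; HCLGE_MAX_SYNC_COUNT bounds the work done in a
 * single invocation of the sync task.
 */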
9609 int i, ret, sync_cnt = 0;
9612 /* start from vport 1 for PF is always alive */
9613 for (i = 0; i < hdev->num_alloc_vport; i++) {
9614 struct hclge_vport *vport = &hdev->vport[i];
9616 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9618 while (vlan_id != VLAN_N_VID) {
9619 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9620 vport->vport_id, vlan_id,
9622 if (ret && ret != -EINVAL)
9625 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9626 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9629 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9632 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9638 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9640 struct hclge_config_max_frm_size_cmd *req;
9641 struct hclge_desc desc;
9643 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9645 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9646 req->max_frm_size = cpu_to_le16(new_mps);
9647 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9649 return hclge_cmd_send(&hdev->hw, &desc, 1);
9652 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9654 struct hclge_vport *vport = hclge_get_vport(handle);
9656 return hclge_set_vport_mtu(vport, new_mtu);
9659 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9661 struct hclge_dev *hdev = vport->back;
9662 int i, max_frm_size, ret;
9664 /* HW supports 2 layers of vlan */
9665 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9666 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9667 max_frm_size > HCLGE_MAC_MAX_FRAME)
9670 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9671 mutex_lock(&hdev->vport_lock);
9672 /* VF's mps must fit within hdev->mps */
9673 if (vport->vport_id && max_frm_size > hdev->mps) {
9674 mutex_unlock(&hdev->vport_lock);
9676 } else if (vport->vport_id) {
9677 vport->mps = max_frm_size;
9678 mutex_unlock(&hdev->vport_lock);
9682 /* PF's mps must be greater than VF's mps */
9683 for (i = 1; i < hdev->num_alloc_vport; i++)
9684 if (max_frm_size < hdev->vport[i].mps) {
9685 mutex_unlock(&hdev->vport_lock);
9689 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9691 ret = hclge_set_mac_mtu(hdev, max_frm_size);
9693 dev_err(&hdev->pdev->dev,
9694 "Change mtu fail, ret =%d\n", ret);
9698 hdev->mps = max_frm_size;
9699 vport->mps = max_frm_size;
9701 ret = hclge_buffer_alloc(hdev);
9703 dev_err(&hdev->pdev->dev,
9704 "Allocate buffer fail, ret =%d\n", ret);
9707 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9708 mutex_unlock(&hdev->vport_lock);
9712 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9715 struct hclge_reset_tqp_queue_cmd *req;
9716 struct hclge_desc desc;
9719 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9721 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9722 req->tqp_id = cpu_to_le16(queue_id);
9724 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9726 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9728 dev_err(&hdev->pdev->dev,
9729 "Send tqp reset cmd error, status =%d\n", ret);
9736 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9738 struct hclge_reset_tqp_queue_cmd *req;
9739 struct hclge_desc desc;
9742 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9744 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9745 req->tqp_id = cpu_to_le16(queue_id);
9747 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9749 dev_err(&hdev->pdev->dev,
9750 "Get reset status error, status =%d\n", ret);
9754 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9757 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9759 struct hnae3_queue *queue;
9760 struct hclge_tqp *tqp;
9762 queue = handle->kinfo.tqp[queue_id];
9763 tqp = container_of(queue, struct hclge_tqp, q);
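/* TQP reset handshake: assert the reset with HCLGE_OPC_RESET_TQP_QUEUE,
 * poll hclge_get_reset_status() until hardware reports ready, then send
 * the same command with the reset bit cleared to deassert it.
 */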
9768 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9770 struct hclge_vport *vport = hclge_get_vport(handle);
9771 struct hclge_dev *hdev = vport->back;
9772 int reset_try_times = 0;
9777 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9779 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9781 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9785 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9787 dev_err(&hdev->pdev->dev,
9788 "Send reset tqp cmd fail, ret = %d\n", ret);
9792 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9793 reset_status = hclge_get_reset_status(hdev, queue_gid);
9797 /* Wait for tqp hw reset */
9798 usleep_range(1000, 1200);
9801 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9802 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9806 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9808 dev_err(&hdev->pdev->dev,
9809 "Deassert the soft reset fail, ret = %d\n", ret);
9814 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9816 struct hnae3_handle *handle = &vport->nic;
9817 struct hclge_dev *hdev = vport->back;
9818 int reset_try_times = 0;
9823 if (queue_id >= handle->kinfo.num_tqps) {
9824 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
9829 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9831 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9833 dev_warn(&hdev->pdev->dev,
9834 "Send reset tqp cmd fail, ret = %d\n", ret);
9838 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9839 reset_status = hclge_get_reset_status(hdev, queue_gid);
9843 /* Wait for tqp hw reset */
9844 usleep_range(1000, 1200);
9847 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9848 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9852 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9854 dev_warn(&hdev->pdev->dev,
9855 "Deassert the soft reset fail, ret = %d\n", ret);
9858 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9860 struct hclge_vport *vport = hclge_get_vport(handle);
9861 struct hclge_dev *hdev = vport->back;
9863 return hdev->fw_version;
9866 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9868 struct phy_device *phydev = hdev->hw.mac.phydev;
9873 phy_set_asym_pause(phydev, rx_en, tx_en);
9876 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9880 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9883 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9885 dev_err(&hdev->pdev->dev,
9886 "configure pauseparam error, ret = %d.\n", ret);
9891 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9893 struct phy_device *phydev = hdev->hw.mac.phydev;
9894 u16 remote_advertising = 0;
9895 u16 local_advertising;
9896 u32 rx_pause, tx_pause;
9899 if (!phydev->link || !phydev->autoneg)
9902 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9905 remote_advertising = LPA_PAUSE_CAP;
9907 if (phydev->asym_pause)
9908 remote_advertising |= LPA_PAUSE_ASYM;
9910 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9911 remote_advertising);
9912 tx_pause = flowctl & FLOW_CTRL_TX;
9913 rx_pause = flowctl & FLOW_CTRL_RX;
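/* mii_resolve_flowctrl_fdx() applies the IEEE 802.3 pause resolution
 * rules to the local and link partner advertisements, yielding the
 * FLOW_CTRL_TX/RX bits to program into the MAC.
 */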
9915 if (phydev->duplex == HCLGE_MAC_HALF) {
9920 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9923 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9924 u32 *rx_en, u32 *tx_en)
9926 struct hclge_vport *vport = hclge_get_vport(handle);
9927 struct hclge_dev *hdev = vport->back;
9928 struct phy_device *phydev = hdev->hw.mac.phydev;
9930 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9932 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9938 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9941 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9944 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9953 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9954 u32 rx_en, u32 tx_en)
9957 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9958 else if (rx_en && !tx_en)
9959 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9960 else if (!rx_en && tx_en)
9961 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9963 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9965 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9968 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9969 u32 rx_en, u32 tx_en)
9971 struct hclge_vport *vport = hclge_get_vport(handle);
9972 struct hclge_dev *hdev = vport->back;
9973 struct phy_device *phydev = hdev->hw.mac.phydev;
9977 fc_autoneg = hclge_get_autoneg(handle);
9978 if (auto_neg != fc_autoneg) {
9979 dev_info(&hdev->pdev->dev,
9980 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9985 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9986 dev_info(&hdev->pdev->dev,
9987 "Priority flow control enabled. Cannot set link flow control.\n");
9991 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9993 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9996 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9999 return phy_start_aneg(phydev);
10001 return -EOPNOTSUPP;
10004 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10005 u8 *auto_neg, u32 *speed, u8 *duplex)
10007 struct hclge_vport *vport = hclge_get_vport(handle);
10008 struct hclge_dev *hdev = vport->back;
10011 *speed = hdev->hw.mac.speed;
10013 *duplex = hdev->hw.mac.duplex;
10015 *auto_neg = hdev->hw.mac.autoneg;
10018 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10021 struct hclge_vport *vport = hclge_get_vport(handle);
10022 struct hclge_dev *hdev = vport->back;
10024 /* When the nic is down, the service task is not running and doesn't
10025  * update the port information every second. Query the port information
10026  * before returning the media type to ensure the media information is correct.
10028 hclge_update_port_info(hdev);
10031 *media_type = hdev->hw.mac.media_type;
10034 *module_type = hdev->hw.mac.module_type;
10037 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10038 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10040 struct hclge_vport *vport = hclge_get_vport(handle);
10041 struct hclge_dev *hdev = vport->back;
10042 struct phy_device *phydev = hdev->hw.mac.phydev;
10043 int mdix_ctrl, mdix, is_resolved;
10044 unsigned int retval;
10047 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10048 *tp_mdix = ETH_TP_MDI_INVALID;
10052 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
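/* the MDI-X control/status registers live on the PHY page selected
 * above; HCLGE_PHY_PAGE_COPPER is restored below once they are read
 */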
10054 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10055 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10056 HCLGE_PHY_MDIX_CTRL_S);
10058 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10059 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10060 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10062 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10064 switch (mdix_ctrl) {
10066 *tp_mdix_ctrl = ETH_TP_MDI;
10069 *tp_mdix_ctrl = ETH_TP_MDI_X;
10072 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10075 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10080 *tp_mdix = ETH_TP_MDI_INVALID;
10082 *tp_mdix = ETH_TP_MDI_X;
10084 *tp_mdix = ETH_TP_MDI;
10087 static void hclge_info_show(struct hclge_dev *hdev)
10089 struct device *dev = &hdev->pdev->dev;
10091 dev_info(dev, "PF info begin:\n");
10093 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10094 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10095 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10096 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10097 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
10098 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10099 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10100 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10101 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10102 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10103 dev_info(dev, "This is %s PF\n",
10104 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10105 dev_info(dev, "DCB %s\n",
10106 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10107 dev_info(dev, "MQPRIO %s\n",
10108 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10110 dev_info(dev, "PF info end.\n");
10113 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10114 struct hclge_vport *vport)
10116 struct hnae3_client *client = vport->nic.client;
10117 struct hclge_dev *hdev = ae_dev->priv;
10118 int rst_cnt = hdev->rst_stats.reset_cnt;
10121 ret = client->ops->init_instance(&vport->nic);
10125 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10126 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10127 rst_cnt != hdev->rst_stats.reset_cnt) {
10132 /* Enable nic hw error interrupts */
10133 ret = hclge_config_nic_hw_error(hdev, true);
10135 dev_err(&ae_dev->pdev->dev,
10136 "fail(%d) to enable hw error interrupts\n", ret);
10140 hnae3_set_client_init_flag(client, ae_dev, 1);
10142 if (netif_msg_drv(&hdev->vport->nic))
10143 hclge_info_show(hdev);
10148 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10149 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10150 msleep(HCLGE_WAIT_RESET_DONE);
10152 client->ops->uninit_instance(&vport->nic, 0);
10157 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10158 struct hclge_vport *vport)
10160 struct hclge_dev *hdev = ae_dev->priv;
10161 struct hnae3_client *client;
10165 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10169 client = hdev->roce_client;
10170 ret = hclge_init_roce_base_info(vport);
10174 rst_cnt = hdev->rst_stats.reset_cnt;
10175 ret = client->ops->init_instance(&vport->roce);
10179 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10180 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10181 rst_cnt != hdev->rst_stats.reset_cnt) {
10183 goto init_roce_err;
10186 /* Enable roce ras interrupts */
10187 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10189 dev_err(&ae_dev->pdev->dev,
10190 "fail(%d) to enable roce ras interrupts\n", ret);
10191 goto init_roce_err;
10194 hnae3_set_client_init_flag(client, ae_dev, 1);
10199 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10200 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10201 msleep(HCLGE_WAIT_RESET_DONE);
10203 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10208 static int hclge_init_client_instance(struct hnae3_client *client,
10209 struct hnae3_ae_dev *ae_dev)
10211 struct hclge_dev *hdev = ae_dev->priv;
10212 struct hclge_vport *vport;
10215 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10216 vport = &hdev->vport[i];
10218 switch (client->type) {
10219 case HNAE3_CLIENT_KNIC:
10220 hdev->nic_client = client;
10221 vport->nic.client = client;
10222 ret = hclge_init_nic_client_instance(ae_dev, vport);
10226 ret = hclge_init_roce_client_instance(ae_dev, vport);
10231 case HNAE3_CLIENT_ROCE:
10232 if (hnae3_dev_roce_supported(hdev)) {
10233 hdev->roce_client = client;
10234 vport->roce.client = client;
10237 ret = hclge_init_roce_client_instance(ae_dev, vport);
10250 hdev->nic_client = NULL;
10251 vport->nic.client = NULL;
10254 hdev->roce_client = NULL;
10255 vport->roce.client = NULL;
10259 static void hclge_uninit_client_instance(struct hnae3_client *client,
10260 struct hnae3_ae_dev *ae_dev)
10262 struct hclge_dev *hdev = ae_dev->priv;
10263 struct hclge_vport *vport;
10266 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10267 vport = &hdev->vport[i];
10268 if (hdev->roce_client) {
10269 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10270 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10271 msleep(HCLGE_WAIT_RESET_DONE);
10273 hdev->roce_client->ops->uninit_instance(&vport->roce,
10275 hdev->roce_client = NULL;
10276 vport->roce.client = NULL;
10278 if (client->type == HNAE3_CLIENT_ROCE)
10280 if (hdev->nic_client && client->ops->uninit_instance) {
10281 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10282 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10283 msleep(HCLGE_WAIT_RESET_DONE);
10285 client->ops->uninit_instance(&vport->nic, 0);
10286 hdev->nic_client = NULL;
10287 vport->nic.client = NULL;
10292 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10294 #define HCLGE_MEM_BAR 4
10296 struct pci_dev *pdev = hdev->pdev;
10297 struct hclge_hw *hw = &hdev->hw;
10299 /* if the device does not have device memory, return directly */
10300 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10303 hw->mem_base = devm_ioremap_wc(&pdev->dev,
10304 pci_resource_start(pdev, HCLGE_MEM_BAR),
10305 pci_resource_len(pdev, HCLGE_MEM_BAR));
10306 if (!hw->mem_base) {
10307 dev_err(&pdev->dev, "failed to map device memory\n");
10314 static int hclge_pci_init(struct hclge_dev *hdev)
10316 struct pci_dev *pdev = hdev->pdev;
10317 struct hclge_hw *hw;
10320 ret = pci_enable_device(pdev);
10322 dev_err(&pdev->dev, "failed to enable PCI device\n");
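/* prefer a 64-bit DMA mask and fall back to 32 bits if the platform
 * cannot satisfy it; dma_set_mask_and_coherent() sets the streaming
 * and coherent masks together
 */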
10326 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10328 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10330 dev_err(&pdev->dev,
10331 "can't set consistent PCI DMA");
10332 goto err_disable_device;
10334 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10337 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10339 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10340 goto err_disable_device;
10343 pci_set_master(pdev);
10345 hw->io_base = pcim_iomap(pdev, 2, 0);
10346 if (!hw->io_base) {
10347 dev_err(&pdev->dev, "Can't map configuration register space\n");
10349 goto err_clr_master;
10352 ret = hclge_dev_mem_map(hdev);
10354 goto err_unmap_io_base;
10356 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10361 pcim_iounmap(pdev, hdev->hw.io_base);
10363 pci_clear_master(pdev);
10364 pci_release_regions(pdev);
10365 err_disable_device:
10366 pci_disable_device(pdev);
10371 static void hclge_pci_uninit(struct hclge_dev *hdev)
10373 struct pci_dev *pdev = hdev->pdev;
10375 if (hdev->hw.mem_base)
10376 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10378 pcim_iounmap(pdev, hdev->hw.io_base);
10379 pci_free_irq_vectors(pdev);
10380 pci_clear_master(pdev);
10381 pci_release_mem_regions(pdev);
10382 pci_disable_device(pdev);
10385 static void hclge_state_init(struct hclge_dev *hdev)
10387 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10388 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10389 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10390 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10391 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10392 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10393 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10396 static void hclge_state_uninit(struct hclge_dev *hdev)
10398 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10399 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10401 if (hdev->reset_timer.function)
10402 del_timer_sync(&hdev->reset_timer);
10403 if (hdev->service_task.work.func)
10404 cancel_delayed_work_sync(&hdev->service_task);
10407 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10409 #define HCLGE_FLR_RETRY_WAIT_MS 500
10410 #define HCLGE_FLR_RETRY_CNT 5
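/* FLR preparation can race with an in-flight reset: on failure the
 * reset semaphore is released so the pending reset can run first, and
 * the preparation is retried up to HCLGE_FLR_RETRY_CNT times
 */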
10412 struct hclge_dev *hdev = ae_dev->priv;
10417 down(&hdev->reset_sem);
10418 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10419 hdev->reset_type = HNAE3_FLR_RESET;
10420 ret = hclge_reset_prepare(hdev);
10421 if (ret || hdev->reset_pending) {
10422 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10424 if (hdev->reset_pending ||
10425 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10426 dev_err(&hdev->pdev->dev,
10427 "reset_pending:0x%lx, retry_cnt:%d\n",
10428 hdev->reset_pending, retry_cnt);
10429 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10430 up(&hdev->reset_sem);
10431 msleep(HCLGE_FLR_RETRY_WAIT_MS);
10436 /* disable misc vector before FLR is done */
10437 hclge_enable_vector(&hdev->misc_vector, false);
10438 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10439 hdev->rst_stats.flr_rst_cnt++;
10442 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10444 struct hclge_dev *hdev = ae_dev->priv;
10447 hclge_enable_vector(&hdev->misc_vector, true);
10449 ret = hclge_reset_rebuild(hdev);
10451 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10453 hdev->reset_type = HNAE3_NONE_RESET;
10454 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10455 up(&hdev->reset_sem);
10458 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10462 for (i = 0; i < hdev->num_alloc_vport; i++) {
10463 struct hclge_vport *vport = &hdev->vport[i];
10466 /* Send cmd to clear VF's FUNC_RST_ING */
10467 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10469 dev_warn(&hdev->pdev->dev,
10470 "clear vf(%u) rst failed %d!\n",
10471 vport->vport_id, ret);
10475 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10477 struct pci_dev *pdev = ae_dev->pdev;
10478 struct hclge_dev *hdev;
10481 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10486 hdev->ae_dev = ae_dev;
10487 hdev->reset_type = HNAE3_NONE_RESET;
10488 hdev->reset_level = HNAE3_FUNC_RESET;
10489 ae_dev->priv = hdev;
10491 /* HW supports 2 layers of vlan */
10492 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10494 mutex_init(&hdev->vport_lock);
10495 spin_lock_init(&hdev->fd_rule_lock);
10496 sema_init(&hdev->reset_sem, 1);
10498 ret = hclge_pci_init(hdev);
10502 /* Initialize the firmware command queue */
10503 ret = hclge_cmd_queue_init(hdev);
10505 goto err_pci_uninit;
10507 /* Initialize the firmware command */
10508 ret = hclge_cmd_init(hdev);
10510 goto err_cmd_uninit;
10512 ret = hclge_get_cap(hdev);
10514 goto err_cmd_uninit;
10516 ret = hclge_query_dev_specs(hdev);
10518 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10520 goto err_cmd_uninit;
10523 ret = hclge_configure(hdev);
10525 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10526 goto err_cmd_uninit;
10529 ret = hclge_init_msi(hdev);
10531 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10532 goto err_cmd_uninit;
10535 ret = hclge_misc_irq_init(hdev);
10537 goto err_msi_uninit;
10539 ret = hclge_alloc_tqps(hdev);
10541 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10542 goto err_msi_irq_uninit;
10545 ret = hclge_alloc_vport(hdev);
10547 goto err_msi_irq_uninit;
10549 ret = hclge_map_tqp(hdev);
10551 goto err_msi_irq_uninit;
10553 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10554 ret = hclge_mac_mdio_config(hdev);
10556 goto err_msi_irq_uninit;
10559 ret = hclge_init_umv_space(hdev);
10561 goto err_mdiobus_unreg;
10563 ret = hclge_mac_init(hdev);
10565 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10566 goto err_mdiobus_unreg;
10569 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10571 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10572 goto err_mdiobus_unreg;
10575 ret = hclge_config_gro(hdev, true);
10577 goto err_mdiobus_unreg;
10579 ret = hclge_init_vlan_config(hdev);
10581 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10582 goto err_mdiobus_unreg;
10585 ret = hclge_tm_schd_init(hdev);
10587 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10588 goto err_mdiobus_unreg;
10591 hclge_rss_init_cfg(hdev);
10592 ret = hclge_rss_init_hw(hdev);
10594 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10595 goto err_mdiobus_unreg;
10598 ret = init_mgr_tbl(hdev);
10600 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10601 goto err_mdiobus_unreg;
10604 ret = hclge_init_fd_config(hdev);
10606 dev_err(&pdev->dev,
10607 "fd table init fail, ret=%d\n", ret);
10608 goto err_mdiobus_unreg;
10611 INIT_KFIFO(hdev->mac_tnl_log);
10613 hclge_dcb_ops_set(hdev);
10615 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10616 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10618 /* Setup affinity after service timer setup because add_timer_on
10619 * is called in affinity notify.
10621 hclge_misc_affinity_setup(hdev);
10623 hclge_clear_all_event_cause(hdev);
10624 hclge_clear_resetting_state(hdev);
10626 /* Log and clear the hw errors that have already occurred */
10627 hclge_handle_all_hns_hw_errors(ae_dev);
10629 /* request a delayed reset for the error recovery, because an immediate
10630  * global reset on a PF would affect the pending initialization of other PFs
10632 if (ae_dev->hw_err_reset_req) {
10633 enum hnae3_reset_type reset_level;
10635 reset_level = hclge_get_reset_level(ae_dev,
10636 &ae_dev->hw_err_reset_req);
10637 hclge_set_def_reset_request(ae_dev, reset_level);
10638 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10641 /* Enable MISC vector(vector0) */
10642 hclge_enable_vector(&hdev->misc_vector, true);
10644 hclge_state_init(hdev);
10645 hdev->last_reset_time = jiffies;
10647 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10648 HCLGE_DRIVER_NAME);
10650 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10655 if (hdev->hw.mac.phydev)
10656 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10657 err_msi_irq_uninit:
10658 hclge_misc_irq_uninit(hdev);
10660 pci_free_irq_vectors(pdev);
10662 hclge_cmd_uninit(hdev);
10664 pcim_iounmap(pdev, hdev->hw.io_base);
10665 pci_clear_master(pdev);
10666 pci_release_regions(pdev);
10667 pci_disable_device(pdev);
10669 mutex_destroy(&hdev->vport_lock);
10673 static void hclge_stats_clear(struct hclge_dev *hdev)
10675 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10678 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10680 return hclge_config_switch_param(hdev, vf, enable,
10681 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10684 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10686 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10687 HCLGE_FILTER_FE_NIC_INGRESS_B,
10691 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10695 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10697 dev_err(&hdev->pdev->dev,
10698 "Set vf %d mac spoof check %s failed, ret=%d\n",
10699 vf, enable ? "on" : "off", ret);
10703 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10705 dev_err(&hdev->pdev->dev,
10706 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10707 vf, enable ? "on" : "off", ret);
10712 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10715 struct hclge_vport *vport = hclge_get_vport(handle);
10716 struct hclge_dev *hdev = vport->back;
10717 u32 new_spoofchk = enable ? 1 : 0;
10720 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10721 return -EOPNOTSUPP;
10723 vport = hclge_get_vf_vport(hdev, vf);
10727 if (vport->vf_info.spoofchk == new_spoofchk)
10730 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10731 dev_warn(&hdev->pdev->dev,
10732 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10734 else if (enable && hclge_is_umv_space_full(vport, true))
10735 dev_warn(&hdev->pdev->dev,
10736 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10739 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10743 vport->vf_info.spoofchk = new_spoofchk;
10747 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10749 struct hclge_vport *vport = hdev->vport;
10753 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10756 /* resume the vf spoof check state after reset */
10757 for (i = 0; i < hdev->num_alloc_vport; i++) {
10758 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10759 vport->vf_info.spoofchk);
10769 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10771 struct hclge_vport *vport = hclge_get_vport(handle);
10772 struct hclge_dev *hdev = vport->back;
10773 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10774 u32 new_trusted = enable ? 1 : 0;
10778 vport = hclge_get_vf_vport(hdev, vf);
10782 if (vport->vf_info.trusted == new_trusted)
10785 /* Disable promisc mode for VF if it is not trusted any more. */
10786 if (!enable && vport->vf_info.promisc_enable) {
10787 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10788 ret = hclge_set_vport_promisc_mode(vport, false, false,
10792 vport->vf_info.promisc_enable = 0;
10793 hclge_inform_vf_promisc_info(vport);
10796 vport->vf_info.trusted = new_trusted;
10801 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10806 /* reset vf rate to default value */
10807 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10808 struct hclge_vport *vport = &hdev->vport[vf];
10810 vport->vf_info.max_tx_rate = 0;
10811 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10813 dev_err(&hdev->pdev->dev,
10814 "vf%d failed to reset to default, ret=%d\n",
10815 vf - HCLGE_VF_VPORT_START_NUM, ret);
10819 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10820 int min_tx_rate, int max_tx_rate)
10822 if (min_tx_rate != 0 ||
10823 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10824 dev_err(&hdev->pdev->dev,
10825 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10826 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10833 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10834 int min_tx_rate, int max_tx_rate, bool force)
10836 struct hclge_vport *vport = hclge_get_vport(handle);
10837 struct hclge_dev *hdev = vport->back;
10840 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10844 vport = hclge_get_vf_vport(hdev, vf);
10848 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10851 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10855 vport->vf_info.max_tx_rate = max_tx_rate;
10860 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10862 struct hnae3_handle *handle = &hdev->vport->nic;
10863 struct hclge_vport *vport;
10867 /* resume the vf max_tx_rate after reset */
10868 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10869 vport = hclge_get_vf_vport(hdev, vf);
10873 /* zero means max rate; after reset, firmware has already set it
10874  * to the max rate, so just continue.
10876 if (!vport->vf_info.max_tx_rate)
10879 ret = hclge_set_vf_rate(handle, vf, 0,
10880 vport->vf_info.max_tx_rate, true);
10882 dev_err(&hdev->pdev->dev,
10883 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10884 vf, vport->vf_info.max_tx_rate, ret);
10892 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10894 struct hclge_vport *vport = hdev->vport;
10897 for (i = 0; i < hdev->num_alloc_vport; i++) {
10898 hclge_vport_stop(vport);
10903 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10905 struct hclge_dev *hdev = ae_dev->priv;
10906 struct pci_dev *pdev = ae_dev->pdev;
10909 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10911 hclge_stats_clear(hdev);
10912 /* NOTE: pf reset doesn't need to clear or restore pf and vf table entries,
10913  * so the tables in memory should not be cleaned here.
10915 if (hdev->reset_type == HNAE3_IMP_RESET ||
10916 hdev->reset_type == HNAE3_GLOBAL_RESET) {
10917 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10918 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10919 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10920 hclge_reset_umv_space(hdev);
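/* the mirror state cleared above is rebuilt by the restore path
 * (hclge_restore_hw_table()) once the reset completes
 */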
10923 ret = hclge_cmd_init(hdev);
10925 dev_err(&pdev->dev, "Cmd queue init failed\n");
10929 ret = hclge_map_tqp(hdev);
10931 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10935 ret = hclge_mac_init(hdev);
10937 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10941 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10943 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10947 ret = hclge_config_gro(hdev, true);
10951 ret = hclge_init_vlan_config(hdev);
10953 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10957 ret = hclge_tm_init_hw(hdev, true);
10959 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10963 ret = hclge_rss_init_hw(hdev);
10965 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10969 ret = init_mgr_tbl(hdev);
10971 dev_err(&pdev->dev,
10972 "failed to reinit manager table, ret = %d\n", ret);
10976 ret = hclge_init_fd_config(hdev);
10978 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10982 /* Log and clear the hw errors that have already occurred */
10983 hclge_handle_all_hns_hw_errors(ae_dev);
10985 /* Re-enable the hw error interrupts because
10986 * the interrupts get disabled on global reset.
10988 ret = hclge_config_nic_hw_error(hdev, true);
10990 dev_err(&pdev->dev,
10991 "fail(%d) to re-enable NIC hw error interrupts\n",
10996 if (hdev->roce_client) {
10997 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10999 dev_err(&pdev->dev,
11000 "fail(%d) to re-enable roce ras interrupts\n",
11006 hclge_reset_vport_state(hdev);
11007 ret = hclge_reset_vport_spoofchk(hdev);
11011 ret = hclge_resume_vf_rate(hdev);
11015 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11016 HCLGE_DRIVER_NAME);
11021 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11023 struct hclge_dev *hdev = ae_dev->priv;
11024 struct hclge_mac *mac = &hdev->hw.mac;
11026 hclge_reset_vf_rate(hdev);
11027 hclge_clear_vf_vlan(hdev);
11028 hclge_misc_affinity_teardown(hdev);
11029 hclge_state_uninit(hdev);
11030 hclge_uninit_mac_table(hdev);
11033 mdiobus_unregister(mac->mdio_bus);
11035 /* Disable MISC vector(vector0) */
11036 hclge_enable_vector(&hdev->misc_vector, false);
11037 synchronize_irq(hdev->misc_vector.vector_irq);
11039 /* Disable all hw interrupts */
11040 hclge_config_mac_tnl_int(hdev, false);
11041 hclge_config_nic_hw_error(hdev, false);
11042 hclge_config_rocee_ras_interrupt(hdev, false);
11044 hclge_cmd_uninit(hdev);
11045 hclge_misc_irq_uninit(hdev);
11046 hclge_pci_uninit(hdev);
11047 mutex_destroy(&hdev->vport_lock);
11048 hclge_uninit_vport_vlan_table(hdev);
11049 ae_dev->priv = NULL;
11052 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11054 struct hclge_vport *vport = hclge_get_vport(handle);
11055 struct hclge_dev *hdev = vport->back;
11057 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11060 static void hclge_get_channels(struct hnae3_handle *handle,
11061 struct ethtool_channels *ch)
11063 ch->max_combined = hclge_get_max_channels(handle);
11064 ch->other_count = 1;
11066 ch->combined_count = handle->kinfo.rss_size;
11069 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11070 u16 *alloc_tqps, u16 *max_rss_size)
11072 struct hclge_vport *vport = hclge_get_vport(handle);
11073 struct hclge_dev *hdev = vport->back;
11075 *alloc_tqps = vport->alloc_tqps;
11076 *max_rss_size = hdev->pf_rss_size_max;
11079 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11080 bool rxfh_configured)
11082 struct hclge_vport *vport = hclge_get_vport(handle);
11083 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11084 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11085 struct hclge_dev *hdev = vport->back;
11086 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11087 u16 cur_rss_size = kinfo->rss_size;
11088 u16 cur_tqps = kinfo->num_tqps;
11089 u16 tc_valid[HCLGE_MAX_TC_NUM];
11095 kinfo->req_rss_size = new_tqps_num;
11097 ret = hclge_tm_vport_map_update(hdev);
11099 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11103 roundup_size = roundup_pow_of_two(kinfo->rss_size);
11104 roundup_size = ilog2(roundup_size);
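/* tc_size is programmed as a log2 value: rss_size is rounded up to a
 * power of two first, then ilog2 of the result is taken
 */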
11105 /* Set the RSS TC mode according to the new RSS size */
11106 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11109 if (!(hdev->hw_tc_map & BIT(i)))
11113 tc_size[i] = roundup_size;
11114 tc_offset[i] = kinfo->rss_size * i;
11116 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11120 /* RSS indirection table has been configured by the user */
11121 if (rxfh_configured)
11124 /* Reinitialize the rss indirection table according to the new RSS size */
11125 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
11129 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
11130 rss_indir[i] = i % kinfo->rss_size;
11132 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11134 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11141 dev_info(&hdev->pdev->dev,
11142 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
11143 cur_rss_size, kinfo->rss_size,
11144 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11149 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11150 u32 *regs_num_64_bit)
11152 struct hclge_desc desc;
11156 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11157 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11159 dev_err(&hdev->pdev->dev,
11160 "Query register number cmd failed, ret = %d.\n", ret);
11164 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
11165 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
11167 total_num = *regs_num_32_bit + *regs_num_64_bit;
11174 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11177 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11178 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11180 struct hclge_desc *desc;
11181 u32 *reg_val = data;
11191 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11192 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11193 HCLGE_32_BIT_REG_RTN_DATANUM);
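/* the first BD carries the command header, so it returns only
 * HCLGE_32_BIT_REG_RTN_DATANUM - HCLGE_32_BIT_DESC_NODATA_LEN register
 * values in its data area, while the following BDs are consumed whole;
 * the extra nodata words are accounted for in cmd_num here
 */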
11194 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11198 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11199 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11201 dev_err(&hdev->pdev->dev,
11202 "Query 32 bit register cmd failed, ret = %d.\n", ret);
11207 for (i = 0; i < cmd_num; i++) {
11209 desc_data = (__le32 *)(&desc[i].data[0]);
11210 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11212 desc_data = (__le32 *)(&desc[i]);
11213 n = HCLGE_32_BIT_REG_RTN_DATANUM;
11215 for (k = 0; k < n; k++) {
11216 *reg_val++ = le32_to_cpu(*desc_data++);
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len, cmd_num, i, k, n, ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
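
/* NOTE: the 64-bit walk mirrors the 32-bit one with u64-sized slots - BD0
 * carries 4 - 1 = 3 values in its data area, later BDs carry 4 values
 * spanning the whole descriptor. Illustrative sizing: regs_num = 10 needs
 * DIV_ROUND_UP(10 + 1, 4) = 3 BDs.
 */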
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3
#define BD_LIST_MAX_NUM		30
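
/* NOTE: illustrative padding math for the constants above: a register dump
 * line holds REG_NUM_PER_LINE (4) u32 words, and a block of reg_num words
 * is padded with 4 - (reg_num & REG_NUM_REMAIN_MASK) SEPARATOR_VALUE words
 * so the next block starts on a fresh line; e.g. reg_num = 10 gets two
 * separators, while a multiple of four gets a full separator line (the
 * REG_SEPARATOR_LINE accounted for in hclge_get_regs_len()).
 */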
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	int i;

	/* initialize command BD except the last one */
	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	/* initialize the last command BD */
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
}
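
/* NOTE: this follows the driver's usual chained-BD convention - every
 * descriptor except the last sets HCLGE_CMD_FLAG_NEXT so firmware treats
 * the array as one multi-BD query.
 */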
static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list,
				    u32 type_num)
{
	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}
static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	desc = desc_src;
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}
static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg_num + separator_num;
}
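
/* NOTE: the value returned above is the number of u32 words written,
 * separators included, which lets the caller advance its output cursor
 * with "reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);".
 */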
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int data_len_per_desc, bd_num, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	u32 data_len;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	data_len_per_desc = sizeof_field(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		data_len = data_len_per_desc * bd_num;
		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
	}

	return ret;
}
static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	struct hclge_desc *desc_src;
	u32 *reg = data;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src)
		return -ENOMEM;

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			break;
		}

		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	kfree(desc_src);
	return ret;
}
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num, separator_num;
	int data_num_sum;
	u32 *reg = data;

	/* fetch the per-PF register values from the PF PCIe register space */
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum = reg_num + separator_num;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum += reg_num + separator_num;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

	return data_num_sum;
}
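
/* NOTE on the snapshot layout produced above: command-queue registers
 * first, then common registers, then one block of ring registers per TQP
 * (register banks sit HCLGE_RING_REG_OFFSET = 0x200 apart) and one block
 * of interrupt registers per in-use vector (HCLGE_RING_INT_REG_OFFSET =
 * 0x4 apart), each block padded with separators to a four-word boundary.
 */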
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
	int regs_lines_32_bit, regs_lines_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}
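
/* NOTE: illustrative sizing (array length assumed for the example, not
 * taken from this file): a 26-entry cmdq list gives cmdq_lines =
 * (26 * 4) / 16 + 1 = 7 sixteen-byte lines. The length computed here must
 * match what hclge_get_regs() later writes, since the ethtool core
 * allocates the snapshot buffer from this return value.
 */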
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, reg_num, separator_num, ret;
	u32 *reg = data;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_32_bit;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_64_bit * 2;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;
	u8 tmp_flags;
	int ret;

	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
		vport->last_promisc_flags = vport->overflow_promisc_flags;
	}

	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
					     tmp_flags & HNAE3_MPE);
		if (!ret) {
			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
			hclge_enable_vlan_filter(handle,
						 tmp_flags & HNAE3_VLAN_FLTR);
		}
	}
}
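
/* NOTE: the overflow flags above appear to cover the case where a full
 * MAC/VLAN table forces a vport into promiscuous mode; the periodic
 * service task ORs them into the netdev-requested flags, and
 * HCLGE_STATE_PROMISC_CHANGED is only cleared once the hardware accepts
 * the new mode, so a failed update is retried on the next pass.
 */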
static bool hclge_module_existed(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u32 existed;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP exist state, ret = %d\n", ret);
		return false;
	}

	existed = le32_to_cpu(desc.data[0]);

	return existed != 0;
}
/* need 6 BDs (140 bytes in total) in one read,
 * return the number of bytes actually read, 0 means read failed.
 */
static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
				     u32 len, u8 *data)
{
	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
	u16 read_len;
	u16 copy_len;
	int ret;
	int i;

	/* setup all 6 bds to read module eeprom info. */
	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
					   true);

		/* bd0~bd4 need next flag */
		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	/* setup bd0, this bd contains offset and read length. */
	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = cpu_to_le16(read_len);

	ret = hclge_cmd_send(&hdev->hw, desc, i);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP eeprom info, ret = %d\n", ret);
		return 0;
	}

	/* copy sfp info from bd0 to out buffer. */
	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	/* copy sfp info from bd1~bd5 to out buffer if needed. */
	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			break;

		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return read_len;
}
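
/* NOTE: assuming the hclge_main.h values HCLGE_SFP_INFO_BD0_LEN = 20 and
 * HCLGE_SFP_INFO_BDX_LEN = 24, the 140-byte budget breaks down as 20 bytes
 * riding in BD0 beside the offset/length header plus 5 * 24 bytes in
 * BD1~BD5, i.e. HCLGE_SFP_INFO_MAX_LEN = 140 bytes per command.
 */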
static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}
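
/* NOTE: this callback backs "ethtool -m <dev>" through the hns3 ethtool
 * ops; each loop iteration fetches at most one 140-byte chunk, so dumping
 * a 256-byte SFP EEPROM takes two firmware round trips.
 */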
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
	.add_cls_flower = hclge_add_cls_flower,
	.del_cls_flower = hclge_del_cls_flower,
	.cls_flower_active = hclge_is_cls_flower_active,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}
static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);