1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
23 #include "hclge_err.h"
26 #define HCLGE_NAME "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30 #define HCLGE_BUF_SIZE_UNIT 256U
31 #define HCLGE_BUF_MUL_BY 2
32 #define HCLGE_BUF_DIV_BY 2
33 #define NEED_RESERVE_TC_NUM 2
34 #define BUF_MAX_PERCENT 100
35 #define BUF_RESERVE_PERCENT 90
37 #define HCLGE_RESET_MAX_FAIL_CNT 5
39 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
40 static int hclge_init_vlan_config(struct hclge_dev *hdev);
41 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
42 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
43 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
44 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
45 u16 *allocated_size, bool is_alloc);
46 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
47 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
48 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
51 static struct hnae3_ae_algo ae_algo;
53 static const struct pci_device_id ae_algo_pci_tbl[] = {
54 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
55 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
56 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
57 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
58 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
59 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
60 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
61 /* required last entry */
65 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
67 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
68 HCLGE_CMDQ_TX_ADDR_H_REG,
69 HCLGE_CMDQ_TX_DEPTH_REG,
70 HCLGE_CMDQ_TX_TAIL_REG,
71 HCLGE_CMDQ_TX_HEAD_REG,
72 HCLGE_CMDQ_RX_ADDR_L_REG,
73 HCLGE_CMDQ_RX_ADDR_H_REG,
74 HCLGE_CMDQ_RX_DEPTH_REG,
75 HCLGE_CMDQ_RX_TAIL_REG,
76 HCLGE_CMDQ_RX_HEAD_REG,
77 HCLGE_VECTOR0_CMDQ_SRC_REG,
78 HCLGE_CMDQ_INTR_STS_REG,
79 HCLGE_CMDQ_INTR_EN_REG,
80 HCLGE_CMDQ_INTR_GEN_REG};
/* Misc/common register offsets collected for the register dump.
 * NOTE(review): this initializer appears truncated in this extraction
 * (no closing brace visible) -- verify against the original file before
 * editing.
 */
82 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
83 HCLGE_VECTOR0_OTER_EN_REG,
84 HCLGE_MISC_RESET_STS_REG,
85 HCLGE_MISC_VECTOR_INT_STS,
86 HCLGE_GLOBAL_RESET_REG,
/* Per-ring (RX then TX) register offsets collected for the register dump.
 * NOTE(review): the closing of this initializer is missing from this
 * extraction -- verify against the original file.
 */
90 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
91 HCLGE_RING_RX_ADDR_H_REG,
92 HCLGE_RING_RX_BD_NUM_REG,
93 HCLGE_RING_RX_BD_LENGTH_REG,
94 HCLGE_RING_RX_MERGE_EN_REG,
95 HCLGE_RING_RX_TAIL_REG,
96 HCLGE_RING_RX_HEAD_REG,
97 HCLGE_RING_RX_FBD_NUM_REG,
98 HCLGE_RING_RX_OFFSET_REG,
99 HCLGE_RING_RX_FBD_OFFSET_REG,
100 HCLGE_RING_RX_STASH_REG,
101 HCLGE_RING_RX_BD_ERR_REG,
102 HCLGE_RING_TX_ADDR_L_REG,
103 HCLGE_RING_TX_ADDR_H_REG,
104 HCLGE_RING_TX_BD_NUM_REG,
105 HCLGE_RING_TX_PRIORITY_REG,
106 HCLGE_RING_TX_TC_REG,
107 HCLGE_RING_TX_MERGE_EN_REG,
108 HCLGE_RING_TX_TAIL_REG,
109 HCLGE_RING_TX_HEAD_REG,
110 HCLGE_RING_TX_FBD_NUM_REG,
111 HCLGE_RING_TX_OFFSET_REG,
112 HCLGE_RING_TX_EBD_NUM_REG,
113 HCLGE_RING_TX_EBD_OFFSET_REG,
114 HCLGE_RING_TX_BD_ERR_REG,
117 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
118 HCLGE_TQP_INTR_GL0_REG,
119 HCLGE_TQP_INTR_GL1_REG,
120 HCLGE_TQP_INTR_GL2_REG,
121 HCLGE_TQP_INTR_RL_REG};
/* ethtool self-test names, indexed by the HNAE3_LOOP_* enum (used by
 * hclge_get_strings() below).
 * NOTE(review): at least the first entry and the closing brace are missing
 * from this extraction -- verify against the original file.
 */
123 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
125 "Serdes serial Loopback test",
126 "Serdes parallel Loopback test",
130 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
131 {"mac_tx_mac_pause_num",
132 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
133 {"mac_rx_mac_pause_num",
134 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
135 {"mac_tx_control_pkt_num",
136 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
137 {"mac_rx_control_pkt_num",
138 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
139 {"mac_tx_pfc_pkt_num",
140 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
141 {"mac_tx_pfc_pri0_pkt_num",
142 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
143 {"mac_tx_pfc_pri1_pkt_num",
144 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
145 {"mac_tx_pfc_pri2_pkt_num",
146 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
147 {"mac_tx_pfc_pri3_pkt_num",
148 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
149 {"mac_tx_pfc_pri4_pkt_num",
150 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
151 {"mac_tx_pfc_pri5_pkt_num",
152 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
153 {"mac_tx_pfc_pri6_pkt_num",
154 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
155 {"mac_tx_pfc_pri7_pkt_num",
156 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
157 {"mac_rx_pfc_pkt_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
159 {"mac_rx_pfc_pri0_pkt_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
161 {"mac_rx_pfc_pri1_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
163 {"mac_rx_pfc_pri2_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
165 {"mac_rx_pfc_pri3_pkt_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
167 {"mac_rx_pfc_pri4_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
169 {"mac_rx_pfc_pri5_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
171 {"mac_rx_pfc_pri6_pkt_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
173 {"mac_rx_pfc_pri7_pkt_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
175 {"mac_tx_total_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
177 {"mac_tx_total_oct_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
179 {"mac_tx_good_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
181 {"mac_tx_bad_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
183 {"mac_tx_good_oct_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
185 {"mac_tx_bad_oct_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
187 {"mac_tx_uni_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
189 {"mac_tx_multi_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
191 {"mac_tx_broad_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
193 {"mac_tx_undersize_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
195 {"mac_tx_oversize_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
197 {"mac_tx_64_oct_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
199 {"mac_tx_65_127_oct_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
201 {"mac_tx_128_255_oct_pkt_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
203 {"mac_tx_256_511_oct_pkt_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
205 {"mac_tx_512_1023_oct_pkt_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
207 {"mac_tx_1024_1518_oct_pkt_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
209 {"mac_tx_1519_2047_oct_pkt_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
211 {"mac_tx_2048_4095_oct_pkt_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
213 {"mac_tx_4096_8191_oct_pkt_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
215 {"mac_tx_8192_9216_oct_pkt_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
217 {"mac_tx_9217_12287_oct_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
219 {"mac_tx_12288_16383_oct_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
221 {"mac_tx_1519_max_good_pkt_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
223 {"mac_tx_1519_max_bad_pkt_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
225 {"mac_rx_total_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
227 {"mac_rx_total_oct_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
229 {"mac_rx_good_pkt_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
231 {"mac_rx_bad_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
233 {"mac_rx_good_oct_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
235 {"mac_rx_bad_oct_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
237 {"mac_rx_uni_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
239 {"mac_rx_multi_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
241 {"mac_rx_broad_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
243 {"mac_rx_undersize_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
245 {"mac_rx_oversize_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
247 {"mac_rx_64_oct_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
249 {"mac_rx_65_127_oct_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
251 {"mac_rx_128_255_oct_pkt_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
253 {"mac_rx_256_511_oct_pkt_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
255 {"mac_rx_512_1023_oct_pkt_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
257 {"mac_rx_1024_1518_oct_pkt_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
259 {"mac_rx_1519_2047_oct_pkt_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
261 {"mac_rx_2048_4095_oct_pkt_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
263 {"mac_rx_4096_8191_oct_pkt_num",
264 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
265 {"mac_rx_8192_9216_oct_pkt_num",
266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
267 {"mac_rx_9217_12287_oct_pkt_num",
268 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
269 {"mac_rx_12288_16383_oct_pkt_num",
270 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
271 {"mac_rx_1519_max_good_pkt_num",
272 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
273 {"mac_rx_1519_max_bad_pkt_num",
274 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
276 {"mac_tx_fragment_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
278 {"mac_tx_undermin_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
280 {"mac_tx_jabber_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
282 {"mac_tx_err_all_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
284 {"mac_tx_from_app_good_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
286 {"mac_tx_from_app_bad_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
288 {"mac_rx_fragment_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
290 {"mac_rx_undermin_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
292 {"mac_rx_jabber_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
294 {"mac_rx_fcs_err_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
296 {"mac_rx_send_app_good_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
298 {"mac_rx_send_app_bad_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
/* Management-frame filter table: steers LLDP frames (dst 01-80-C2-00-00-0E,
 * ethertype 0x88CC) to the management port.
 * NOTE(review): the entry's opening/closing braces are missing from this
 * extraction -- verify against the original file.
 */
302 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
304 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
305 .ethter_type = cpu_to_le16(ETH_P_LLDP),
306 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
307 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
308 .i_port_bitmap = 0x1,
312 static const u8 hclge_hash_key[] = {
313 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
314 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
315 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
316 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
317 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
/* Legacy MAC-stats read path: issue the fixed 21-descriptor
 * HCLGE_OPC_STATS_MAC command and accumulate each returned 64-bit counter
 * into hdev->hw_stats.mac_stats in table order.
 * NOTE(review): error-return path, else-branch brace and function closers
 * are missing from this extraction -- verify against the original file.
 */
320 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
322 #define HCLGE_MAC_CMD_NUM 21
324 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
325 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
330 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
331 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
333 dev_err(&hdev->pdev->dev,
334 "Get MAC pkt stats fail, status = %d.\n", ret);
339 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
340 /* for special opcode 0032, only the first desc has the head */
341 if (unlikely(i == 0)) {
342 desc_data = (__le64 *)(&desc[i].data[0]);
343 n = HCLGE_RD_FIRST_STATS_NUM;
/* descriptors after the first carry only payload, so more stats fit */
345 desc_data = (__le64 *)(&desc[i]);
346 n = HCLGE_RD_OTHER_STATS_NUM;
349 for (k = 0; k < n; k++) {
350 *data += le64_to_cpu(*desc_data);
/* Newer MAC-stats read path: HCLGE_OPC_STATS_MAC_ALL with a
 * firmware-reported descriptor count (see hclge_mac_query_reg_num()).
 * Accumulates counters into hdev->hw_stats.mac_stats like the defective
 * variant above.
 * NOTE(review): kcalloc NULL check, error paths, kfree and returns are
 * missing from this extraction -- verify against the original file.
 */
359 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
361 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
362 struct hclge_desc *desc;
367 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
370 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
371 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
377 for (i = 0; i < desc_num; i++) {
378 /* for special opcode 0034, only the first desc has the head */
380 desc_data = (__le64 *)(&desc[i].data[0]);
381 n = HCLGE_RD_FIRST_STATS_NUM;
383 desc_data = (__le64 *)(&desc[i]);
384 n = HCLGE_RD_OTHER_STATS_NUM;
387 for (k = 0; k < n; k++) {
388 *data += le64_to_cpu(*desc_data);
/* Ask firmware how many MAC stats registers exist and convert that count
 * into the number of descriptors needed by HCLGE_OPC_STATS_MAC_ALL.
 * The "- 3" / ">> 2" apparently reflect 3 stats in the first descriptor
 * and 4 per descriptor thereafter -- TODO confirm against hardware spec.
 */
399 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
401 struct hclge_desc desc;
406 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
407 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
411 desc_data = (__le32 *)(&desc.data[0]);
412 reg_num = le32_to_cpu(*desc_data);
/* 1 head desc + full 4-stat descs + one more for any remainder */
414 *desc_num = 1 + ((reg_num - 3) >> 2) +
415 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
/* Dispatch MAC-stats update: prefer the "complete" path when the firmware
 * reports a register count; fall back to the legacy fixed-size command when
 * the query returns -EOPNOTSUPP.
 */
420 static int hclge_mac_update_stats(struct hclge_dev *hdev)
425 ret = hclge_mac_query_reg_num(hdev, &desc_num);
427 /* The firmware supports the new statistics acquisition method */
429 ret = hclge_mac_update_stats_complete(hdev, desc_num);
430 else if (ret == -EOPNOTSUPP)
431 ret = hclge_mac_update_stats_defective(hdev);
433 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
/* Refresh the per-queue (TQP) RX and TX packet counters: one firmware
 * query per queue, first the RX pass then the TX pass, accumulating into
 * each tqp's tqp_stats.
 * NOTE(review): error-return bodies and closers are missing from this
 * extraction -- verify against the original file.
 */
438 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
440 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
441 struct hclge_vport *vport = hclge_get_vport(handle);
442 struct hclge_dev *hdev = vport->back;
443 struct hnae3_queue *queue;
444 struct hclge_desc desc[1];
445 struct hclge_tqp *tqp;
/* RX pass: query each queue's received packet count */
448 for (i = 0; i < kinfo->num_tqps; i++) {
449 queue = handle->kinfo.tqp[i];
450 tqp = container_of(queue, struct hclge_tqp, q);
451 /* command : HCLGE_OPC_QUERY_IGU_STAT */
452 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
455 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
456 ret = hclge_cmd_send(&hdev->hw, desc, 1);
458 dev_err(&hdev->pdev->dev,
459 "Query tqp stat fail, status = %d,queue = %d\n",
463 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
464 le32_to_cpu(desc[0].data[1]);
/* TX pass: same per-queue query for transmitted packet count */
467 for (i = 0; i < kinfo->num_tqps; i++) {
468 queue = handle->kinfo.tqp[i];
469 tqp = container_of(queue, struct hclge_tqp, q);
470 /* command : HCLGE_OPC_QUERY_IGU_STAT */
471 hclge_cmd_setup_basic_desc(&desc[0],
472 HCLGE_OPC_QUERY_TX_STATUS,
475 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
476 ret = hclge_cmd_send(&hdev->hw, desc, 1);
478 dev_err(&hdev->pdev->dev,
479 "Query tqp stat fail, status = %d,queue = %d\n",
483 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
484 le32_to_cpu(desc[0].data[1]);
/* Copy cached per-queue counters into the ethtool stats buffer: all TX
 * counters first, then all RX counters. Returns the advanced buffer
 * pointer so callers can append further stats.
 */
490 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
492 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
493 struct hclge_tqp *tqp;
497 for (i = 0; i < kinfo->num_tqps; i++) {
498 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
499 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
502 for (i = 0; i < kinfo->num_tqps; i++) {
503 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
504 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
/* Number of per-queue stats entries: one TX and one RX counter per TQP. */
510 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
512 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
514 /* each tqp has TX & RX two queues */
515 return kinfo->num_tqps * (2);
/* Emit the per-queue ethtool stat names ("txqN_pktnum_rcd" then
 * "rxqN_pktnum_rcd"), matching the value order of hclge_tqps_get_stats().
 * Returns the advanced string buffer pointer.
 */
518 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
520 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
524 for (i = 0; i < kinfo->num_tqps; i++) {
525 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
526 struct hclge_tqp, q);
527 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
529 buff = buff + ETH_GSTRING_LEN;
532 for (i = 0; i < kinfo->num_tqps; i++) {
533 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
534 struct hclge_tqp, q);
535 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
537 buff = buff + ETH_GSTRING_LEN;
/* Read `size` u64 counters out of a stats struct using the byte offsets
 * recorded in `strs` (see g_mac_stats_string); returns the advanced
 * output pointer.
 */
543 static u64 *hclge_comm_get_stats(const void *comm_stats,
544 const struct hclge_comm_stats_str strs[],
550 for (i = 0; i < size; i++)
551 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
/* Copy the stat descriptions from `strs` into the ethtool string buffer,
 * one ETH_GSTRING_LEN slot each; only acts on ETH_SS_STATS. Returns the
 * advanced buffer pointer.
 */
556 static u8 *hclge_comm_get_strings(u32 stringset,
557 const struct hclge_comm_stats_str strs[],
560 char *buff = (char *)data;
563 if (stringset != ETH_SS_STATS)
566 for (i = 0; i < size; i++) {
567 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
568 buff = buff + ETH_GSTRING_LEN;
/* Periodic stats refresh for the whole device: TQP stats (only when a
 * client is attached to vport 0's nic handle) plus MAC stats. Failures
 * are logged, not propagated.
 */
574 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
576 struct hnae3_handle *handle;
579 handle = &hdev->vport[0].nic;
580 if (handle->client) {
581 status = hclge_tqps_update_stats(handle);
583 dev_err(&hdev->pdev->dev,
584 "Update TQPS stats fail, status = %d.\n",
589 status = hclge_mac_update_stats(hdev);
591 dev_err(&hdev->pdev->dev,
592 "Update MAC stats fail, status = %d.\n", status);
/* hnae3 ops hook: refresh MAC and TQP stats for this handle. The
 * STATISTICS_UPDATING bit makes the refresh non-reentrant; a concurrent
 * caller simply skips the update.
 */
595 static void hclge_update_stats(struct hnae3_handle *handle,
596 struct net_device_stats *net_stats)
598 struct hclge_vport *vport = hclge_get_vport(handle);
599 struct hclge_dev *hdev = vport->back;
602 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
605 status = hclge_mac_update_stats(hdev);
607 dev_err(&hdev->pdev->dev,
608 "Update MAC stats fail, status = %d.\n",
611 status = hclge_tqps_update_stats(handle);
613 dev_err(&hdev->pdev->dev,
614 "Update TQPS stats fail, status = %d.\n",
617 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
/* ethtool string-set sizing. For ETH_SS_TEST it also (re)computes which
 * loopback tests this device supports in handle->flags; for ETH_SS_STATS
 * it returns MAC stats count plus per-queue stats count.
 * NOTE(review): lines are missing from this extraction (e.g. the count
 * increments inside the TEST branch) -- verify against the original file.
 */
620 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
622 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
623 HNAE3_SUPPORT_PHY_LOOPBACK |\
624 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
625 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
627 struct hclge_vport *vport = hclge_get_vport(handle);
628 struct hclge_dev *hdev = vport->back;
631 /* Loopback test support rules:
632 * mac: only GE mode support
633 * serdes: all mac mode will support include GE/XGE/LGE/CGE
634 * phy: only support when phy device exist on board
636 if (stringset == ETH_SS_TEST) {
637 /* clear loopback bit flags at first */
638 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
639 if (hdev->pdev->revision >= 0x21 ||
640 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
641 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
642 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
644 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
/* serdes loopbacks are supported on every MAC mode */
648 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
649 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
650 } else if (stringset == ETH_SS_STATS) {
651 count = ARRAY_SIZE(g_mac_stats_string) +
652 hclge_tqps_get_sset_count(handle, stringset);
/* ethtool get_strings hook: for ETH_SS_STATS emit MAC stat names followed
 * by per-queue names; for ETH_SS_TEST emit the names of the loopback tests
 * enabled in handle->flags, in hns3_nic_test_strs order.
 */
658 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
661 u8 *p = (char *)data;
664 if (stringset == ETH_SS_STATS) {
665 size = ARRAY_SIZE(g_mac_stats_string);
666 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
668 p = hclge_tqps_get_strings(handle, p);
669 } else if (stringset == ETH_SS_TEST) {
670 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
671 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
673 p += ETH_GSTRING_LEN;
675 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
676 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
678 p += ETH_GSTRING_LEN;
680 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
682 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
684 p += ETH_GSTRING_LEN;
686 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
687 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
689 p += ETH_GSTRING_LEN;
/* ethtool get_stats hook: MAC counters first (g_mac_stats_string order),
 * then per-queue counters appended by hclge_tqps_get_stats().
 */
694 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
696 struct hclge_vport *vport = hclge_get_vport(handle);
697 struct hclge_dev *hdev = vport->back;
700 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
701 ARRAY_SIZE(g_mac_stats_string), data);
702 p = hclge_tqps_get_stats(handle, p);
/* Return the cached MAC pause-frame TX/RX counters through the out
 * parameters (values come from the last stats refresh, no hardware read).
 */
705 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
708 struct hclge_vport *vport = hclge_get_vport(handle);
709 struct hclge_dev *hdev = vport->back;
711 *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
712 *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
/* Interpret the firmware function-status reply: fail unless PF reset is
 * done (PF_STATE_DONE), and record whether this PF is the main PF in
 * hdev->flag.
 */
715 static int hclge_parse_func_status(struct hclge_dev *hdev,
716 struct hclge_func_status_cmd *status)
718 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
721 /* Set the pf to main pf */
722 if (status->pf_state & HCLGE_PF_STATE_MAIN)
723 hdev->flag |= HCLGE_FLAG_MAIN;
725 hdev->flag &= ~HCLGE_FLAG_MAIN;
/* Poll the firmware for function status, retrying up to
 * HCLGE_QUERY_MAX_CNT times with a 1-2ms sleep between attempts, then parse
 * the final reply.
 * NOTE(review): the loop's do/termination-condition lines are missing from
 * this extraction -- verify against the original file.
 */
730 static int hclge_query_function_status(struct hclge_dev *hdev)
732 #define HCLGE_QUERY_MAX_CNT 5
734 struct hclge_func_status_cmd *req;
735 struct hclge_desc desc;
739 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
740 req = (struct hclge_func_status_cmd *)desc.data;
743 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
745 dev_err(&hdev->pdev->dev,
746 "query function status failed %d.\n", ret);
750 /* Check pf reset is done */
753 usleep_range(1000, 2000);
754 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
756 ret = hclge_parse_func_status(hdev, req);
/* Query the resources firmware assigned to this PF: TQP count, packet/TX/DV
 * buffer sizes (with defaults and HCLGE_BUF_SIZE_UNIT rounding), and MSI-X
 * vector counts -- including the RoCE vector split when RoCE is supported.
 */
761 static int hclge_query_pf_resource(struct hclge_dev *hdev)
763 struct hclge_pf_res_cmd *req;
764 struct hclge_desc desc;
767 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
768 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
770 dev_err(&hdev->pdev->dev,
771 "query pf resource failed %d.\n", ret);
775 req = (struct hclge_pf_res_cmd *)desc.data;
776 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
777 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
/* fall back to the driver default when firmware reports no TX buf size */
779 if (req->tx_buf_size)
781 __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
783 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
785 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
787 if (req->dv_buf_size)
789 __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
791 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
793 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
795 if (hnae3_dev_roce_supported(hdev)) {
796 hdev->roce_base_msix_offset =
797 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
798 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
800 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
801 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
803 /* PF should have NIC vectors and Roce vectors,
804 * NIC vectors are queued before Roce vectors.
806 hdev->num_msi = hdev->num_roce_msi +
807 hdev->roce_base_msix_offset;
/* no RoCE: all vectors belong to the NIC */
810 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
811 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
/* Translate a firmware speed code into the driver's HCLGE_MAC_SPEED_*
 * value through the out parameter.
 * NOTE(review): the switch/case and default/error lines are missing from
 * this extraction -- verify against the original file.
 */
817 static int hclge_parse_speed(int speed_cmd, int *speed)
821 *speed = HCLGE_MAC_SPEED_10M;
824 *speed = HCLGE_MAC_SPEED_100M;
827 *speed = HCLGE_MAC_SPEED_1G;
830 *speed = HCLGE_MAC_SPEED_10G;
833 *speed = HCLGE_MAC_SPEED_25G;
836 *speed = HCLGE_MAC_SPEED_40G;
839 *speed = HCLGE_MAC_SPEED_50G;
842 *speed = HCLGE_MAC_SPEED_100G;
/* Check whether the requested speed is advertised in the MAC's
 * speed_ability bitmap: map the speed to its HCLGE_SUPPORT_*_BIT and test
 * it against hdev->hw.mac.speed_ability.
 */
851 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
853 struct hclge_vport *vport = hclge_get_vport(handle);
854 struct hclge_dev *hdev = vport->back;
855 u32 speed_ability = hdev->hw.mac.speed_ability;
859 case HCLGE_MAC_SPEED_10M:
860 speed_bit = HCLGE_SUPPORT_10M_BIT;
862 case HCLGE_MAC_SPEED_100M:
863 speed_bit = HCLGE_SUPPORT_100M_BIT;
865 case HCLGE_MAC_SPEED_1G:
866 speed_bit = HCLGE_SUPPORT_1G_BIT;
868 case HCLGE_MAC_SPEED_10G:
869 speed_bit = HCLGE_SUPPORT_10G_BIT;
871 case HCLGE_MAC_SPEED_25G:
872 speed_bit = HCLGE_SUPPORT_25G_BIT;
874 case HCLGE_MAC_SPEED_40G:
875 speed_bit = HCLGE_SUPPORT_40G_BIT;
877 case HCLGE_MAC_SPEED_50G:
878 speed_bit = HCLGE_SUPPORT_50G_BIT;
880 case HCLGE_MAC_SPEED_100G:
881 speed_bit = HCLGE_SUPPORT_100G_BIT;
887 if (speed_bit & speed_ability)
/* Map supported-speed bits to the ethtool SR (short-reach fiber) link-mode
 * bits in mac->supported.
 */
893 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
895 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
896 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
898 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
899 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
901 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
902 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
904 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
905 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
907 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
908 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
/* Map supported-speed bits to the ethtool LR (long-reach fiber) link-mode
 * bits. Note 25G sets the 25000baseSR bit -- presumably because ethtool
 * defines no dedicated 25G LR mode; confirm against uapi/linux/ethtool.h.
 */
912 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
914 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
915 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
917 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
918 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
920 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
921 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
923 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
924 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
926 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
927 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
/* Map supported-speed bits to the ethtool CR (copper direct-attach)
 * link-mode bits in mac->supported.
 */
931 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
933 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
934 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
936 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
937 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
939 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
940 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
942 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
943 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
945 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
946 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
/* Map supported-speed bits to the ethtool KR/KX (backplane) link-mode bits
 * in mac->supported; unlike the fiber variants this also covers 1G (KX).
 */
950 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
952 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
953 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
955 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
956 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
958 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
959 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
961 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
962 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
964 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
965 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
967 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
968 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
/* Derive the supported FEC modes from the current MAC speed: clear the
 * BASER/RS link-mode bits, then re-set them and fill mac->fec_ability per
 * speed class (10G/40G -> BaseR, 25G/50G -> BaseR+RS, 100G -> RS).
 */
972 static void hclge_convert_setting_fec(struct hclge_mac *mac)
974 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
975 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
977 switch (mac->speed) {
978 case HCLGE_MAC_SPEED_10G:
979 case HCLGE_MAC_SPEED_40G:
980 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
983 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
985 case HCLGE_MAC_SPEED_25G:
986 case HCLGE_MAC_SPEED_50G:
987 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
990 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
993 case HCLGE_MAC_SPEED_100G:
994 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
995 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
/* other speeds: no FEC capability advertised */
998 mac->fec_ability = 0;
/* Fill mac->supported for a fiber port: 1G baseX plus the SR/LR/CR modes
 * derived from speed_ability, FEC modes on rev >= 0x21 hardware, and the
 * FIBRE/Pause/FEC_NONE base bits.
 */
1003 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1006 struct hclge_mac *mac = &hdev->hw.mac;
1008 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1009 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1012 hclge_convert_setting_sr(mac, speed_ability);
1013 hclge_convert_setting_lr(mac, speed_ability);
1014 hclge_convert_setting_cr(mac, speed_ability);
1015 if (hdev->pdev->revision >= 0x21)
1016 hclge_convert_setting_fec(mac);
1018 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1019 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1020 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
/* Fill mac->supported for a backplane port: KR/KX modes from
 * speed_ability, FEC on rev >= 0x21 hardware, plus the
 * Backplane/Pause/FEC_NONE base bits.
 */
1023 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1026 struct hclge_mac *mac = &hdev->hw.mac;
1028 hclge_convert_setting_kr(mac, speed_ability);
1029 if (hdev->pdev->revision >= 0x21)
1030 hclge_convert_setting_fec(mac);
1031 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1032 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1033 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
/* Fill mac->supported for a copper (twisted-pair) port: baseT modes from
 * speed_ability (defaulting to all GE speeds when none reported), plus
 * Autoneg/TP/Pause/Asym_Pause.
 */
1036 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1039 unsigned long *supported = hdev->hw.mac.supported;
1041 /* default to support all speed for GE port */
1043 speed_ability = HCLGE_SUPPORT_GE;
1045 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1046 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1049 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1050 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1052 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1056 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1057 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1058 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1061 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1062 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1063 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1064 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
/* Dispatch link-mode parsing by media type (fiber / copper / backplane);
 * unknown media types are left untouched.
 */
1067 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1069 u8 media_type = hdev->hw.mac.media_type;
1071 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1072 hclge_parse_fiber_link_mode(hdev, speed_ability);
1073 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1074 hclge_parse_copper_link_mode(hdev, speed_ability);
1075 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1076 hclge_parse_backplane_link_mode(hdev, speed_ability);
/* Decode the two-descriptor GET_CFG_PARAM reply into struct hclge_cfg:
 * vport/TC/queue sizing, PHY address, media type, RX buffer length, MAC
 * address, default speed, RSS size, NUMA map, speed ability and UMV table
 * space (with a default when firmware reports zero).
 */
1078 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1080 struct hclge_cfg_param_cmd *req;
1081 u64 mac_addr_tmp_high;
1085 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1087 /* get the configuration */
1088 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1091 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1092 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1093 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1094 HCLGE_CFG_TQP_DESC_N_M,
1095 HCLGE_CFG_TQP_DESC_N_S);
1097 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1098 HCLGE_CFG_PHY_ADDR_M,
1099 HCLGE_CFG_PHY_ADDR_S);
1100 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1101 HCLGE_CFG_MEDIA_TP_M,
1102 HCLGE_CFG_MEDIA_TP_S);
1103 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1104 HCLGE_CFG_RX_BUF_LEN_M,
1105 HCLGE_CFG_RX_BUF_LEN_S);
1106 /* get mac_address */
1107 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1108 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1109 HCLGE_CFG_MAC_ADDR_H_M,
1110 HCLGE_CFG_MAC_ADDR_H_S);
/* merge the high 16 MAC bits above the low 32 (split shift == << 32) */
1112 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1114 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1115 HCLGE_CFG_DEFAULT_SPEED_M,
1116 HCLGE_CFG_DEFAULT_SPEED_S);
1117 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1118 HCLGE_CFG_RSS_SIZE_M,
1119 HCLGE_CFG_RSS_SIZE_S);
/* unpack the 48-bit MAC, least-significant byte first */
1121 for (i = 0; i < ETH_ALEN; i++)
1122 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1124 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1125 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1127 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1128 HCLGE_CFG_SPEED_ABILITY_M,
1129 HCLGE_CFG_SPEED_ABILITY_S);
1130 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1131 HCLGE_CFG_UMV_TBL_SPACE_M,
1132 HCLGE_CFG_UMV_TBL_SPACE_S);
1133 if (!cfg->umv_space)
1134 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1137 /* hclge_get_cfg: query the static parameter from flash
1138 * @hdev: pointer to struct hclge_dev
1139 * @hcfg: the config structure to be filled in
 * Builds HCLGE_PF_CFG_DESC_NUM read descriptors (each covering
 * HCLGE_CFG_RD_LEN_BYTES of config space), sends them in one command,
 * then decodes the reply via hclge_parse_cfg().
 */
1141 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1143 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1144 struct hclge_cfg_param_cmd *req;
1148 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1151 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1152 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1154 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1155 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1156 /* Len should be united by 4 bytes when send to hardware */
1157 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1158 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1159 req->offset = cpu_to_le32(offset);
1162 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1164 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1168 hclge_parse_cfg(hcfg, desc);
/* hclge_get_cap - query device capability: function status first,
 * then the PF resource allocation. Errors are logged and propagated.
 */
1173 static int hclge_get_cap(struct hclge_dev *hdev)
1177 ret = hclge_query_function_status(hdev);
1179 dev_err(&hdev->pdev->dev,
1180 "query function status error %d.\n", ret);
1184 /* get pf resource */
1185 ret = hclge_query_pf_resource(hdev);
1187 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
/* hclge_init_kdump_kernel_config - shrink resource usage when running
 * inside a kdump (crash capture) kernel, where memory is scarce.
 * No-op in a normal kernel.
 */
1192 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1194 #define HCLGE_MIN_TX_DESC 64
1195 #define HCLGE_MIN_RX_DESC 64
1197 if (!is_kdump_kernel())
1200 dev_info(&hdev->pdev->dev,
1201 "Running kdump kernel. Using minimal resources\n");
1203 /* minimal queue pairs equals to the number of vports */
1204 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1205 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1206 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
/* hclge_configure - fetch the firmware configuration and populate the
 * corresponding hclge_dev fields (MAC, queue sizes, TC/TM info, speed,
 * link modes). Validates tc_max and applies kdump minimal config last.
 */
1209 static int hclge_configure(struct hclge_dev *hdev)
1211 struct hclge_cfg cfg;
1215 ret = hclge_get_cfg(hdev, &cfg);
1217 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1221 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1222 hdev->base_tqp_pid = 0;
1223 hdev->rss_size_max = cfg.rss_size_max;
1224 hdev->rx_buf_len = cfg.rx_buf_len;
1225 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1226 hdev->hw.mac.media_type = cfg.media_type;
1227 hdev->hw.mac.phy_addr = cfg.phy_addr;
/* tx and rx rings share the same configured descriptor count */
1228 hdev->num_tx_desc = cfg.tqp_desc_num;
1229 hdev->num_rx_desc = cfg.tqp_desc_num;
1230 hdev->tm_info.num_pg = 1;
1231 hdev->tc_max = cfg.tc_num;
1232 hdev->tm_info.hw_pfc_map = 0;
1233 hdev->wanted_umv_size = cfg.umv_space;
1235 if (hnae3_dev_fd_supported(hdev)) {
1237 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1240 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1242 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1246 hclge_parse_link_mode(hdev, cfg.speed_ability);
/* clamp an out-of-range TC count reported by firmware */
1248 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1249 (hdev->tc_max < 1)) {
1250 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1255 /* Dev does not support DCB */
1256 if (!hnae3_dev_dcb_supported(hdev)) {
1260 hdev->pfc_max = hdev->tc_max;
1263 hdev->tm_info.num_tc = 1;
1265 /* Currently not support uncontiguous tc */
1266 for (i = 0; i < hdev->tm_info.num_tc; i++)
1267 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1269 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1271 hclge_init_kdump_kernel_config(hdev);
/* hclge_config_tso - program the hardware TSO MSS min/max limits.
 * @tso_mss_min: minimum segment size accepted for TSO
 * @tso_mss_max: maximum segment size accepted for TSO
 */
1276 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1277 unsigned int tso_mss_max)
1279 struct hclge_cfg_tso_status_cmd *req;
1280 struct hclge_desc desc;
1283 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1285 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1288 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1289 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1290 req->tso_mss_min = cpu_to_le16(tso_mss);
/* NOTE(review): the MIN field mask/shift is reused for the max value;
 * presumably both 16-bit words share the same field layout - confirm
 * against the command definition in hclge_cmd.h.
 */
1293 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1294 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1295 req->tso_mss_max = cpu_to_le16(tso_mss);
1297 return hclge_cmd_send(&hdev->hw, &desc, 1);
/* hclge_config_gro - enable or disable hardware GRO.
 * Returns early (without error) when the device lacks GRO support.
 */
1300 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1302 struct hclge_cfg_gro_status_cmd *req;
1303 struct hclge_desc desc;
1306 if (!hnae3_dev_gro_supported(hdev))
1309 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1310 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1312 req->gro_en = cpu_to_le16(en ? 1 : 0);
1314 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1316 dev_err(&hdev->pdev->dev,
1317 "GRO hardware config cmd failed, ret = %d\n", ret);
/* hclge_alloc_tqps - allocate the per-device TQP (task queue pair) array
 * with devres and initialise each entry's queue metadata and register
 * I/O base (TQP register blocks are laid out at a fixed stride).
 */
1322 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1324 struct hclge_tqp *tqp;
1327 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1328 sizeof(struct hclge_tqp), GFP_KERNEL);
1334 for (i = 0; i < hdev->num_tqps; i++) {
1335 tqp->dev = &hdev->pdev->dev;
1338 tqp->q.ae_algo = &ae_algo;
1339 tqp->q.buf_size = hdev->rx_buf_len;
1340 tqp->q.tx_desc_num = hdev->num_tx_desc;
1341 tqp->q.rx_desc_num = hdev->num_rx_desc;
1342 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1343 i * HCLGE_TQP_REG_SIZE;
/* hclge_map_tqps_to_func - bind one physical TQP to a function (PF/VF).
 * @func_id: target function (vport) id
 * @tqp_pid: physical queue id
 * @tqp_vid: virtual queue id within the function
 * @is_pf:   selects the PF mapping type bit
 */
1351 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1352 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1354 struct hclge_tqp_map_cmd *req;
1355 struct hclge_desc desc;
1358 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1360 req = (struct hclge_tqp_map_cmd *)desc.data;
1361 req->tqp_id = cpu_to_le16(tqp_pid);
1362 req->tqp_vf = func_id;
1363 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1365 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1366 req->tqp_vid = cpu_to_le16(tqp_vid);
1368 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1370 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
/* hclge_assign_tqp - claim up to @num_tqps unallocated TQPs from the
 * device pool for @vport, wire them into the vport's kinfo->tqp array,
 * then derive rss_size from the number actually allocated per TC.
 */
1375 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1377 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1378 struct hclge_dev *hdev = vport->back;
1381 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1382 alloced < num_tqps; i++) {
1383 if (!hdev->htqp[i].alloced) {
1384 hdev->htqp[i].q.handle = &vport->nic;
1385 hdev->htqp[i].q.tqp_index = alloced;
1386 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1387 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1388 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1389 hdev->htqp[i].alloced = true;
1393 vport->alloc_tqps = alloced;
/* rss_size is bounded both by hardware max and queues per TC */
1394 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1395 vport->alloc_tqps / hdev->tm_info.num_tc);
/* hclge_knic_setup - initialise a vport's KNIC private info (descriptor
 * counts, rx buffer length), allocate its queue-pointer array, and
 * assign physical TQPs to it.
 */
1400 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1401 u16 num_tx_desc, u16 num_rx_desc)
1404 struct hnae3_handle *nic = &vport->nic;
1405 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1406 struct hclge_dev *hdev = vport->back;
1409 kinfo->num_tx_desc = num_tx_desc;
1410 kinfo->num_rx_desc = num_rx_desc;
1412 kinfo->rx_buf_len = hdev->rx_buf_len;
1414 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1415 sizeof(struct hnae3_queue *), GFP_KERNEL);
1419 ret = hclge_assign_tqp(vport, num_tqps);
1421 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
/* hclge_map_tqp_to_vport - program the hardware mapping of every TQP
 * already assigned to @vport; vport 0 is the PF itself.
 */
1426 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1427 struct hclge_vport *vport)
1429 struct hnae3_handle *nic = &vport->nic;
1430 struct hnae3_knic_private_info *kinfo;
1433 kinfo = &nic->kinfo;
1434 for (i = 0; i < vport->alloc_tqps; i++) {
1435 struct hclge_tqp *q =
1436 container_of(kinfo->tqp[i], struct hclge_tqp, q);
/* vport 0 is the PF; all others are VF/VMDq functions */
1440 is_pf = !(vport->vport_id);
1441 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
/* hclge_map_tqp - map TQPs for every vport: the VMDq vports, the
 * requested VFs, plus one for the PF itself.
 */
1450 static int hclge_map_tqp(struct hclge_dev *hdev)
1452 struct hclge_vport *vport = hdev->vport;
1455 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1456 for (i = 0; i < num_vport; i++) {
1459 ret = hclge_map_tqp_to_vport(hdev, vport);
/* hclge_vport_setup - attach a vport's nic handle to the driver and
 * run the KNIC setup with the device's default descriptor counts.
 */
1469 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1471 struct hnae3_handle *nic = &vport->nic;
1472 struct hclge_dev *hdev = vport->back;
1475 nic->pdev = hdev->pdev;
1476 nic->ae_algo = &ae_algo;
1477 nic->numa_node_mask = hdev->numa_node_mask;
1479 ret = hclge_knic_setup(vport, num_tqps,
1480 hdev->num_tx_desc, hdev->num_rx_desc);
1482 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
/* hclge_alloc_vport - allocate and initialise all vports.
 * TQPs are divided evenly across vports; the PF's main vport (index 0)
 * additionally absorbs the remainder. Fails if there are fewer TQPs
 * than vports.
 */
1487 static int hclge_alloc_vport(struct hclge_dev *hdev)
1489 struct pci_dev *pdev = hdev->pdev;
1490 struct hclge_vport *vport;
1496 /* We need to alloc a vport for main NIC of PF */
1497 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1499 if (hdev->num_tqps < num_vport) {
1500 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1501 hdev->num_tqps, num_vport);
1505 /* Alloc the same number of TQPs for every vport */
1506 tqp_per_vport = hdev->num_tqps / num_vport;
1507 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1509 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1514 hdev->vport = vport;
1515 hdev->num_alloc_vport = num_vport;
1517 if (IS_ENABLED(CONFIG_PCI_IOV))
1518 hdev->num_alloc_vfs = hdev->num_req_vfs;
1520 for (i = 0; i < num_vport; i++) {
1522 vport->vport_id = i;
1523 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1524 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1525 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1526 INIT_LIST_HEAD(&vport->vlan_list);
1527 INIT_LIST_HEAD(&vport->uc_mac_list);
1528 INIT_LIST_HEAD(&vport->mc_mac_list);
/* vport 0 (the PF) gets the larger TQP share */
1531 ret = hclge_vport_setup(vport, tqp_main_vport);
1533 ret = hclge_vport_setup(vport, tqp_per_vport);
1536 "vport setup failed for vport %d, %d\n",
/* hclge_cmd_alloc_tx_buff - send the per-TC tx buffer sizes to hardware.
 * Sizes are expressed in 128-byte units with the update-enable bit set.
 */
1547 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1548 struct hclge_pkt_buf_alloc *buf_alloc)
1550 /* TX buffer size is unit by 128 byte */
1551 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1552 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1553 struct hclge_tx_buff_alloc_cmd *req;
1554 struct hclge_desc desc;
1558 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1560 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1561 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1562 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1564 req->tx_pkt_buff[i] =
1565 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1566 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1569 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1571 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
/* hclge_tx_buffer_alloc - thin wrapper around hclge_cmd_alloc_tx_buff()
 * that logs a failure at this level.
 */
1577 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1578 struct hclge_pkt_buf_alloc *buf_alloc)
1580 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1583 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
/* hclge_get_tc_num - count the TCs enabled in hw_tc_map */
1588 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1593 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1594 if (hdev->hw_tc_map & BIT(i))
1599 /* Get the number of pfc enabled TCs, which have private buffer */
1600 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1601 struct hclge_pkt_buf_alloc *buf_alloc)
1603 struct hclge_priv_buf *priv;
1607 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1608 priv = &buf_alloc->priv_buf[i];
/* counted only when the TC is PFC-enabled (bit set in hw_pfc_map) */
1609 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1617 /* Get the number of pfc disabled TCs, which have private buffer */
1618 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1619 struct hclge_pkt_buf_alloc *buf_alloc)
1621 struct hclge_priv_buf *priv;
1625 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1626 priv = &buf_alloc->priv_buf[i];
/* TC must be enabled in hardware but NOT PFC-enabled */
1627 if (hdev->hw_tc_map & BIT(i) &&
1628 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
/* hclge_get_rx_priv_buff_alloced - sum the rx private buffer sizes
 * already assigned across all TCs.
 */
1636 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1638 struct hclge_priv_buf *priv;
1642 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1643 priv = &buf_alloc->priv_buf[i];
1645 rx_priv += priv->buf_size;
/* hclge_get_tx_buff_alloced - sum the tx buffer sizes across all TCs */
1650 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1652 u32 i, total_tx_size = 0;
1654 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1655 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1657 return total_tx_size;
/* hclge_is_rx_buf_ok - check that the rx buffer budget can cover both
 * private per-TC buffers and a minimum shared buffer, and if so compute
 * the shared-buffer size and its high/low waterlines and per-TC
 * thresholds. Returns false when @rx_all is insufficient.
 * Non-DCB devices use a simpler fixed waterline scheme.
 */
1660 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1661 struct hclge_pkt_buf_alloc *buf_alloc,
1664 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1665 u32 tc_num = hclge_get_tc_num(hdev);
1666 u32 shared_buf, aligned_mps;
/* mps rounded up to the hardware's 256-byte buffer granularity */
1670 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1672 if (hnae3_dev_dcb_supported(hdev))
1673 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1676 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1677 + hdev->dv_buf_size;
/* also need one mps-sized slot per enabled TC plus one extra */
1679 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1680 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1681 HCLGE_BUF_SIZE_UNIT);
1683 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1684 if (rx_all < rx_priv + shared_std)
/* everything left after private buffers becomes the shared buffer */
1687 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1688 buf_alloc->s_buf.buf_size = shared_buf;
1689 if (hnae3_dev_dcb_supported(hdev)) {
1690 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1691 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1692 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1693 HCLGE_BUF_SIZE_UNIT);
1695 buf_alloc->s_buf.self.high = aligned_mps +
1696 HCLGE_NON_DCB_ADDITIONAL_BUF;
1697 buf_alloc->s_buf.self.low = aligned_mps;
1700 if (hnae3_dev_dcb_supported(hdev)) {
1701 hi_thrd = shared_buf - hdev->dv_buf_size;
/* with few TCs, reserve 10% headroom before dividing per TC */
1703 if (tc_num <= NEED_RESERVE_TC_NUM)
1704 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1708 hi_thrd = hi_thrd / tc_num;
1710 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1711 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1712 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1714 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1715 lo_thrd = aligned_mps;
1718 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1719 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1720 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
/* hclge_tx_buffer_calc - assign a fixed tx buffer to each enabled TC,
 * consuming from the total packet buffer; disabled TCs get zero.
 * Fails if the remaining budget cannot cover an enabled TC.
 */
1726 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1727 struct hclge_pkt_buf_alloc *buf_alloc)
1731 total_size = hdev->pkt_buf_size;
1733 /* alloc tx buffer for all enabled tc */
1734 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1735 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1737 if (hdev->hw_tc_map & BIT(i)) {
1738 if (total_size < hdev->tx_buf_size)
1741 priv->tx_buf_size = hdev->tx_buf_size;
1743 priv->tx_buf_size = 0;
1746 total_size -= priv->tx_buf_size;
/* hclge_rx_buf_calc_all - compute per-TC rx private buffers and
 * waterlines for every enabled TC.
 * @max: true uses generous (maximum) waterlines, false minimum ones;
 * PFC-enabled TCs get different waterlines than non-PFC TCs.
 * Returns whether the result fits the remaining rx budget.
 */
1752 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1753 struct hclge_pkt_buf_alloc *buf_alloc)
1755 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1756 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1759 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1760 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1767 if (!(hdev->hw_tc_map & BIT(i)))
1772 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1773 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1774 priv->wl.high = roundup(priv->wl.low + aligned_mps,
1775 HCLGE_BUF_SIZE_UNIT);
1778 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
/* private buffer must cover the high waterline plus dv headroom */
1782 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1785 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
/* hclge_drop_nopfc_buf_till_fit - free private buffers of non-PFC TCs,
 * highest TC first, until the rx allocation fits the budget or no
 * non-PFC private buffers remain.
 */
1788 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1789 struct hclge_pkt_buf_alloc *buf_alloc)
1791 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1792 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1795 /* let the last to be cleared first */
1796 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1797 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1798 unsigned int mask = BIT((unsigned int)i);
1800 if (hdev->hw_tc_map & mask &&
1801 !(hdev->tm_info.hw_pfc_map & mask)) {
1802 /* Clear the no pfc TC private buffer */
1810 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1811 no_pfc_priv_num == 0)
1815 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
/* hclge_drop_pfc_buf_till_fit - same strategy as the non-PFC variant,
 * but frees private buffers of PFC-enabled TCs (highest TC first) until
 * the rx allocation fits the budget.
 */
1818 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1819 struct hclge_pkt_buf_alloc *buf_alloc)
1821 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1822 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1825 /* let the last to be cleared first */
1826 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1827 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1828 unsigned int mask = BIT((unsigned int)i);
1830 if (hdev->hw_tc_map & mask &&
1831 hdev->tm_info.hw_pfc_map & mask) {
1832 /* Reduce the number of pfc TC with private buffer */
1840 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1845 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
/* hclge_only_alloc_priv_buff - last-resort strategy: give ALL remaining
 * rx buffer to per-TC private buffers (shared buffer set to 0),
 * splitting it evenly across enabled TCs. Succeeds only if each TC's
 * share meets the minimum (dv headroom + compensation buffer).
 */
1848 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
1849 struct hclge_pkt_buf_alloc *buf_alloc)
1851 #define COMPENSATE_BUFFER 0x3C00
1852 #define COMPENSATE_HALF_MPS_NUM 5
1853 #define PRIV_WL_GAP 0x1800
1855 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1856 u32 tc_num = hclge_get_tc_num(hdev);
1857 u32 half_mps = hdev->mps >> 1;
1862 rx_priv = rx_priv / tc_num;
/* with few TCs, keep 10% of the share in reserve */
1864 if (tc_num <= NEED_RESERVE_TC_NUM)
1865 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1867 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1868 COMPENSATE_HALF_MPS_NUM * half_mps;
1869 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1870 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
1872 if (rx_priv < min_rx_priv)
1875 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1876 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1883 if (!(hdev->hw_tc_map & BIT(i)))
1887 priv->buf_size = rx_priv;
1888 priv->wl.high = rx_priv - hdev->dv_buf_size;
1889 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
1892 buf_alloc->s_buf.buf_size = 0;
1897 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1898 * @hdev: pointer to struct hclge_dev
1899 * @buf_alloc: pointer to buffer calculation data
1900 * @return: 0: calculate successful, negative: fail
 *
 * Strategies are attempted in order of decreasing generosity:
 * private-only split, max waterlines, min waterlines, then dropping
 * non-PFC and finally PFC private buffers until the budget fits.
 */
1902 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1903 struct hclge_pkt_buf_alloc *buf_alloc)
1905 /* When DCB is not supported, rx private buffer is not allocated. */
1906 if (!hnae3_dev_dcb_supported(hdev)) {
1907 u32 rx_all = hdev->pkt_buf_size;
1909 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1910 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1916 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
1919 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1922 /* try to decrease the buffer size */
1923 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1926 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1929 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
/* hclge_rx_priv_buf_alloc - program the computed per-TC private and
 * shared rx buffer sizes into hardware (sizes in HCLGE_BUF_UNIT_S
 * units, with the per-TC enable bit set).
 */
1935 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1936 struct hclge_pkt_buf_alloc *buf_alloc)
1938 struct hclge_rx_priv_buff_cmd *req;
1939 struct hclge_desc desc;
1943 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1944 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1946 /* Alloc private buffer TCs */
1947 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1948 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1951 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1953 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1957 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1958 (1 << HCLGE_TC0_PRI_BUF_EN_B));
1960 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1962 dev_err(&hdev->pdev->dev,
1963 "rx private buffer alloc cmd failed %d\n", ret);
/* hclge_rx_priv_wl_config - program each TC's private-buffer high/low
 * waterlines. Two chained descriptors carry HCLGE_TC_NUM_ONE_DESC TCs
 * each and are sent as one command.
 */
1968 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1969 struct hclge_pkt_buf_alloc *buf_alloc)
1971 struct hclge_rx_priv_wl_buf *req;
1972 struct hclge_priv_buf *priv;
1973 struct hclge_desc desc[2];
1977 for (i = 0; i < 2; i++) {
1978 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1980 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1982 /* The first descriptor set the NEXT bit to 1 */
1984 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1986 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1988 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1989 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1991 priv = &buf_alloc->priv_buf[idx];
/* waterlines in HCLGE_BUF_UNIT_S units with the enable bit set */
1992 req->tc_wl[j].high =
1993 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1994 req->tc_wl[j].high |=
1995 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1997 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1998 req->tc_wl[j].low |=
1999 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2003 /* Send 2 descriptor at one time */
2004 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2006 dev_err(&hdev->pdev->dev,
2007 "rx private waterline config cmd failed %d\n",
/* hclge_common_thrd_config - program the shared-buffer per-TC high/low
 * thresholds, mirroring the two-descriptor chained layout used for the
 * private waterlines.
 */
2012 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2013 struct hclge_pkt_buf_alloc *buf_alloc)
2015 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2016 struct hclge_rx_com_thrd *req;
2017 struct hclge_desc desc[2];
2018 struct hclge_tc_thrd *tc;
2022 for (i = 0; i < 2; i++) {
2023 hclge_cmd_setup_basic_desc(&desc[i],
2024 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2025 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2027 /* The first descriptor set the NEXT bit to 1 */
2029 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2031 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2033 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2034 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2036 req->com_thrd[j].high =
2037 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2038 req->com_thrd[j].high |=
2039 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2040 req->com_thrd[j].low =
2041 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2042 req->com_thrd[j].low |=
2043 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2047 /* Send 2 descriptors at one time */
2048 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2050 dev_err(&hdev->pdev->dev,
2051 "common threshold config cmd failed %d\n", ret);
/* hclge_common_wl_config - program the shared buffer's own high/low
 * waterlines (single descriptor command).
 */
2055 static int hclge_common_wl_config(struct hclge_dev *hdev,
2056 struct hclge_pkt_buf_alloc *buf_alloc)
2058 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2059 struct hclge_rx_com_wl *req;
2060 struct hclge_desc desc;
2063 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2065 req = (struct hclge_rx_com_wl *)desc.data;
2066 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2067 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2069 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2070 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2072 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2074 dev_err(&hdev->pdev->dev,
2075 "common waterline config cmd failed %d\n", ret);
/* hclge_buffer_alloc - full packet-buffer provisioning sequence:
 * calculate and program tx buffers, then rx private buffers; on
 * DCB-capable devices also program private waterlines and common
 * thresholds; finally the common waterlines. Scratch state lives in a
 * heap-allocated hclge_pkt_buf_alloc.
 */
2080 int hclge_buffer_alloc(struct hclge_dev *hdev)
2082 struct hclge_pkt_buf_alloc *pkt_buf;
2085 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2089 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2091 dev_err(&hdev->pdev->dev,
2092 "could not calc tx buffer size for all TCs %d\n", ret);
2096 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2098 dev_err(&hdev->pdev->dev,
2099 "could not alloc tx buffers %d\n", ret);
2103 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2105 dev_err(&hdev->pdev->dev,
2106 "could not calc rx priv buffer size for all TCs %d\n",
2111 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2113 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2118 if (hnae3_dev_dcb_supported(hdev)) {
2119 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2121 dev_err(&hdev->pdev->dev,
2122 "could not configure rx private waterline %d\n",
2127 ret = hclge_common_thrd_config(hdev, pkt_buf);
2129 dev_err(&hdev->pdev->dev,
2130 "could not configure common threshold %d\n",
2136 ret = hclge_common_wl_config(hdev, pkt_buf);
2138 dev_err(&hdev->pdev->dev,
2139 "could not configure common waterline %d\n", ret);
/* hclge_init_roce_base_info - populate the vport's RoCE handle from the
 * NIC handle and the PF's RoCE MSI-X allocation; fails when not enough
 * MSI vectors remain for the RoCE client.
 */
2146 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2148 struct hnae3_handle *roce = &vport->roce;
2149 struct hnae3_handle *nic = &vport->nic;
2151 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2153 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2154 vport->back->num_msi_left == 0)
2157 roce->rinfo.base_vector = vport->back->roce_base_vector;
2159 roce->rinfo.netdev = nic->kinfo.netdev;
2160 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2162 roce->pdev = nic->pdev;
2163 roce->ae_algo = nic->ae_algo;
2164 roce->numa_node_mask = nic->numa_node_mask;
/* hclge_init_msi - allocate MSI/MSI-X interrupt vectors and the
 * bookkeeping arrays (vector_status, vector_irq). Accepts fewer
 * vectors than requested (with a warning); frees the vectors again if
 * either devres allocation fails.
 */
2169 static int hclge_init_msi(struct hclge_dev *hdev)
2171 struct pci_dev *pdev = hdev->pdev;
2175 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2176 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2179 "failed(%d) to allocate MSI/MSI-X vectors\n",
2183 if (vectors < hdev->num_msi)
2184 dev_warn(&hdev->pdev->dev,
2185 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2186 hdev->num_msi, vectors);
2188 hdev->num_msi = vectors;
2189 hdev->num_msi_left = vectors;
2190 hdev->base_msi_vector = pdev->irq;
/* RoCE vectors start at a fixed offset past the NIC's base vector */
2191 hdev->roce_base_vector = hdev->base_msi_vector +
2192 hdev->roce_base_msix_offset;
2194 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2195 sizeof(u16), GFP_KERNEL);
2196 if (!hdev->vector_status) {
2197 pci_free_irq_vectors(pdev);
/* mark every vector unowned until a vport claims it */
2201 for (i = 0; i < hdev->num_msi; i++)
2202 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2204 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2205 sizeof(int), GFP_KERNEL);
2206 if (!hdev->vector_irq) {
2207 pci_free_irq_vectors(pdev);
/* hclge_check_speed_dup - half duplex is only meaningful at 10/100M;
 * force full duplex for every other speed.
 */
2214 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2216 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2217 duplex = HCLGE_MAC_FULL;
/* hclge_cfg_mac_speed_dup_hw - write MAC speed and duplex to hardware.
 * The speed enum is translated to the firmware's encoding (1G=0, 10G=1,
 * 25G=2, 40G=3, 50G=4, 100G=5, 10M=6, 100M=7); an unknown speed is
 * rejected with an error.
 */
2222 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2225 struct hclge_config_mac_speed_dup_cmd *req;
2226 struct hclge_desc desc;
2229 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2231 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2234 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2237 case HCLGE_MAC_SPEED_10M:
2238 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2239 HCLGE_CFG_SPEED_S, 6);
2241 case HCLGE_MAC_SPEED_100M:
2242 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2243 HCLGE_CFG_SPEED_S, 7);
2245 case HCLGE_MAC_SPEED_1G:
2246 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2247 HCLGE_CFG_SPEED_S, 0);
2249 case HCLGE_MAC_SPEED_10G:
2250 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2251 HCLGE_CFG_SPEED_S, 1);
2253 case HCLGE_MAC_SPEED_25G:
2254 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2255 HCLGE_CFG_SPEED_S, 2);
2257 case HCLGE_MAC_SPEED_40G:
2258 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2259 HCLGE_CFG_SPEED_S, 3);
2261 case HCLGE_MAC_SPEED_50G:
2262 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2263 HCLGE_CFG_SPEED_S, 4);
2265 case HCLGE_MAC_SPEED_100G:
2266 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2267 HCLGE_CFG_SPEED_S, 5);
2270 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2274 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2277 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2279 dev_err(&hdev->pdev->dev,
2280 "mac speed/duplex config cmd failed %d.\n", ret);
/* hclge_cfg_mac_speed_dup - sanitise duplex for the speed, skip the
 * hardware write if nothing changed, otherwise program hardware and
 * cache the new speed/duplex in the MAC state.
 */
2287 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2291 duplex = hclge_check_speed_dup(duplex, speed);
2292 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2295 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2299 hdev->hw.mac.speed = speed;
2300 hdev->hw.mac.duplex = duplex;
/* hclge_cfg_mac_speed_dup_h - hnae3 ops wrapper: resolve the vport from
 * the handle and delegate to hclge_cfg_mac_speed_dup().
 */
2305 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2308 struct hclge_vport *vport = hclge_get_vport(handle);
2309 struct hclge_dev *hdev = vport->back;
2311 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
/* hclge_set_autoneg_en - enable/disable MAC autonegotiation via the
 * CONFIG_AN_MODE command.
 */
2314 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2316 struct hclge_config_auto_neg_cmd *req;
2317 struct hclge_desc desc;
2321 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2323 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2325 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2326 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2328 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2330 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
/* hclge_set_autoneg - hnae3 ops entry: reject the request when the port
 * does not support autoneg, otherwise program the MAC.
 */
2336 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2338 struct hclge_vport *vport = hclge_get_vport(handle);
2339 struct hclge_dev *hdev = vport->back;
2341 if (!hdev->hw.mac.support_autoneg) {
2343 dev_err(&hdev->pdev->dev,
2344 "autoneg is not supported by current port\n");
2351 return hclge_set_autoneg_en(hdev, enable);
/* hclge_get_autoneg - report autoneg state: from the attached PHY when
 * one exists, otherwise from the cached MAC state.
 */
2354 static int hclge_get_autoneg(struct hnae3_handle *handle)
2356 struct hclge_vport *vport = hclge_get_vport(handle);
2357 struct hclge_dev *hdev = vport->back;
2358 struct phy_device *phydev = hdev->hw.mac.phydev;
2361 return phydev->autoneg;
2363 return hdev->hw.mac.autoneg;
/* hclge_restart_autoneg - restart negotiation by bouncing the client:
 * notify DOWN, then UP.
 */
2366 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2368 struct hclge_vport *vport = hclge_get_vport(handle);
2369 struct hclge_dev *hdev = vport->back;
2372 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2374 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2377 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
/* hclge_halt_autoneg - pause (@halt true) or resume (@halt false)
 * autonegotiation, but only when the port supports and has autoneg on.
 */
2380 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2382 struct hclge_vport *vport = hclge_get_vport(handle);
2383 struct hclge_dev *hdev = vport->back;
2385 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2386 return hclge_set_autoneg_en(hdev, !halt);
/* hclge_set_fec_hw - translate the HNAE3 FEC mode bitmap into the
 * firmware's CONFIG_FEC_MODE command (auto bit, then RS or BaseR mode
 * field) and send it.
 */
2391 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2393 struct hclge_config_fec_cmd *req;
2394 struct hclge_desc desc;
2397 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2399 req = (struct hclge_config_fec_cmd *)desc.data;
2400 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2401 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2402 if (fec_mode & BIT(HNAE3_FEC_RS))
2403 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2404 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2405 if (fec_mode & BIT(HNAE3_FEC_BASER))
2406 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2407 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2409 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2411 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
/* hclge_set_fec - hnae3 ops entry: validate the requested FEC mode
 * against the MAC's ability, program hardware, and remember it as a
 * user-defined setting.
 */
2416 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2418 struct hclge_vport *vport = hclge_get_vport(handle);
2419 struct hclge_dev *hdev = vport->back;
2420 struct hclge_mac *mac = &hdev->hw.mac;
2423 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2424 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2428 ret = hclge_set_fec_hw(hdev, fec_mode);
2432 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
/* hclge_get_fec - report the MAC's FEC ability and current mode;
 * either output pointer may be NULL when the caller is not interested.
 */
2436 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2439 struct hclge_vport *vport = hclge_get_vport(handle);
2440 struct hclge_dev *hdev = vport->back;
2441 struct hclge_mac *mac = &hdev->hw.mac;
2444 *fec_ability = mac->fec_ability;
2446 *fec_mode = mac->fec_mode;
/* hclge_mac_init - (re)initialise the MAC: speed/duplex, autoneg (if
 * supported), user-chosen FEC mode (if any), MTU, and finally the
 * packet buffer allocation. Each step logs and propagates its error.
 */
2449 static int hclge_mac_init(struct hclge_dev *hdev)
2451 struct hclge_mac *mac = &hdev->hw.mac;
2454 hdev->support_sfp_query = true;
2455 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2456 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2457 hdev->hw.mac.duplex);
2459 dev_err(&hdev->pdev->dev,
2460 "Config mac speed dup fail ret=%d\n", ret);
2464 if (hdev->hw.mac.support_autoneg) {
2465 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2467 dev_err(&hdev->pdev->dev,
2468 "Config mac autoneg fail ret=%d\n", ret);
/* re-apply FEC only when the user explicitly configured one */
2475 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2476 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2478 dev_err(&hdev->pdev->dev,
2479 "Fec mode init fail, ret = %d\n", ret);
2484 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2486 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2490 ret = hclge_buffer_alloc(hdev);
2492 dev_err(&hdev->pdev->dev,
2493 "allocate buffer fail, ret=%d\n", ret);
/* hclge_mbx_task_schedule - queue the mailbox service task unless the
 * command queue is disabled; test_and_set_bit prevents double-queueing.
 */
2498 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2500 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2501 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2502 schedule_work(&hdev->mbx_service_task);
2505 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2507 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2508 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2509 schedule_work(&hdev->rst_service_task);
2512 static void hclge_task_schedule(struct hclge_dev *hdev)
2514 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2515 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2516 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2517 (void)schedule_work(&hdev->service_task);
2520 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2522 struct hclge_link_status_cmd *req;
2523 struct hclge_desc desc;
2527 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2528 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2530 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2535 req = (struct hclge_link_status_cmd *)desc.data;
2536 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2538 return !!link_status;
2541 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2543 unsigned int mac_state;
2546 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2549 mac_state = hclge_get_mac_link_status(hdev);
2551 if (hdev->hw.mac.phydev) {
2552 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2553 link_stat = mac_state &
2554 hdev->hw.mac.phydev->link;
2559 link_stat = mac_state;
/* hclge_update_link_status - push a changed link state to the nic client
 * (and roce client when registered) of every vport, and (re)arm the MAC
 * tunnel interrupt accordingly.
 * NOTE(review): the extract dropped the "state" local declaration/early
 * return when no nic client is registered — restore from upstream.
 */
2565 static void hclge_update_link_status(struct hclge_dev *hdev)
2567 struct hnae3_client *rclient = hdev->roce_client;
2568 struct hnae3_client *client = hdev->nic_client;
2569 struct hnae3_handle *rhandle;
2570 struct hnae3_handle *handle;
2576 state = hclge_get_mac_phy_link(hdev);
/* only notify clients when the state actually changed */
2577 if (state != hdev->hw.mac.link) {
2578 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2579 handle = &hdev->vport[i].nic;
2580 client->ops->link_status_change(handle, state);
2581 hclge_config_mac_tnl_int(hdev, state);
2582 rhandle = &hdev->vport[i].roce;
/* roce client is optional; only call if registered with the hook */
2583 if (rclient && rclient->ops->link_status_change)
2584 rclient->ops->link_status_change(rhandle,
2587 hdev->hw.mac.link = state;
2591 static void hclge_update_port_capability(struct hclge_mac *mac)
2593 /* update fec ability by speed */
2594 hclge_convert_setting_fec(mac);
2596 /* firmware can not identify back plane type, the media type
2597 * read from configuration can help deal it
2599 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2600 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2601 mac->module_type = HNAE3_MODULE_TYPE_KR;
2602 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2603 mac->module_type = HNAE3_MODULE_TYPE_TP;
2605 if (mac->support_autoneg == true) {
2606 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2607 linkmode_copy(mac->advertising, mac->supported);
2609 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2611 linkmode_zero(mac->advertising);
2615 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2617 struct hclge_sfp_info_cmd *resp;
2618 struct hclge_desc desc;
2621 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2622 resp = (struct hclge_sfp_info_cmd *)desc.data;
2623 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2624 if (ret == -EOPNOTSUPP) {
2625 dev_warn(&hdev->pdev->dev,
2626 "IMP do not support get SFP speed %d\n", ret);
2629 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2633 *speed = le32_to_cpu(resp->speed);
2638 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2640 struct hclge_sfp_info_cmd *resp;
2641 struct hclge_desc desc;
2644 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2645 resp = (struct hclge_sfp_info_cmd *)desc.data;
2647 resp->query_type = QUERY_ACTIVE_SPEED;
2649 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2650 if (ret == -EOPNOTSUPP) {
2651 dev_warn(&hdev->pdev->dev,
2652 "IMP does not support get SFP info %d\n", ret);
2655 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2659 mac->speed = le32_to_cpu(resp->speed);
2660 /* if resp->speed_ability is 0, it means it's an old version
2661 * firmware, do not update these params
2663 if (resp->speed_ability) {
2664 mac->module_type = le32_to_cpu(resp->module_type);
2665 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2666 mac->autoneg = resp->autoneg;
2667 mac->support_autoneg = resp->autoneg_ability;
2668 mac->speed_type = QUERY_ACTIVE_SPEED;
2669 if (!resp->active_fec)
2672 mac->fec_mode = BIT(resp->active_fec);
2674 mac->speed_type = QUERY_SFP_SPEED;
2680 static int hclge_update_port_info(struct hclge_dev *hdev)
2682 struct hclge_mac *mac = &hdev->hw.mac;
2683 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2686 /* get the port info from SFP cmd if not copper port */
2687 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2690 /* if IMP does not support get SFP/qSFP info, return directly */
2691 if (!hdev->support_sfp_query)
2694 if (hdev->pdev->revision >= 0x21)
2695 ret = hclge_get_sfp_info(hdev, mac);
2697 ret = hclge_get_sfp_speed(hdev, &speed);
2699 if (ret == -EOPNOTSUPP) {
2700 hdev->support_sfp_query = false;
2706 if (hdev->pdev->revision >= 0x21) {
2707 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2708 hclge_update_port_capability(mac);
2711 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2714 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2715 return 0; /* do nothing if no SFP */
2717 /* must config full duplex for SFP */
2718 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2722 static int hclge_get_status(struct hnae3_handle *handle)
2724 struct hclge_vport *vport = hclge_get_vport(handle);
2725 struct hclge_dev *hdev = vport->back;
2727 hclge_update_link_status(hdev);
2729 return hdev->hw.mac.link;
2732 static void hclge_service_timer(struct timer_list *t)
2734 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2736 mod_timer(&hdev->service_timer, jiffies + HZ);
2737 hdev->hw_stats.stats_timer++;
2738 hdev->fd_arfs_expire_timer++;
2739 hclge_task_schedule(hdev);
2742 static void hclge_service_complete(struct hclge_dev *hdev)
2744 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2746 /* Flush memory before next watchdog */
2747 smp_mb__before_atomic();
2748 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
/* hclge_check_event_cause - decode which source raised the shared misc
 * (vector 0) interrupt, in strict priority order: IMP reset, global
 * reset, MSI-X error, mailbox, then "other". Writes the value the caller
 * must use to clear the cause into *clearval and returns the event type.
 * NOTE(review): extract dropped brace/blank lines — restore from upstream.
 */
2751 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2753 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2755 /* fetch the events from their corresponding regs */
2756 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2757 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2758 msix_src_reg = hclge_read_dev(&hdev->hw,
2759 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2761 /* Assumption: If by any chance reset and mailbox events are reported
2762 * together then we will only process reset event in this go and will
2763 * defer the processing of the mailbox events. Since, we would have not
2764 * cleared RX CMDQ event this time we would receive again another
2765 * interrupt from H/W just for the mailbox.
2768 /* check for vector0 reset event sources */
2769 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2770 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2771 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
/* command queue must stay quiet until the IMP comes back */
2772 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2773 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2774 hdev->rst_stats.imp_rst_cnt++;
2775 return HCLGE_VECTOR0_EVENT_RST;
2778 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2779 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2780 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2781 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2782 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2783 hdev->rst_stats.global_rst_cnt++;
2784 return HCLGE_VECTOR0_EVENT_RST;
2787 /* check for vector0 msix event source */
2788 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2789 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2791 *clearval = msix_src_reg;
2792 return HCLGE_VECTOR0_EVENT_ERR;
2795 /* check for vector0 mailbox(=CMDQ RX) event source */
2796 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
/* clear only the RX bit so other CMDQ causes survive in *clearval */
2797 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2798 *clearval = cmdq_src_reg;
2799 return HCLGE_VECTOR0_EVENT_MBX;
2802 /* print other vector0 event source */
2803 dev_info(&hdev->pdev->dev,
2804 "CMDQ INT status:0x%x, other INT status:0x%x\n",
2805 cmdq_src_reg, msix_src_reg);
2806 *clearval = msix_src_reg;
2808 return HCLGE_VECTOR0_EVENT_OTHER;
2811 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2814 switch (event_type) {
2815 case HCLGE_VECTOR0_EVENT_RST:
2816 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2818 case HCLGE_VECTOR0_EVENT_MBX:
2819 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2826 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2828 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2829 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2830 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2831 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2832 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2835 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2837 writel(enable ? 1 : 0, vector->addr);
/* hclge_misc_irq_handle - top half for the shared misc (vector 0) IRQ.
 * Masks the vector, decodes the cause, dispatches to the reset/mailbox
 * work, and re-enables the vector only for causes it cleared itself
 * (reset causes are cleared later by the reset flow).
 * NOTE(review): extract dropped break/brace lines — restore from upstream.
 */
2840 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2842 struct hclge_dev *hdev = data;
2846 hclge_enable_vector(&hdev->misc_vector, false);
2847 event_cause = hclge_check_event_cause(hdev, &clearval);
2849 /* vector 0 interrupt is shared with reset and mailbox source events.*/
2850 switch (event_cause) {
2851 case HCLGE_VECTOR0_EVENT_ERR:
2852 /* we do not know what type of reset is required now. This could
2853 * only be decided after we fetch the type of errors which
2854 * caused this event. Therefore, we will do below for now:
2855 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2856 * have defered type of reset to be used.
2857 * 2. Schedule the reset serivce task.
2858 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2859 * will fetch the correct type of reset. This would be done
2860 * by first decoding the types of errors.
2862 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
/* ERR intentionally falls through to schedule the reset task */
2864 case HCLGE_VECTOR0_EVENT_RST:
2865 hclge_reset_task_schedule(hdev);
2867 case HCLGE_VECTOR0_EVENT_MBX:
2868 /* If we are here then,
2869 * 1. Either we are not handling any mbx task and we are not
2872 * 2. We could be handling a mbx task but nothing more is
2874 * In both cases, we should schedule mbx task as there are more
2875 * mbx messages reported by this interrupt.
2877 hclge_mbx_task_schedule(hdev);
2880 dev_warn(&hdev->pdev->dev,
2881 "received unknown or unhandled event of vector0\n");
2885 /* clear the source of interrupt if it is not cause by reset */
2887 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2888 hclge_clear_event_cause(hdev, event_cause, clearval);
2889 hclge_enable_vector(&hdev->misc_vector, true);
2895 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2897 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2898 dev_warn(&hdev->pdev->dev,
2899 "vector(vector_id %d) has been freed.\n", vector_id);
2903 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2904 hdev->num_msi_left += 1;
2905 hdev->num_msi_used -= 1;
2908 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2910 struct hclge_misc_vector *vector = &hdev->misc_vector;
2912 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2914 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2915 hdev->vector_status[0] = 0;
2917 hdev->num_msi_left -= 1;
2918 hdev->num_msi_used += 1;
2921 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2925 hclge_get_misc_vector(hdev);
2927 /* this would be explicitly freed in the end */
2928 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2929 0, "hclge_misc", hdev);
2931 hclge_free_vector(hdev, 0);
2932 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2933 hdev->misc_vector.vector_irq);
2939 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2941 free_irq(hdev->misc_vector.vector_irq, hdev);
2942 hclge_free_vector(hdev, 0);
2945 int hclge_notify_client(struct hclge_dev *hdev,
2946 enum hnae3_reset_notify_type type)
2948 struct hnae3_client *client = hdev->nic_client;
2951 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
2954 if (!client->ops->reset_notify)
2957 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2958 struct hnae3_handle *handle = &hdev->vport[i].nic;
2961 ret = client->ops->reset_notify(handle, type);
2963 dev_err(&hdev->pdev->dev,
2964 "notify nic client failed %d(%d)\n", type, ret);
2972 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2973 enum hnae3_reset_notify_type type)
2975 struct hnae3_client *client = hdev->roce_client;
2979 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
2982 if (!client->ops->reset_notify)
2985 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2986 struct hnae3_handle *handle = &hdev->vport[i].roce;
2988 ret = client->ops->reset_notify(handle, type);
2990 dev_err(&hdev->pdev->dev,
2991 "notify roce client failed %d(%d)",
/* hclge_reset_wait - poll until the hardware reports the current reset
 * type has completed (up to 200 * 100 ms). FLR is special-cased: it waits
 * on the software flr_state bit instead of a hardware register.
 * Returns 0 on completion, -EBUSY/-EINVAL style errors otherwise.
 * NOTE(review): extract dropped break/return lines in the switch and the
 * tail return — restore from upstream.
 */
3000 static int hclge_reset_wait(struct hclge_dev *hdev)
3002 #define HCLGE_RESET_WATI_MS 100
3003 #define HCLGE_RESET_WAIT_CNT 200
3004 u32 val, reg, reg_bit;
/* pick the status register/bit that tracks this reset type */
3007 switch (hdev->reset_type) {
3008 case HNAE3_IMP_RESET:
3009 reg = HCLGE_GLOBAL_RESET_REG;
3010 reg_bit = HCLGE_IMP_RESET_BIT;
3012 case HNAE3_GLOBAL_RESET:
3013 reg = HCLGE_GLOBAL_RESET_REG;
3014 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3016 case HNAE3_FUNC_RESET:
3017 reg = HCLGE_FUN_RST_ING;
3018 reg_bit = HCLGE_FUN_RST_ING_B;
3020 case HNAE3_FLR_RESET:
3023 dev_err(&hdev->pdev->dev,
3024 "Wait for unsupported reset type: %d\n",
/* FLR completion is signalled in software via flr_state, not a reg */
3029 if (hdev->reset_type == HNAE3_FLR_RESET) {
3030 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3031 cnt++ < HCLGE_RESET_WAIT_CNT)
3032 msleep(HCLGE_RESET_WATI_MS);
3034 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3035 dev_err(&hdev->pdev->dev,
3036 "flr wait timeout: %d\n", cnt);
/* hardware-tracked resets: poll the chosen bit until it clears */
3043 val = hclge_read_dev(&hdev->hw, reg);
3044 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3045 msleep(HCLGE_RESET_WATI_MS);
3046 val = hclge_read_dev(&hdev->hw, reg);
3050 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3051 dev_warn(&hdev->pdev->dev,
3052 "Wait for reset timeout: %d\n", hdev->reset_type);
3059 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3061 struct hclge_vf_rst_cmd *req;
3062 struct hclge_desc desc;
3064 req = (struct hclge_vf_rst_cmd *)desc.data;
3065 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3066 req->dest_vfid = func_id;
3071 return hclge_cmd_send(&hdev->hw, &desc, 1);
3074 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3078 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3079 struct hclge_vport *vport = &hdev->vport[i];
3082 /* Send cmd to set/clear VF's FUNC_RST_ING */
3083 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3085 dev_err(&hdev->pdev->dev,
3086 "set vf(%d) rst failed %d!\n",
3087 vport->vport_id, ret);
3091 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3094 /* Inform VF to process the reset.
3095 * hclge_inform_reset_assert_to_vf may fail if VF
3096 * driver is not loaded.
3098 ret = hclge_inform_reset_assert_to_vf(vport);
3100 dev_warn(&hdev->pdev->dev,
3101 "inform reset to vf(%d) failed %d!\n",
3102 vport->vport_id, ret);
3108 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3110 struct hclge_desc desc;
3111 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3114 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3115 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3116 req->fun_reset_vfid = func_id;
3118 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3120 dev_err(&hdev->pdev->dev,
3121 "send function reset cmd fail, status =%d\n", ret);
/* hclge_do_reset - actually assert the reset picked by the reset subtask:
 * global reset is written to hardware directly; PF reset and FLR are put
 * back on reset_pending and rechecked by the rescheduled reset task.
 * Bails out early if hardware still reports a reset in progress.
 * NOTE(review): extract dropped break/brace lines — restore from upstream.
 */
3126 static void hclge_do_reset(struct hclge_dev *hdev)
3128 struct hnae3_handle *handle = &hdev->vport[0].nic;
3129 struct pci_dev *pdev = hdev->pdev;
/* do not stack a new reset on top of an unfinished hardware reset */
3132 if (hclge_get_hw_reset_stat(handle)) {
3133 dev_info(&pdev->dev, "Hardware reset not finish\n");
3134 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3135 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3136 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3140 switch (hdev->reset_type) {
3141 case HNAE3_GLOBAL_RESET:
3142 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3143 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3144 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3145 dev_info(&pdev->dev, "Global Reset requested\n");
3147 case HNAE3_FUNC_RESET:
3148 dev_info(&pdev->dev, "PF Reset requested\n");
3149 /* schedule again to check later */
3150 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3151 hclge_reset_task_schedule(hdev);
3153 case HNAE3_FLR_RESET:
3154 dev_info(&pdev->dev, "FLR requested\n");
3155 /* schedule again to check later */
3156 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3157 hclge_reset_task_schedule(hdev);
3160 dev_warn(&pdev->dev,
3161 "Unsupported reset type: %d\n", hdev->reset_type);
/* hclge_get_reset_level - pick the highest-priority pending reset from
 * *addr (IMP > global > func > FLR), clearing that bit and any weaker
 * ones it supersedes. UNKNOWN is first resolved into a concrete type via
 * the MSI-X error decode. A level weaker than the reset already in
 * progress is reported as NONE so it is not downgraded mid-flight.
 */
3166 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3167 unsigned long *addr)
3169 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3170 struct hclge_dev *hdev = ae_dev->priv;
3172 /* first, resolve any unknown reset type to the known type(s) */
3173 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3174 /* we will intentionally ignore any errors from this function
3175 * as we will end up in *some* reset request in any case
3177 hclge_handle_hw_msix_error(hdev, addr);
3178 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3179 /* We defered the clearing of the error event which caused
3180 * interrupt since it was not posssible to do that in
3181 * interrupt context (and this is the reason we introduced
3182 * new UNKNOWN reset type). Now, the errors have been
3183 * handled and cleared in hardware we can safely enable
3184 * interrupts. This is an exception to the norm.
3186 hclge_enable_vector(&hdev->misc_vector, true);
3189 /* return the highest priority reset level amongst all */
3190 if (test_bit(HNAE3_IMP_RESET, addr)) {
3191 rst_level = HNAE3_IMP_RESET;
3192 clear_bit(HNAE3_IMP_RESET, addr);
3193 clear_bit(HNAE3_GLOBAL_RESET, addr);
3194 clear_bit(HNAE3_FUNC_RESET, addr);
3195 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3196 rst_level = HNAE3_GLOBAL_RESET;
3197 clear_bit(HNAE3_GLOBAL_RESET, addr);
3198 clear_bit(HNAE3_FUNC_RESET, addr);
3199 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3200 rst_level = HNAE3_FUNC_RESET;
3201 clear_bit(HNAE3_FUNC_RESET, addr);
3202 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3203 rst_level = HNAE3_FLR_RESET;
3204 clear_bit(HNAE3_FLR_RESET, addr);
/* never downgrade below the reset type currently being serviced */
3207 if (hdev->reset_type != HNAE3_NONE_RESET &&
3208 rst_level < hdev->reset_type)
3209 return HNAE3_NONE_RESET;
/* hclge_clear_reset_cause - after a reset completes, acknowledge its
 * interrupt cause bit and re-enable the misc vector. Only IMP and global
 * resets have a cause bit to clear.
 * NOTE(review): extract dropped the default case and the early return for
 * a zero clearval — restore from upstream.
 */
3214 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3218 switch (hdev->reset_type) {
3219 case HNAE3_IMP_RESET:
3220 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3222 case HNAE3_GLOBAL_RESET:
3223 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3232 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3233 hclge_enable_vector(&hdev->misc_vector, true);
3236 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3240 switch (hdev->reset_type) {
3241 case HNAE3_FUNC_RESET:
3243 case HNAE3_FLR_RESET:
3244 ret = hclge_set_all_vf_rst(hdev, true);
/* hclge_reset_prepare_wait - per-type work done just before waiting for
 * the hardware reset: assert the PF reset command / FLR state / IMP
 * interrupt bit, disable the command queue where required, then give
 * hardware a settling delay and signal "preparation done" via the CSQ
 * depth register.
 * NOTE(review): extract dropped break/brace lines — restore from upstream.
 */
3253 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3255 #define HCLGE_RESET_SYNC_TIME 100
3260 switch (hdev->reset_type) {
3261 case HNAE3_FUNC_RESET:
3262 /* There is no mechanism for PF to know if VF has stopped IO
3263 * for now, just wait 100 ms for VF to stop IO
3265 msleep(HCLGE_RESET_SYNC_TIME);
3266 ret = hclge_func_reset_cmd(hdev, 0);
3268 dev_err(&hdev->pdev->dev,
3269 "asserting function reset fail %d!\n", ret);
3273 /* After performaning pf reset, it is not necessary to do the
3274 * mailbox handling or send any command to firmware, because
3275 * any mailbox handling or command to firmware is only valid
3276 * after hclge_cmd_init is called.
3278 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3279 hdev->rst_stats.pf_rst_cnt++;
3281 case HNAE3_FLR_RESET:
3282 /* There is no mechanism for PF to know if VF has stopped IO
3283 * for now, just wait 100 ms for VF to stop IO
3285 msleep(HCLGE_RESET_SYNC_TIME);
3286 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3287 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3288 hdev->rst_stats.flr_rst_cnt++;
3290 case HNAE3_IMP_RESET:
/* tell the IMP the PF noticed its reset request */
3291 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3292 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3293 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3299 /* inform hardware that preparatory work is done */
3300 msleep(HCLGE_RESET_SYNC_TIME);
3301 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3302 HCLGE_NIC_CMQ_ENABLE);
3303 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
/* hclge_reset_err_handle - decide what to do after a failed reset pass:
 * retry while something is still pending / an IMP reset is on the way /
 * the fail count is below the limit; after 5 failures escalate to a
 * global reset via the reset timer; finally give up. Returns true when
 * the caller should reschedule the reset task.
 * NOTE(review): extract dropped the per-branch return statements and the
 * is_timeout handling — restore from upstream.
 */
3308 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3310 #define MAX_RESET_FAIL_CNT 5
/* something still pending: just let the reset task run again */
3312 if (hdev->reset_pending) {
3313 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3314 hdev->reset_pending);
3316 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3317 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3318 BIT(HCLGE_IMP_RESET_BIT))) {
/* an IMP reset is imminent; it will supersede this attempt */
3319 dev_info(&hdev->pdev->dev,
3320 "reset failed because IMP Reset is pending\n");
3321 hclge_clear_reset_cause(hdev);
3323 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3324 hdev->reset_fail_cnt++;
3326 set_bit(hdev->reset_type, &hdev->reset_pending);
3327 dev_info(&hdev->pdev->dev,
3328 "re-schedule to wait for hw reset done\n");
/* retries exhausted: escalate to a global reset via the timer */
3332 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3333 hclge_clear_reset_cause(hdev);
3334 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3335 mod_timer(&hdev->reset_timer,
3336 jiffies + HCLGE_RESET_INTERVAL);
3341 hclge_clear_reset_cause(hdev);
3342 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3346 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3350 switch (hdev->reset_type) {
3351 case HNAE3_FUNC_RESET:
3353 case HNAE3_FLR_RESET:
3354 ret = hclge_set_all_vf_rst(hdev, false);
3363 static int hclge_reset_stack(struct hclge_dev *hdev)
3367 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3371 ret = hclge_reset_ae_dev(hdev->ae_dev);
3375 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3379 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
/* hclge_reset - drive one full reset cycle: notify clients down, prepare
 * and assert the reset, wait for hardware, rebuild the stack, then bring
 * clients back up. On any failure hclge_reset_err_handle decides whether
 * to reschedule. The sequencing (down -> wait -> stack -> up) must not be
 * reordered.
 * NOTE(review): extract dropped the rtnl_lock/rtnl_unlock pairs and the
 * err_reset/err_reset_lock label bodies — restore from upstream.
 */
3382 static void hclge_reset(struct hclge_dev *hdev)
3384 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3385 bool is_timeout = false;
3388 /* Initialize ae_dev reset status as well, in case enet layer wants to
3389 * know if device is undergoing reset
3391 ae_dev->reset_type = hdev->reset_type;
3392 hdev->rst_stats.reset_cnt++;
3393 /* perform reset of the stack & ae device for a client */
3394 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3398 ret = hclge_reset_prepare_down(hdev);
3403 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3405 goto err_reset_lock;
3409 ret = hclge_reset_prepare_wait(hdev);
/* blocks until hardware reports the reset finished (or times out) */
3413 if (hclge_reset_wait(hdev)) {
3418 hdev->rst_stats.hw_reset_done_cnt++;
3420 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3426 ret = hclge_reset_stack(hdev);
3428 goto err_reset_lock;
3430 hclge_clear_reset_cause(hdev);
3432 ret = hclge_reset_prepare_up(hdev);
3434 goto err_reset_lock;
3438 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3439 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3442 if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3447 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3449 goto err_reset_lock;
3453 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
/* success: reset bookkeeping and stop the escalation timer */
3457 hdev->last_reset_time = jiffies;
3458 hdev->reset_fail_cnt = 0;
3459 hdev->rst_stats.reset_done_cnt++;
3460 ae_dev->reset_type = HNAE3_NONE_RESET;
3461 del_timer(&hdev->reset_timer);
3468 if (hclge_reset_err_handle(hdev, is_timeout))
3469 hclge_reset_task_schedule(hdev);
/* hclge_reset_event - entry point for reset requests from the stack or
 * from error recovery. Throttles repeats within HCLGE_RESET_INTERVAL,
 * honours any default_reset_request, treats a long-quiet device as a
 * fresh PF-reset request, then queues the reset task. reset_level is
 * escalated one step (up to GLOBAL) for the next repeat.
 * NOTE(review): extract dropped the else-branch assignments that set
 * hdev->reset_level in the first two cases — restore from upstream.
 */
3472 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3474 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3475 struct hclge_dev *hdev = ae_dev->priv;
3477 /* We might end up getting called broadly because of 2 below cases:
3478 * 1. Recoverable error was conveyed through APEI and only way to bring
3479 * normalcy is to reset.
3480 * 2. A new reset request from the stack due to timeout
3482 * For the first case,error event might not have ae handle available.
3483 * check if this is a new reset request and we are not here just because
3484 * last reset attempt did not succeed and watchdog hit us again. We will
3485 * know this if last reset request did not occur very recently (watchdog
3486 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
3487 * In case of new request we reset the "reset level" to PF reset.
3488 * And if it is a repeat reset request of the most recent one then we
3489 * want to make sure we throttle the reset request. Therefore, we will
3490 * not allow it again before 3*HZ times.
3493 handle = &hdev->vport[0].nic;
3495 if (time_before(jiffies, (hdev->last_reset_time +
3496 HCLGE_RESET_INTERVAL)))
3498 else if (hdev->default_reset_request)
3500 hclge_get_reset_level(ae_dev,
3501 &hdev->default_reset_request);
3502 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3503 hdev->reset_level = HNAE3_FUNC_RESET;
3505 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
3508 /* request reset & schedule reset task */
3509 set_bit(hdev->reset_level, &hdev->reset_request);
3510 hclge_reset_task_schedule(hdev);
/* escalate one level for the next repeated request, capped at GLOBAL */
3512 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3513 hdev->reset_level++;
3516 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3517 enum hnae3_reset_type rst_type)
3519 struct hclge_dev *hdev = ae_dev->priv;
3521 set_bit(rst_type, &hdev->default_reset_request);
3524 static void hclge_reset_timer(struct timer_list *t)
3526 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3528 dev_info(&hdev->pdev->dev,
3529 "triggering reset in reset timer\n");
3530 hclge_reset_event(hdev->pdev, NULL);
/* hclge_reset_subtask - body of the reset work item: first service any
 * reset already pending in hardware (reset_pending), then honour new
 * software requests (reset_request), leaving reset_type as NONE when
 * done.
 * NOTE(review): extract dropped the hclge_reset() call for the pending
 * case — restore from upstream.
 */
3533 static void hclge_reset_subtask(struct hclge_dev *hdev)
3535 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3537 /* check if there is any ongoing reset in the hardware. This status can
3538 * be checked from reset_pending. If there is then, we need to wait for
3539 * hardware to complete reset.
3540 * a. If we are able to figure out in reasonable time that hardware
3541 * has fully resetted then, we can proceed with driver, client
3543 * b. else, we can come back later to check this status so re-sched
3546 hdev->last_reset_time = jiffies;
3547 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3548 if (hdev->reset_type != HNAE3_NONE_RESET)
3551 /* check if we got any *new* reset requests to be honored */
3552 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3553 if (hdev->reset_type != HNAE3_NONE_RESET)
3554 hclge_do_reset(hdev);
3556 hdev->reset_type = HNAE3_NONE_RESET;
3559 static void hclge_reset_service_task(struct work_struct *work)
3561 struct hclge_dev *hdev =
3562 container_of(work, struct hclge_dev, rst_service_task);
3564 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3567 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3569 hclge_reset_subtask(hdev);
3571 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3574 static void hclge_mailbox_service_task(struct work_struct *work)
3576 struct hclge_dev *hdev =
3577 container_of(work, struct hclge_dev, mbx_service_task);
3579 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3582 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3584 hclge_mbx_handler(hdev);
3586 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3589 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3593 /* start from vport 1 for PF is always alive */
3594 for (i = 1; i < hdev->num_alloc_vport; i++) {
3595 struct hclge_vport *vport = &hdev->vport[i];
3597 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3598 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3600 /* If vf is not alive, set to default value */
3601 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3602 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
/* hclge_service_task - periodic (1 Hz) housekeeping: stats refresh on its
 * own interval, port/link/vport-alive updates, VLAN filter sync, and
 * aRFS flow-rule expiry on its interval; finally re-arms scheduling via
 * hclge_service_complete.
 */
3606 static void hclge_service_task(struct work_struct *work)
3608 struct hclge_dev *hdev =
3609 container_of(work, struct hclge_dev, service_task);
/* stats are refreshed only every HCLGE_STATS_TIMER_INTERVAL ticks */
3611 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3612 hclge_update_stats_for_all(hdev);
3613 hdev->hw_stats.stats_timer = 0;
3616 hclge_update_port_info(hdev);
3617 hclge_update_link_status(hdev);
3618 hclge_update_vport_alive(hdev);
3619 hclge_sync_vlan_filter(hdev);
3620 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3621 hclge_rfs_filter_expire(hdev);
3622 hdev->fd_arfs_expire_timer = 0;
3624 hclge_service_complete(hdev);
3627 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3629 /* VF handle has no client */
3630 if (!handle->client)
3631 return container_of(handle, struct hclge_vport, nic);
3632 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3633 return container_of(handle, struct hclge_vport, roce);
3635 return container_of(handle, struct hclge_vport, nic);
/* hclge_get_vector - hand out up to vector_num free MSI vectors (never
 * vector 0, which is the misc vector) to the requesting vport, filling
 * vector_info with the IRQ number and doorbell address of each.
 * NOTE(review): extract dropped the alloc counter/loop-exit lines and the
 * final return — restore from upstream.
 */
3638 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3639 struct hnae3_vector_info *vector_info)
3641 struct hclge_vport *vport = hclge_get_vport(handle);
3642 struct hnae3_vector_info *vector = vector_info;
3643 struct hclge_dev *hdev = vport->back;
3647 vector_num = min(hdev->num_msi_left, vector_num);
3649 for (j = 0; j < vector_num; j++) {
/* scan from 1: vector 0 is reserved for the misc interrupt */
3650 for (i = 1; i < hdev->num_msi; i++) {
3651 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3652 vector->vector = pci_irq_vector(hdev->pdev, i);
3653 vector->io_addr = hdev->hw.io_base +
3654 HCLGE_VECTOR_REG_BASE +
3655 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3657 HCLGE_VECTOR_VF_OFFSET;
3658 hdev->vector_status[i] = vport->vport_id;
3659 hdev->vector_irq[i] = vector->vector;
3668 hdev->num_msi_left -= alloc;
3669 hdev->num_msi_used += alloc;
3674 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3678 for (i = 0; i < hdev->num_msi; i++)
3679 if (vector == hdev->vector_irq[i])
3685 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3687 struct hclge_vport *vport = hclge_get_vport(handle);
3688 struct hclge_dev *hdev = vport->back;
3691 vector_id = hclge_get_vector_index(hdev, vector);
3692 if (vector_id < 0) {
3693 dev_err(&hdev->pdev->dev,
3694 "Get vector index fail. vector_id =%d\n", vector_id);
3698 hclge_free_vector(hdev, vector_id);
3703 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3705 return HCLGE_RSS_KEY_SIZE;
3708 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3710 return HCLGE_RSS_IND_TBL_SIZE;
/* hclge_set_rss_algo_key - program the RSS hash algorithm and key into
 * hardware, splitting the key across multiple commands of at most
 * HCLGE_RSS_HASH_KEY_NUM bytes each.
 * NOTE(review): extract dropped the key_offset increment, key_size local
 * and final return — restore from upstream.
 */
3713 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3714 const u8 hfunc, const u8 *key)
3716 struct hclge_rss_config_cmd *req;
3717 unsigned int key_offset = 0;
3718 struct hclge_desc desc;
3723 key_counts = HCLGE_RSS_KEY_SIZE;
3724 req = (struct hclge_rss_config_cmd *)desc.data;
/* one command per key chunk until the whole key is written */
3726 while (key_counts) {
3727 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
/* hash_config carries both the algorithm and this chunk's offset */
3730 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3731 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3733 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3734 memcpy(req->hash_key,
3735 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3737 key_counts -= key_size;
3739 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3741 dev_err(&hdev->pdev->dev,
3742 "Configure RSS config fail, status = %d\n",
3750 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3752 struct hclge_rss_indirection_table_cmd *req;
3753 struct hclge_desc desc;
3757 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3759 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3760 hclge_cmd_setup_basic_desc
3761 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3763 req->start_table_index =
3764 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3765 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3767 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3768 req->rss_result[j] =
3769 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3771 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3773 dev_err(&hdev->pdev->dev,
3774 "Configure rss indir table fail,status = %d\n",
3782 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3783 u16 *tc_size, u16 *tc_offset)
3785 struct hclge_rss_tc_mode_cmd *req;
3786 struct hclge_desc desc;
3790 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3791 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3793 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3796 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3797 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3798 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3799 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3800 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3802 req->rss_tc_mode[i] = cpu_to_le16(mode);
3805 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3807 dev_err(&hdev->pdev->dev,
3808 "Configure rss tc mode fail, status = %d\n", ret);
3813 static void hclge_get_rss_type(struct hclge_vport *vport)
3815 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3816 vport->rss_tuple_sets.ipv4_udp_en ||
3817 vport->rss_tuple_sets.ipv4_sctp_en ||
3818 vport->rss_tuple_sets.ipv6_tcp_en ||
3819 vport->rss_tuple_sets.ipv6_udp_en ||
3820 vport->rss_tuple_sets.ipv6_sctp_en)
3821 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3822 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3823 vport->rss_tuple_sets.ipv6_fragment_en)
3824 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3826 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3829 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3831 struct hclge_rss_input_tuple_cmd *req;
3832 struct hclge_desc desc;
3835 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3837 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3839 /* Get the tuple cfg from pf */
3840 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3841 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3842 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3843 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3844 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3845 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3846 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3847 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3848 hclge_get_rss_type(&hdev->vport[0]);
3849 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3851 dev_err(&hdev->pdev->dev,
3852 "Configure rss input fail, status = %d\n", ret);
3856 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3859 struct hclge_vport *vport = hclge_get_vport(handle);
3862 /* Get hash algorithm */
3864 switch (vport->rss_algo) {
3865 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3866 *hfunc = ETH_RSS_HASH_TOP;
3868 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3869 *hfunc = ETH_RSS_HASH_XOR;
3872 *hfunc = ETH_RSS_HASH_UNKNOWN;
3877 /* Get the RSS Key required by the user */
3879 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3881 /* Get indirect table */
3883 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3884 indir[i] = vport->rss_indirection_tbl[i];
3889 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3890 const u8 *key, const u8 hfunc)
3892 struct hclge_vport *vport = hclge_get_vport(handle);
3893 struct hclge_dev *hdev = vport->back;
3897 /* Set the RSS Hash Key if specififed by the user */
3900 case ETH_RSS_HASH_TOP:
3901 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3903 case ETH_RSS_HASH_XOR:
3904 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3906 case ETH_RSS_HASH_NO_CHANGE:
3907 hash_algo = vport->rss_algo;
3913 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3917 /* Update the shadow RSS key with user specified qids */
3918 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3919 vport->rss_algo = hash_algo;
3922 /* Update the shadow RSS table with user specified qids */
3923 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3924 vport->rss_indirection_tbl[i] = indir[i];
3926 /* Update the hardware */
3927 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3930 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3932 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3934 if (nfc->data & RXH_L4_B_2_3)
3935 hash_sets |= HCLGE_D_PORT_BIT;
3937 hash_sets &= ~HCLGE_D_PORT_BIT;
3939 if (nfc->data & RXH_IP_SRC)
3940 hash_sets |= HCLGE_S_IP_BIT;
3942 hash_sets &= ~HCLGE_S_IP_BIT;
3944 if (nfc->data & RXH_IP_DST)
3945 hash_sets |= HCLGE_D_IP_BIT;
3947 hash_sets &= ~HCLGE_D_IP_BIT;
3949 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3950 hash_sets |= HCLGE_V_TAG_BIT;
3955 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3956 struct ethtool_rxnfc *nfc)
3958 struct hclge_vport *vport = hclge_get_vport(handle);
3959 struct hclge_dev *hdev = vport->back;
3960 struct hclge_rss_input_tuple_cmd *req;
3961 struct hclge_desc desc;
3965 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3966 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3969 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3970 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3972 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3973 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3974 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3975 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3976 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3977 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3978 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3979 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3981 tuple_sets = hclge_get_rss_hash_bits(nfc);
3982 switch (nfc->flow_type) {
3984 req->ipv4_tcp_en = tuple_sets;
3987 req->ipv6_tcp_en = tuple_sets;
3990 req->ipv4_udp_en = tuple_sets;
3993 req->ipv6_udp_en = tuple_sets;
3996 req->ipv4_sctp_en = tuple_sets;
3999 if ((nfc->data & RXH_L4_B_0_1) ||
4000 (nfc->data & RXH_L4_B_2_3))
4003 req->ipv6_sctp_en = tuple_sets;
4006 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4009 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4015 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4017 dev_err(&hdev->pdev->dev,
4018 "Set rss tuple fail, status = %d\n", ret);
4022 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4023 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4024 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4025 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4026 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4027 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4028 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4029 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4030 hclge_get_rss_type(vport);
4034 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4035 struct ethtool_rxnfc *nfc)
4037 struct hclge_vport *vport = hclge_get_vport(handle);
4042 switch (nfc->flow_type) {
4044 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4047 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4050 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4053 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4056 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4059 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4063 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4072 if (tuple_sets & HCLGE_D_PORT_BIT)
4073 nfc->data |= RXH_L4_B_2_3;
4074 if (tuple_sets & HCLGE_S_PORT_BIT)
4075 nfc->data |= RXH_L4_B_0_1;
4076 if (tuple_sets & HCLGE_D_IP_BIT)
4077 nfc->data |= RXH_IP_DST;
4078 if (tuple_sets & HCLGE_S_IP_BIT)
4079 nfc->data |= RXH_IP_SRC;
4084 static int hclge_get_tc_size(struct hnae3_handle *handle)
4086 struct hclge_vport *vport = hclge_get_vport(handle);
4087 struct hclge_dev *hdev = vport->back;
4089 return hdev->rss_size_max;
4092 int hclge_rss_init_hw(struct hclge_dev *hdev)
4094 struct hclge_vport *vport = hdev->vport;
4095 u8 *rss_indir = vport[0].rss_indirection_tbl;
4096 u16 rss_size = vport[0].alloc_rss_size;
4097 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4098 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4099 u8 *key = vport[0].rss_hash_key;
4100 u8 hfunc = vport[0].rss_algo;
4101 u16 tc_valid[HCLGE_MAX_TC_NUM];
4106 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4110 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4114 ret = hclge_set_rss_input_tuple(hdev);
4118 /* Each TC have the same queue size, and tc_size set to hardware is
4119 * the log2 of roundup power of two of rss_size, the acutal queue
4120 * size is limited by indirection table.
4122 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4123 dev_err(&hdev->pdev->dev,
4124 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4129 roundup_size = roundup_pow_of_two(rss_size);
4130 roundup_size = ilog2(roundup_size);
4132 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4135 if (!(hdev->hw_tc_map & BIT(i)))
4139 tc_size[i] = roundup_size;
4140 tc_offset[i] = rss_size * i;
4143 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4146 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4148 struct hclge_vport *vport = hdev->vport;
4151 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4152 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4153 vport[j].rss_indirection_tbl[i] =
4154 i % vport[j].alloc_rss_size;
4158 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4160 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4161 struct hclge_vport *vport = hdev->vport;
4163 if (hdev->pdev->revision >= 0x21)
4164 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4166 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4167 vport[i].rss_tuple_sets.ipv4_tcp_en =
4168 HCLGE_RSS_INPUT_TUPLE_OTHER;
4169 vport[i].rss_tuple_sets.ipv4_udp_en =
4170 HCLGE_RSS_INPUT_TUPLE_OTHER;
4171 vport[i].rss_tuple_sets.ipv4_sctp_en =
4172 HCLGE_RSS_INPUT_TUPLE_SCTP;
4173 vport[i].rss_tuple_sets.ipv4_fragment_en =
4174 HCLGE_RSS_INPUT_TUPLE_OTHER;
4175 vport[i].rss_tuple_sets.ipv6_tcp_en =
4176 HCLGE_RSS_INPUT_TUPLE_OTHER;
4177 vport[i].rss_tuple_sets.ipv6_udp_en =
4178 HCLGE_RSS_INPUT_TUPLE_OTHER;
4179 vport[i].rss_tuple_sets.ipv6_sctp_en =
4180 HCLGE_RSS_INPUT_TUPLE_SCTP;
4181 vport[i].rss_tuple_sets.ipv6_fragment_en =
4182 HCLGE_RSS_INPUT_TUPLE_OTHER;
4184 vport[i].rss_algo = rss_algo;
4186 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4187 HCLGE_RSS_KEY_SIZE);
4190 hclge_rss_indir_init_cfg(hdev);
4193 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4194 int vector_id, bool en,
4195 struct hnae3_ring_chain_node *ring_chain)
4197 struct hclge_dev *hdev = vport->back;
4198 struct hnae3_ring_chain_node *node;
4199 struct hclge_desc desc;
4200 struct hclge_ctrl_vector_chain_cmd *req
4201 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4202 enum hclge_cmd_status status;
4203 enum hclge_opcode_type op;
4204 u16 tqp_type_and_id;
4207 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4208 hclge_cmd_setup_basic_desc(&desc, op, false);
4209 req->int_vector_id = vector_id;
4212 for (node = ring_chain; node; node = node->next) {
4213 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4214 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4216 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4217 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4218 HCLGE_TQP_ID_S, node->tqp_index);
4219 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4221 hnae3_get_field(node->int_gl_idx,
4222 HNAE3_RING_GL_IDX_M,
4223 HNAE3_RING_GL_IDX_S));
4224 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4225 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4226 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4227 req->vfid = vport->vport_id;
4229 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4231 dev_err(&hdev->pdev->dev,
4232 "Map TQP fail, status is %d.\n",
4238 hclge_cmd_setup_basic_desc(&desc,
4241 req->int_vector_id = vector_id;
4246 req->int_cause_num = i;
4247 req->vfid = vport->vport_id;
4248 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4250 dev_err(&hdev->pdev->dev,
4251 "Map TQP fail, status is %d.\n", status);
4259 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4260 struct hnae3_ring_chain_node *ring_chain)
4262 struct hclge_vport *vport = hclge_get_vport(handle);
4263 struct hclge_dev *hdev = vport->back;
4266 vector_id = hclge_get_vector_index(hdev, vector);
4267 if (vector_id < 0) {
4268 dev_err(&hdev->pdev->dev,
4269 "Get vector index fail. vector_id =%d\n", vector_id);
4273 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4276 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4277 struct hnae3_ring_chain_node *ring_chain)
4279 struct hclge_vport *vport = hclge_get_vport(handle);
4280 struct hclge_dev *hdev = vport->back;
4283 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4286 vector_id = hclge_get_vector_index(hdev, vector);
4287 if (vector_id < 0) {
4288 dev_err(&handle->pdev->dev,
4289 "Get vector index fail. ret =%d\n", vector_id);
4293 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4295 dev_err(&handle->pdev->dev,
4296 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4302 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4303 struct hclge_promisc_param *param)
4305 struct hclge_promisc_cfg_cmd *req;
4306 struct hclge_desc desc;
4309 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4311 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4312 req->vf_id = param->vf_id;
4314 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4315 * pdev revision(0x20), new revision support them. The
4316 * value of this two fields will not return error when driver
4317 * send command to fireware in revision(0x20).
4319 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4320 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4322 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4324 dev_err(&hdev->pdev->dev,
4325 "Set promisc mode fail, status is %d.\n", ret);
4330 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4331 bool en_mc, bool en_bc, int vport_id)
4336 memset(param, 0, sizeof(struct hclge_promisc_param));
4338 param->enable = HCLGE_PROMISC_EN_UC;
4340 param->enable |= HCLGE_PROMISC_EN_MC;
4342 param->enable |= HCLGE_PROMISC_EN_BC;
4343 param->vf_id = vport_id;
4346 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4349 struct hclge_vport *vport = hclge_get_vport(handle);
4350 struct hclge_dev *hdev = vport->back;
4351 struct hclge_promisc_param param;
4352 bool en_bc_pmc = true;
4354 /* For revision 0x20, if broadcast promisc enabled, vlan filter is
4355 * always bypassed. So broadcast promisc should be disabled until
4356 * user enable promisc mode
4358 if (handle->pdev->revision == 0x20)
4359 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4361 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4363 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4366 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4368 struct hclge_get_fd_mode_cmd *req;
4369 struct hclge_desc desc;
4372 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4374 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4376 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4378 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4382 *fd_mode = req->mode;
4387 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4388 u32 *stage1_entry_num,
4389 u32 *stage2_entry_num,
4390 u16 *stage1_counter_num,
4391 u16 *stage2_counter_num)
4393 struct hclge_get_fd_allocation_cmd *req;
4394 struct hclge_desc desc;
4397 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4399 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4401 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4403 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4408 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4409 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4410 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4411 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4416 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4418 struct hclge_set_fd_key_config_cmd *req;
4419 struct hclge_fd_key_cfg *stage;
4420 struct hclge_desc desc;
4423 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4425 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4426 stage = &hdev->fd_cfg.key_cfg[stage_num];
4427 req->stage = stage_num;
4428 req->key_select = stage->key_sel;
4429 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4430 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4431 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4432 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4433 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4434 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4436 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4438 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4443 static int hclge_init_fd_config(struct hclge_dev *hdev)
4445 #define LOW_2_WORDS 0x03
4446 struct hclge_fd_key_cfg *key_cfg;
4449 if (!hnae3_dev_fd_supported(hdev))
4452 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4456 switch (hdev->fd_cfg.fd_mode) {
4457 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4458 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4460 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4461 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4464 dev_err(&hdev->pdev->dev,
4465 "Unsupported flow director mode %d\n",
4466 hdev->fd_cfg.fd_mode);
4470 hdev->fd_cfg.proto_support =
4471 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4472 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4473 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4474 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
4475 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4476 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4477 key_cfg->outer_sipv6_word_en = 0;
4478 key_cfg->outer_dipv6_word_en = 0;
4480 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4481 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4482 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4483 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4485 /* If use max 400bit key, we can support tuples for ether type */
4486 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4487 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4488 key_cfg->tuple_active |=
4489 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4492 /* roce_type is used to filter roce frames
4493 * dst_vport is used to specify the rule
4495 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4497 ret = hclge_get_fd_allocation(hdev,
4498 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4499 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4500 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4501 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4505 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4508 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4509 int loc, u8 *key, bool is_add)
4511 struct hclge_fd_tcam_config_1_cmd *req1;
4512 struct hclge_fd_tcam_config_2_cmd *req2;
4513 struct hclge_fd_tcam_config_3_cmd *req3;
4514 struct hclge_desc desc[3];
4517 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4518 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4519 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4520 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4521 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4523 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4524 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4525 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4527 req1->stage = stage;
4528 req1->xy_sel = sel_x ? 1 : 0;
4529 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4530 req1->index = cpu_to_le32(loc);
4531 req1->entry_vld = sel_x ? is_add : 0;
4534 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4535 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4536 sizeof(req2->tcam_data));
4537 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4538 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4541 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4543 dev_err(&hdev->pdev->dev,
4544 "config tcam key fail, ret=%d\n",
4550 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4551 struct hclge_fd_ad_data *action)
4553 struct hclge_fd_ad_config_cmd *req;
4554 struct hclge_desc desc;
4558 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4560 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4561 req->index = cpu_to_le32(loc);
4564 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4565 action->write_rule_id_to_bd);
4566 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4569 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4570 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4571 action->forward_to_direct_queue);
4572 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4574 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4575 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4576 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4577 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4578 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4579 action->counter_id);
4581 req->ad_data = cpu_to_le64(ad_data);
4582 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4584 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4589 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4590 struct hclge_fd_rule *rule)
4592 u16 tmp_x_s, tmp_y_s;
4593 u32 tmp_x_l, tmp_y_l;
4596 if (rule->unused_tuple & tuple_bit)
4599 switch (tuple_bit) {
4602 case BIT(INNER_DST_MAC):
4603 for (i = 0; i < ETH_ALEN; i++) {
4604 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4605 rule->tuples_mask.dst_mac[i]);
4606 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4607 rule->tuples_mask.dst_mac[i]);
4611 case BIT(INNER_SRC_MAC):
4612 for (i = 0; i < ETH_ALEN; i++) {
4613 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4614 rule->tuples.src_mac[i]);
4615 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4616 rule->tuples.src_mac[i]);
4620 case BIT(INNER_VLAN_TAG_FST):
4621 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4622 rule->tuples_mask.vlan_tag1);
4623 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4624 rule->tuples_mask.vlan_tag1);
4625 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4626 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4629 case BIT(INNER_ETH_TYPE):
4630 calc_x(tmp_x_s, rule->tuples.ether_proto,
4631 rule->tuples_mask.ether_proto);
4632 calc_y(tmp_y_s, rule->tuples.ether_proto,
4633 rule->tuples_mask.ether_proto);
4634 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4635 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4638 case BIT(INNER_IP_TOS):
4639 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4640 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4643 case BIT(INNER_IP_PROTO):
4644 calc_x(*key_x, rule->tuples.ip_proto,
4645 rule->tuples_mask.ip_proto);
4646 calc_y(*key_y, rule->tuples.ip_proto,
4647 rule->tuples_mask.ip_proto);
4650 case BIT(INNER_SRC_IP):
4651 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4652 rule->tuples_mask.src_ip[IPV4_INDEX]);
4653 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4654 rule->tuples_mask.src_ip[IPV4_INDEX]);
4655 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4656 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4659 case BIT(INNER_DST_IP):
4660 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4661 rule->tuples_mask.dst_ip[IPV4_INDEX]);
4662 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4663 rule->tuples_mask.dst_ip[IPV4_INDEX]);
4664 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4665 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4668 case BIT(INNER_SRC_PORT):
4669 calc_x(tmp_x_s, rule->tuples.src_port,
4670 rule->tuples_mask.src_port);
4671 calc_y(tmp_y_s, rule->tuples.src_port,
4672 rule->tuples_mask.src_port);
4673 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4674 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4677 case BIT(INNER_DST_PORT):
4678 calc_x(tmp_x_s, rule->tuples.dst_port,
4679 rule->tuples_mask.dst_port);
4680 calc_y(tmp_y_s, rule->tuples.dst_port,
4681 rule->tuples_mask.dst_port);
4682 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4683 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4691 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4692 u8 vf_id, u8 network_port_id)
4694 u32 port_number = 0;
4696 if (port_type == HOST_PORT) {
4697 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4699 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4701 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4703 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4704 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4705 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4711 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4712 __le32 *key_x, __le32 *key_y,
4713 struct hclge_fd_rule *rule)
4715 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4716 u8 cur_pos = 0, tuple_size, shift_bits;
4719 for (i = 0; i < MAX_META_DATA; i++) {
4720 tuple_size = meta_data_key_info[i].key_length;
4721 tuple_bit = key_cfg->meta_data_active & BIT(i);
4723 switch (tuple_bit) {
4724 case BIT(ROCE_TYPE):
4725 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4726 cur_pos += tuple_size;
4728 case BIT(DST_VPORT):
4729 port_number = hclge_get_port_number(HOST_PORT, 0,
4731 hnae3_set_field(meta_data,
4732 GENMASK(cur_pos + tuple_size, cur_pos),
4733 cur_pos, port_number);
4734 cur_pos += tuple_size;
4741 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4742 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4743 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4745 *key_x = cpu_to_le32(tmp_x << shift_bits);
4746 *key_y = cpu_to_le32(tmp_y << shift_bits);
4749 /* A complete key is combined with meta data key and tuple key.
4750 * Meta data key is stored at the MSB region, and tuple key is stored at
4751 * the LSB region, unused bits will be filled 0.
4753 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4754 struct hclge_fd_rule *rule)
4756 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4757 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4758 u8 *cur_key_x, *cur_key_y;
4760 int ret, tuple_size;
4761 u8 meta_data_region;
4763 memset(key_x, 0, sizeof(key_x));
4764 memset(key_y, 0, sizeof(key_y));
4768 for (i = 0 ; i < MAX_TUPLE; i++) {
4772 tuple_size = tuple_key_info[i].key_length / 8;
4773 check_tuple = key_cfg->tuple_active & BIT(i);
4775 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4778 cur_key_x += tuple_size;
4779 cur_key_y += tuple_size;
4783 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4784 MAX_META_DATA_LENGTH / 8;
4786 hclge_fd_convert_meta_data(key_cfg,
4787 (__le32 *)(key_x + meta_data_region),
4788 (__le32 *)(key_y + meta_data_region),
4791 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4794 dev_err(&hdev->pdev->dev,
4795 "fd key_y config fail, loc=%d, ret=%d\n",
4796 rule->queue_id, ret);
4800 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4803 dev_err(&hdev->pdev->dev,
4804 "fd key_x config fail, loc=%d, ret=%d\n",
4805 rule->queue_id, ret);
4809 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4810 struct hclge_fd_rule *rule)
4812 struct hclge_fd_ad_data ad_data;
4814 ad_data.ad_id = rule->location;
4816 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4817 ad_data.drop_packet = true;
4818 ad_data.forward_to_direct_queue = false;
4819 ad_data.queue_id = 0;
4821 ad_data.drop_packet = false;
4822 ad_data.forward_to_direct_queue = true;
4823 ad_data.queue_id = rule->queue_id;
4826 ad_data.use_counter = false;
4827 ad_data.counter_id = 0;
4829 ad_data.use_next_stage = false;
4830 ad_data.next_input_key = 0;
4832 ad_data.write_rule_id_to_bd = true;
4833 ad_data.rule_id = rule->location;
4835 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4838 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4839 struct ethtool_rx_flow_spec *fs, u32 *unused)
4841 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4842 struct ethtool_usrip4_spec *usr_ip4_spec;
4843 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4844 struct ethtool_usrip6_spec *usr_ip6_spec;
4845 struct ethhdr *ether_spec;
4847 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4850 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4853 if ((fs->flow_type & FLOW_EXT) &&
4854 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4855 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4859 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4863 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4864 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4866 if (!tcp_ip4_spec->ip4src)
4867 *unused |= BIT(INNER_SRC_IP);
4869 if (!tcp_ip4_spec->ip4dst)
4870 *unused |= BIT(INNER_DST_IP);
4872 if (!tcp_ip4_spec->psrc)
4873 *unused |= BIT(INNER_SRC_PORT);
4875 if (!tcp_ip4_spec->pdst)
4876 *unused |= BIT(INNER_DST_PORT);
4878 if (!tcp_ip4_spec->tos)
4879 *unused |= BIT(INNER_IP_TOS);
4883 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4884 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4885 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4887 if (!usr_ip4_spec->ip4src)
4888 *unused |= BIT(INNER_SRC_IP);
4890 if (!usr_ip4_spec->ip4dst)
4891 *unused |= BIT(INNER_DST_IP);
4893 if (!usr_ip4_spec->tos)
4894 *unused |= BIT(INNER_IP_TOS);
4896 if (!usr_ip4_spec->proto)
4897 *unused |= BIT(INNER_IP_PROTO);
4899 if (usr_ip4_spec->l4_4_bytes)
4902 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4909 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4910 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4913 /* check whether src/dst ip address used */
4914 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4915 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4916 *unused |= BIT(INNER_SRC_IP);
4918 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4919 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4920 *unused |= BIT(INNER_DST_IP);
4922 if (!tcp_ip6_spec->psrc)
4923 *unused |= BIT(INNER_SRC_PORT);
4925 if (!tcp_ip6_spec->pdst)
4926 *unused |= BIT(INNER_DST_PORT);
4928 if (tcp_ip6_spec->tclass)
4932 case IPV6_USER_FLOW:
4933 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4934 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4935 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4936 BIT(INNER_DST_PORT);
4938 /* check whether src/dst ip address used */
4939 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4940 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4941 *unused |= BIT(INNER_SRC_IP);
4943 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4944 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4945 *unused |= BIT(INNER_DST_IP);
4947 if (!usr_ip6_spec->l4_proto)
4948 *unused |= BIT(INNER_IP_PROTO);
4950 if (usr_ip6_spec->tclass)
4953 if (usr_ip6_spec->l4_4_bytes)
4958 ether_spec = &fs->h_u.ether_spec;
4959 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4960 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4961 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4963 if (is_zero_ether_addr(ether_spec->h_source))
4964 *unused |= BIT(INNER_SRC_MAC);
4966 if (is_zero_ether_addr(ether_spec->h_dest))
4967 *unused |= BIT(INNER_DST_MAC);
4969 if (!ether_spec->h_proto)
4970 *unused |= BIT(INNER_ETH_TYPE);
4977 if ((fs->flow_type & FLOW_EXT)) {
4978 if (fs->h_ext.vlan_etype)
4980 if (!fs->h_ext.vlan_tci)
4981 *unused |= BIT(INNER_VLAN_TAG_FST);
4983 if (fs->m_ext.vlan_tci) {
4984 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4988 *unused |= BIT(INNER_VLAN_TAG_FST);
4991 if (fs->flow_type & FLOW_MAC_EXT) {
4992 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4995 if (is_zero_ether_addr(fs->h_ext.h_dest))
4996 *unused |= BIT(INNER_DST_MAC);
4998 *unused &= ~(BIT(INNER_DST_MAC));
/* Return true if a flow-director rule already occupies @location.
 * Walks the location-sorted fd_rule_list under fd_rule_lock and stops at
 * the first rule whose location is >= the requested one.
 */
5004 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5006 struct hclge_fd_rule *rule = NULL;
5007 struct hlist_node *node2;
5009 spin_lock_bh(&hdev->fd_rule_lock);
5010 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5011 if (rule->location >= location)
5015 spin_unlock_bh(&hdev->fd_rule_lock);
/* rule points at the first entry with location >= @location (or is NULL) */
5017 return rule && rule->location == location;
5020 /* make sure being called after lock up with fd_rule_lock */
/* Insert (@is_add) or remove a rule at @location in the sorted rule list,
 * keeping hclge_fd_rule_num, fd_bmap and fd_active_type in sync.
 * Caller holds fd_rule_lock.
 */
5021 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5022 struct hclge_fd_rule *new_rule,
5026 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5027 struct hlist_node *node2;
/* adding requires a rule to add */
5029 if (is_add && !new_rule)
/* find insertion point: first rule with location >= @location */
5032 hlist_for_each_entry_safe(rule, node2,
5033 &hdev->fd_rule_list, rule_node) {
5034 if (rule->location >= location)
/* an existing rule at this location is always removed first */
5039 if (rule && rule->location == location) {
5040 hlist_del(&rule->rule_node);
5042 hdev->hclge_fd_rule_num--;
/* deleting the last rule clears the active-type marker */
5045 if (!hdev->hclge_fd_rule_num)
5046 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5047 clear_bit(location, hdev->fd_bmap);
5051 } else if (!is_add) {
5052 dev_err(&hdev->pdev->dev,
5053 "delete fail, rule %d is inexistent\n",
/* link the new rule after its predecessor to keep the list sorted */
5058 INIT_HLIST_NODE(&new_rule->rule_node);
5061 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5063 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5065 set_bit(location, hdev->fd_bmap);
5066 hdev->hclge_fd_rule_num++;
5067 hdev->fd_active_type = new_rule->rule_type;
/* Translate an ethtool rx flow spec (@fs) into the driver's internal
 * tuple/mask representation in @rule. Multi-byte fields are converted
 * from big-endian wire order to CPU order here; the ethtool spec keeps
 * them in network byte order.
 */
5072 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5073 struct ethtool_rx_flow_spec *fs,
5074 struct hclge_fd_rule *rule)
5076 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5078 switch (flow_type) {
/* SCTP/TCP/UDP over IPv4: 5-tuple plus TOS */
5082 rule->tuples.src_ip[IPV4_INDEX] =
5083 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5084 rule->tuples_mask.src_ip[IPV4_INDEX] =
5085 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5087 rule->tuples.dst_ip[IPV4_INDEX] =
5088 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5089 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5090 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5092 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5093 rule->tuples_mask.src_port =
5094 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5096 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5097 rule->tuples_mask.dst_port =
5098 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5100 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5101 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
/* L2 ethertype is implied by the flow type; match it exactly */
5103 rule->tuples.ether_proto = ETH_P_IP;
5104 rule->tuples_mask.ether_proto = 0xFFFF;
/* raw IPv4: addresses, TOS and explicit L4 protocol */
5108 rule->tuples.src_ip[IPV4_INDEX] =
5109 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5110 rule->tuples_mask.src_ip[IPV4_INDEX] =
5111 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5113 rule->tuples.dst_ip[IPV4_INDEX] =
5114 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5115 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5116 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5118 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5119 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5121 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5122 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5124 rule->tuples.ether_proto = ETH_P_IP;
5125 rule->tuples_mask.ether_proto = 0xFFFF;
/* SCTP/TCP/UDP over IPv6: 4 x 32-bit address words per side */
5131 be32_to_cpu_array(rule->tuples.src_ip,
5132 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5133 be32_to_cpu_array(rule->tuples_mask.src_ip,
5134 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5136 be32_to_cpu_array(rule->tuples.dst_ip,
5137 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5138 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5139 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5141 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5142 rule->tuples_mask.src_port =
5143 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5145 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5146 rule->tuples_mask.dst_port =
5147 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5149 rule->tuples.ether_proto = ETH_P_IPV6;
5150 rule->tuples_mask.ether_proto = 0xFFFF;
/* raw IPv6: addresses plus explicit L4 protocol */
5153 case IPV6_USER_FLOW:
5154 be32_to_cpu_array(rule->tuples.src_ip,
5155 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5156 be32_to_cpu_array(rule->tuples_mask.src_ip,
5157 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5159 be32_to_cpu_array(rule->tuples.dst_ip,
5160 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5161 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5162 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5164 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5165 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5167 rule->tuples.ether_proto = ETH_P_IPV6;
5168 rule->tuples_mask.ether_proto = 0xFFFF;
/* pure L2 match: MACs and ethertype */
5172 ether_addr_copy(rule->tuples.src_mac,
5173 fs->h_u.ether_spec.h_source);
5174 ether_addr_copy(rule->tuples_mask.src_mac,
5175 fs->m_u.ether_spec.h_source);
5177 ether_addr_copy(rule->tuples.dst_mac,
5178 fs->h_u.ether_spec.h_dest);
5179 ether_addr_copy(rule->tuples_mask.dst_mac,
5180 fs->m_u.ether_spec.h_dest);
5182 rule->tuples.ether_proto =
5183 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5184 rule->tuples_mask.ether_proto =
5185 be16_to_cpu(fs->m_u.ether_spec.h_proto);
/* for the L4 flow types the IP protocol number is fixed by the type */
5192 switch (flow_type) {
5195 rule->tuples.ip_proto = IPPROTO_SCTP;
5196 rule->tuples_mask.ip_proto = 0xFF;
5200 rule->tuples.ip_proto = IPPROTO_TCP;
5201 rule->tuples_mask.ip_proto = 0xFF;
5205 rule->tuples.ip_proto = IPPROTO_UDP;
5206 rule->tuples_mask.ip_proto = 0xFF;
/* optional extensions: VLAN TCI and an overriding destination MAC */
5212 if ((fs->flow_type & FLOW_EXT)) {
5213 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5214 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5217 if (fs->flow_type & FLOW_MAC_EXT) {
5218 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5219 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5225 /* make sure being called after lock up with fd_rule_lock */
/* Add @rule to the software list, then program its action and TCAM key
 * into the hardware; on a hardware failure the list entry is rolled
 * back so software and hardware state stay consistent.
 */
5226 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5227 struct hclge_fd_rule *rule)
5232 dev_err(&hdev->pdev->dev,
5233 "The flow director rule is NULL\n");
5237 /* it will never fail here, so needn't to check return value */
5238 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5240 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5244 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
/* error path: undo the list insertion done above */
5251 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
/* ethtool -N/-U entry point: validate the user flow spec, resolve the
 * drop/queue action (including VF-directed queues), build an internal
 * rule and program it under fd_rule_lock. Any existing aRFS rules are
 * flushed first to avoid conflicts with user-configured rules.
 */
5255 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5256 struct ethtool_rxnfc *cmd)
5258 struct hclge_vport *vport = hclge_get_vport(handle);
5259 struct hclge_dev *hdev = vport->back;
5260 u16 dst_vport_id = 0, q_index = 0;
5261 struct ethtool_rx_flow_spec *fs;
5262 struct hclge_fd_rule *rule;
5267 if (!hnae3_dev_fd_supported(hdev))
5271 dev_warn(&hdev->pdev->dev,
5272 "Please enable flow director first\n");
5276 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5278 ret = hclge_fd_check_spec(hdev, fs, &unused);
5280 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
/* RX_CLS_FLOW_DISC means "drop"; otherwise decode ring and VF id */
5284 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5285 action = HCLGE_FD_ACTION_DROP_PACKET;
5287 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5288 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5291 if (vf > hdev->num_req_vfs) {
5292 dev_err(&hdev->pdev->dev,
5293 "Error: vf id (%d) > max vf num (%d)\n",
5294 vf, hdev->num_req_vfs);
/* vf == 0 targets the PF itself */
5298 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5299 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5302 dev_err(&hdev->pdev->dev,
5303 "Error: queue id (%d) > max tqp num (%d)\n",
5308 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5312 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5316 ret = hclge_fd_get_tuple(hdev, fs, rule);
5322 rule->flow_type = fs->flow_type;
5324 rule->location = fs->location;
5325 rule->unused_tuple = unused;
5326 rule->vf_id = dst_vport_id;
5327 rule->queue_id = q_index;
5328 rule->action = action;
5329 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5331 /* to avoid rule conflict, when user configure rule by ethtool,
5332 * we need to clear all arfs rules
5334 hclge_clear_arfs_rules(handle);
5336 spin_lock_bh(&hdev->fd_rule_lock);
5337 ret = hclge_fd_config_rule(hdev, rule);
5339 spin_unlock_bh(&hdev->fd_rule_lock);
/* ethtool delete entry point: verify the rule exists, clear its TCAM
 * entry in hardware, then drop it from the software list under
 * fd_rule_lock.
 */
5344 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5345 struct ethtool_rxnfc *cmd)
5347 struct hclge_vport *vport = hclge_get_vport(handle);
5348 struct hclge_dev *hdev = vport->back;
5349 struct ethtool_rx_flow_spec *fs;
5352 if (!hnae3_dev_fd_supported(hdev))
5355 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5357 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5360 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5361 dev_err(&hdev->pdev->dev,
5362 "Delete fail, rule %d is inexistent\n", fs->location);
/* invalidate the stage-1 TCAM entry before touching the list */
5366 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5371 spin_lock_bh(&hdev->fd_rule_lock);
5372 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5374 spin_unlock_bh(&hdev->fd_rule_lock);
/* Remove every flow-director rule: invalidate each occupied TCAM slot,
 * then (conditionally) free the software list and reset the counters
 * and bitmap. All done under fd_rule_lock.
 */
5379 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5382 struct hclge_vport *vport = hclge_get_vport(handle);
5383 struct hclge_dev *hdev = vport->back;
5384 struct hclge_fd_rule *rule;
5385 struct hlist_node *node;
5388 if (!hnae3_dev_fd_supported(hdev))
5391 spin_lock_bh(&hdev->fd_rule_lock);
5392 for_each_set_bit(location, hdev->fd_bmap,
5393 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5394 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
/* also drop the software copies of the rules */
5398 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5400 hlist_del(&rule->rule_node);
5403 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5404 hdev->hclge_fd_rule_num = 0;
5405 bitmap_zero(hdev->fd_bmap,
5406 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5409 spin_unlock_bh(&hdev->fd_rule_lock);
/* After a reset, reprogram every rule kept in the software list back
 * into hardware. Rules that fail to restore are logged and removed so
 * software state matches what hardware actually holds.
 */
5412 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5414 struct hclge_vport *vport = hclge_get_vport(handle);
5415 struct hclge_dev *hdev = vport->back;
5416 struct hclge_fd_rule *rule;
5417 struct hlist_node *node;
5420 /* Return ok here, because reset error handling will check this
5421 * return value. If error is returned here, the reset process will
5424 if (!hnae3_dev_fd_supported(hdev))
5427 /* if fd is disabled, should not restore it when reset */
5431 spin_lock_bh(&hdev->fd_rule_lock);
5432 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5433 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5435 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
/* restore failed: drop the stale rule from software tracking */
5438 dev_warn(&hdev->pdev->dev,
5439 "Restore rule %d failed, remove it\n",
5441 clear_bit(rule->location, hdev->fd_bmap);
5442 hlist_del(&rule->rule_node);
5444 hdev->hclge_fd_rule_num--;
5448 if (hdev->hclge_fd_rule_num)
5449 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5451 spin_unlock_bh(&hdev->fd_rule_lock);
/* ETHTOOL_GRXCLSRLCNT: report the current rule count and the total
 * stage-1 rule capacity.
 */
5456 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5457 struct ethtool_rxnfc *cmd)
5459 struct hclge_vport *vport = hclge_get_vport(handle);
5460 struct hclge_dev *hdev = vport->back;
5462 if (!hnae3_dev_fd_supported(hdev))
5465 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5466 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
/* ETHTOOL_GRXCLSRULE: look up the rule at fs->location and translate the
 * internal tuple/mask representation back into the ethtool flow spec,
 * reporting an all-zero mask for any tuple recorded as unused.
 */
5471 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5472 struct ethtool_rxnfc *cmd)
5474 struct hclge_vport *vport = hclge_get_vport(handle);
5475 struct hclge_fd_rule *rule = NULL;
5476 struct hclge_dev *hdev = vport->back;
5477 struct ethtool_rx_flow_spec *fs;
5478 struct hlist_node *node2;
5480 if (!hnae3_dev_fd_supported(hdev))
5483 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5485 spin_lock_bh(&hdev->fd_rule_lock);
/* list is sorted by location; stop at the first candidate */
5487 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5488 if (rule->location >= fs->location)
5492 if (!rule || fs->location != rule->location) {
5493 spin_unlock_bh(&hdev->fd_rule_lock);
5498 fs->flow_type = rule->flow_type;
5499 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5503 fs->h_u.tcp_ip4_spec.ip4src =
5504 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5505 fs->m_u.tcp_ip4_spec.ip4src =
5506 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5507 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5509 fs->h_u.tcp_ip4_spec.ip4dst =
5510 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5511 fs->m_u.tcp_ip4_spec.ip4dst =
5512 rule->unused_tuple & BIT(INNER_DST_IP) ?
5513 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5515 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5516 fs->m_u.tcp_ip4_spec.psrc =
5517 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5518 0 : cpu_to_be16(rule->tuples_mask.src_port);
5520 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5521 fs->m_u.tcp_ip4_spec.pdst =
5522 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5523 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5525 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5526 fs->m_u.tcp_ip4_spec.tos =
5527 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5528 0 : rule->tuples_mask.ip_tos;
5532 fs->h_u.usr_ip4_spec.ip4src =
5533 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
/* NOTE(review): tcp_ip4_spec used in the usr_ip4 case - same union
 * offset so behavior matches, but usr_ip4_spec would be consistent;
 * confirm against the other fields in this case before changing.
 */
5534 fs->m_u.tcp_ip4_spec.ip4src =
5535 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5536 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5538 fs->h_u.usr_ip4_spec.ip4dst =
5539 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5540 fs->m_u.usr_ip4_spec.ip4dst =
5541 rule->unused_tuple & BIT(INNER_DST_IP) ?
5542 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5544 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5545 fs->m_u.usr_ip4_spec.tos =
5546 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5547 0 : rule->tuples_mask.ip_tos;
5549 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5550 fs->m_u.usr_ip4_spec.proto =
5551 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5552 0 : rule->tuples_mask.ip_proto;
5554 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5560 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5561 rule->tuples.src_ip, IPV6_SIZE);
5562 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5563 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5564 sizeof(int) * IPV6_SIZE);
5566 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5567 rule->tuples_mask.src_ip, IPV6_SIZE);
5569 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5570 rule->tuples.dst_ip, IPV6_SIZE);
5571 if (rule->unused_tuple & BIT(INNER_DST_IP))
5572 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5573 sizeof(int) * IPV6_SIZE);
5575 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5576 rule->tuples_mask.dst_ip, IPV6_SIZE);
5578 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5579 fs->m_u.tcp_ip6_spec.psrc =
5580 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5581 0 : cpu_to_be16(rule->tuples_mask.src_port);
5583 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5584 fs->m_u.tcp_ip6_spec.pdst =
5585 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5586 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5589 case IPV6_USER_FLOW:
5590 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5591 rule->tuples.src_ip, IPV6_SIZE);
5592 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5593 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5594 sizeof(int) * IPV6_SIZE);
5596 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5597 rule->tuples_mask.src_ip, IPV6_SIZE);
5599 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5600 rule->tuples.dst_ip, IPV6_SIZE);
5601 if (rule->unused_tuple & BIT(INNER_DST_IP))
5602 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5603 sizeof(int) * IPV6_SIZE);
5605 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5606 rule->tuples_mask.dst_ip, IPV6_SIZE);
5608 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5609 fs->m_u.usr_ip6_spec.l4_proto =
5610 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5611 0 : rule->tuples_mask.ip_proto;
5615 ether_addr_copy(fs->h_u.ether_spec.h_source,
5616 rule->tuples.src_mac);
5617 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5618 eth_zero_addr(fs->m_u.ether_spec.h_source);
5620 ether_addr_copy(fs->m_u.ether_spec.h_source,
5621 rule->tuples_mask.src_mac);
5623 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5624 rule->tuples.dst_mac);
5625 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5626 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5628 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5629 rule->tuples_mask.dst_mac);
5631 fs->h_u.ether_spec.h_proto =
5632 cpu_to_be16(rule->tuples.ether_proto);
5633 fs->m_u.ether_spec.h_proto =
5634 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5635 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5639 spin_unlock_bh(&hdev->fd_rule_lock);
5643 if (fs->flow_type & FLOW_EXT) {
5644 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5645 fs->m_ext.vlan_tci =
5646 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5647 cpu_to_be16(VLAN_VID_MASK) :
5648 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5651 if (fs->flow_type & FLOW_MAC_EXT) {
5652 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
/* NOTE(review): mask written to m_u.ether_spec.h_dest while the value
 * goes to h_ext.h_dest - looks like it should target m_ext.h_dest;
 * verify against the FLOW_MAC_EXT parsing side before changing.
 */
5653 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5654 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5656 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5657 rule->tuples_mask.dst_mac);
5660 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5661 fs->ring_cookie = RX_CLS_FLOW_DISC;
/* encode the VF id into the upper bits of the ring cookie */
5665 fs->ring_cookie = rule->queue_id;
5666 vf_id = rule->vf_id;
5667 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5668 fs->ring_cookie |= vf_id;
5671 spin_unlock_bh(&hdev->fd_rule_lock);
/* ETHTOOL_GRXCLSRLALL: copy the location of every installed rule into
 * @rule_locs, up to the caller-supplied cmd->rule_cnt, and report the
 * actual count back.
 */
5676 static int hclge_get_all_rules(struct hnae3_handle *handle,
5677 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5679 struct hclge_vport *vport = hclge_get_vport(handle);
5680 struct hclge_dev *hdev = vport->back;
5681 struct hclge_fd_rule *rule;
5682 struct hlist_node *node2;
5685 if (!hnae3_dev_fd_supported(hdev))
5688 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5690 spin_lock_bh(&hdev->fd_rule_lock);
5691 hlist_for_each_entry_safe(rule, node2,
5692 &hdev->fd_rule_list, rule_node) {
/* caller's buffer is full: stop rather than overrun it */
5693 if (cnt == cmd->rule_cnt) {
5694 spin_unlock_bh(&hdev->fd_rule_lock);
5698 rule_locs[cnt] = rule->location;
5702 spin_unlock_bh(&hdev->fd_rule_lock);
5704 cmd->rule_cnt = cnt;
/* Build an internal tuple set from dissected flow keys (aRFS path).
 * Only ethertype, L4 protocol, destination port and addresses are
 * captured; IPv4 addresses land in the last word of the IPv6-sized
 * address arrays.
 */
5709 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5710 struct hclge_fd_rule_tuples *tuples)
5712 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5713 tuples->ip_proto = fkeys->basic.ip_proto;
5714 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5716 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5717 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5718 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5720 memcpy(tuples->src_ip,
5721 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5722 sizeof(tuples->src_ip));
5723 memcpy(tuples->dst_ip,
5724 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5725 sizeof(tuples->dst_ip));
5729 /* traverse all rules, check whether an existed rule has the same tuples */
/* Linear scan of fd_rule_list comparing the whole tuple struct with
 * memcmp; caller must hold fd_rule_lock.
 */
5730 static struct hclge_fd_rule *
5731 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5732 const struct hclge_fd_rule_tuples *tuples)
5734 struct hclge_fd_rule *rule = NULL;
5735 struct hlist_node *node;
5737 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5738 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
/* Populate @rule for an aRFS-originated filter: mark the tuples aRFS
 * never matches on as unused, derive the ethtool flow type from the
 * dissected ethertype/L4 protocol, and use a full (all-ones) mask for
 * everything that is matched.
 */
5745 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5746 struct hclge_fd_rule *rule)
5748 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5749 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5750 BIT(INNER_SRC_PORT);
5753 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5754 if (tuples->ether_proto == ETH_P_IP) {
5755 if (tuples->ip_proto == IPPROTO_TCP)
5756 rule->flow_type = TCP_V4_FLOW;
5758 rule->flow_type = UDP_V4_FLOW;
5760 if (tuples->ip_proto == IPPROTO_TCP)
5761 rule->flow_type = TCP_V6_FLOW;
5763 rule->flow_type = UDP_V6_FLOW;
5765 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5766 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
/* aRFS steering callback: create a new filter for this flow, or retarget
 * an existing one to @queue_id. Disabled while user-configured (ethtool)
 * rules are active. Returns the rule location (used as the aRFS filter
 * id) on success.
 */
5769 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5770 u16 flow_id, struct flow_keys *fkeys)
5772 struct hclge_vport *vport = hclge_get_vport(handle);
5773 struct hclge_fd_rule_tuples new_tuples;
5774 struct hclge_dev *hdev = vport->back;
5775 struct hclge_fd_rule *rule;
5780 if (!hnae3_dev_fd_supported(hdev))
5783 memset(&new_tuples, 0, sizeof(new_tuples));
5784 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5786 spin_lock_bh(&hdev->fd_rule_lock);
5788 /* when there is already fd rule existed add by user,
5789 * arfs should not work
5791 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5792 spin_unlock_bh(&hdev->fd_rule_lock);
5797 /* check is there flow director filter existed for this flow,
5798 * if not, create a new filter for it;
5799 * if filter exist with different queue id, modify the filter;
5800 * if filter exist with same queue id, do nothing
5802 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
/* no match: allocate a free location and install a fresh rule */
5804 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5805 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5806 spin_unlock_bh(&hdev->fd_rule_lock);
/* GFP_KERNEL inside a bh spinlock would sleep - NOTE(review): the
 * upstream context here is elided; confirm allocation context.
 */
5811 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5813 spin_unlock_bh(&hdev->fd_rule_lock);
5818 set_bit(bit_id, hdev->fd_bmap);
5819 rule->location = bit_id;
5820 rule->flow_id = flow_id;
5821 rule->queue_id = queue_id;
5822 hclge_fd_build_arfs_rule(&new_tuples, rule);
5823 ret = hclge_fd_config_rule(hdev, rule);
5825 spin_unlock_bh(&hdev->fd_rule_lock);
5830 return rule->location;
5833 spin_unlock_bh(&hdev->fd_rule_lock);
/* match with same queue: nothing to do */
5835 if (rule->queue_id == queue_id)
5836 return rule->location;
/* match with different queue: rewrite the action, rolling back the
 * queue id if the hardware update fails
 */
5838 tmp_queue_id = rule->queue_id;
5839 rule->queue_id = queue_id;
5840 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5842 rule->queue_id = tmp_queue_id;
5846 return rule->location;
/* Periodic aRFS aging: under fd_rule_lock, move rules whose flows have
 * expired (per rps_may_expire_flow()) onto a private list, then clear
 * their TCAM entries outside the lock.
 */
5849 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5851 #ifdef CONFIG_RFS_ACCEL
5852 struct hnae3_handle *handle = &hdev->vport[0].nic;
5853 struct hclge_fd_rule *rule;
5854 struct hlist_node *node;
5855 HLIST_HEAD(del_list);
5857 spin_lock_bh(&hdev->fd_rule_lock);
/* only age rules while aRFS owns the flow-director table */
5858 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5859 spin_unlock_bh(&hdev->fd_rule_lock);
5862 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5863 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5864 rule->flow_id, rule->location)) {
5865 hlist_del_init(&rule->rule_node);
5866 hlist_add_head(&rule->rule_node, &del_list);
5867 hdev->hclge_fd_rule_num--;
5868 clear_bit(rule->location, hdev->fd_bmap);
5871 spin_unlock_bh(&hdev->fd_rule_lock);
/* hardware teardown happens lock-free on the detached list */
5873 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5874 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5875 rule->location, NULL, false);
/* Drop all aRFS-installed rules (no-op if the table is user-owned or
 * CONFIG_RFS_ACCEL is off); called before installing ethtool rules.
 */
5881 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5883 #ifdef CONFIG_RFS_ACCEL
5884 struct hclge_vport *vport = hclge_get_vport(handle);
5885 struct hclge_dev *hdev = vport->back;
5887 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5888 hclge_del_all_fd_entries(handle, true);
5892 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5894 struct hclge_vport *vport = hclge_get_vport(handle);
5895 struct hclge_dev *hdev = vport->back;
5897 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5898 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5901 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5903 struct hclge_vport *vport = hclge_get_vport(handle);
5904 struct hclge_dev *hdev = vport->back;
5906 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5909 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5911 struct hclge_vport *vport = hclge_get_vport(handle);
5912 struct hclge_dev *hdev = vport->back;
5914 return hdev->rst_stats.hw_reset_done_cnt;
/* Toggle flow director: record the new state in hdev->fd_en, then
 * either tear rules down (disable) or restore them (enable). aRFS-owned
 * rule lists are cleared as well when aRFS was active.
 */
5917 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5919 struct hclge_vport *vport = hclge_get_vport(handle);
5920 struct hclge_dev *hdev = vport->back;
5923 hdev->fd_en = enable;
/* NOTE(review): "? true : false" on a boolean comparison is redundant */
5924 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE ? true : false;
5926 hclge_del_all_fd_entries(handle, clear);
5928 hclge_restore_fd_entries(handle);
/* Enable or disable the MAC: program the TX/RX enable, padding, FCS and
 * oversize-truncate bits of the CONFIG_MAC_MODE command in one go.
 * Failures are only logged; there is no meaningful recovery here.
 */
5931 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5933 struct hclge_desc desc;
5934 struct hclge_config_mac_mode_cmd *req =
5935 (struct hclge_config_mac_mode_cmd *)desc.data;
5939 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5942 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
5943 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
5944 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
5945 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
5946 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
5947 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
5948 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
5949 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
5950 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
5951 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
/* command payload is little-endian on the wire */
5954 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5956 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5958 dev_err(&hdev->pdev->dev,
5959 "mac enable fail, ret =%d.\n", ret);
/* Enable/disable MAC application-level loopback with a
 * read-modify-write of the CONFIG_MAC_MODE command: read the current
 * config, flip the loopback plus TX/RX enable bits, write it back.
 */
5962 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5964 struct hclge_config_mac_mode_cmd *req;
5965 struct hclge_desc desc;
5969 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5970 /* 1 Read out the MAC mode config at first */
5971 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5972 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5974 dev_err(&hdev->pdev->dev,
5975 "mac loopback get fail, ret =%d.\n", ret);
5979 /* 2 Then setup the loopback flag */
5980 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5981 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5982 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5983 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5985 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5987 /* 3 Config mac work mode with loopback flag
5988 * and its original configure parameters
5990 hclge_cmd_reuse_desc(&desc, false);
5991 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5993 dev_err(&hdev->pdev->dev,
5994 "mac loopback set fail, ret =%d.\n", ret);
/* Configure serial or parallel SerDes internal loopback: issue the
 * SERDES_LOOPBACK command, poll until firmware reports completion, then
 * (re)enable the MAC and wait for the link state to reach the value the
 * requested mode implies (up when enabling, down when disabling).
 */
5998 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5999 enum hnae3_loop loop_mode)
6001 #define HCLGE_SERDES_RETRY_MS 10
6002 #define HCLGE_SERDES_RETRY_NUM 100
6004 #define HCLGE_MAC_LINK_STATUS_MS 10
6005 #define HCLGE_MAC_LINK_STATUS_NUM 100
6006 #define HCLGE_MAC_LINK_STATUS_DOWN 0
6007 #define HCLGE_MAC_LINK_STATUS_UP 1
6009 struct hclge_serdes_lb_cmd *req;
6010 struct hclge_desc desc;
6011 int mac_link_ret = 0;
6015 req = (struct hclge_serdes_lb_cmd *)desc.data;
6016 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6018 switch (loop_mode) {
6019 case HNAE3_LOOP_SERIAL_SERDES:
6020 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6022 case HNAE3_LOOP_PARALLEL_SERDES:
6023 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6026 dev_err(&hdev->pdev->dev,
6027 "unsupported serdes loopback mode %d\n", loop_mode);
/* enable path sets both the mode bit and its mask; disable path only
 * sets the mask so the bit is cleared
 */
6032 req->enable = loop_mode_b;
6033 req->mask = loop_mode_b;
6034 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
6036 req->mask = loop_mode_b;
6037 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
6040 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6042 dev_err(&hdev->pdev->dev,
6043 "serdes loopback set fail, ret = %d\n", ret);
/* poll firmware completion flag */
6048 msleep(HCLGE_SERDES_RETRY_MS);
6049 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6051 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6053 dev_err(&hdev->pdev->dev,
6054 "serdes loopback get, ret = %d\n", ret);
6057 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6058 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6060 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6061 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6063 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6064 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6068 hclge_cfg_mac_mode(hdev, en);
6072 /* serdes Internal loopback, independent of the network cable.*/
6073 msleep(HCLGE_MAC_LINK_STATUS_MS);
6074 ret = hclge_get_mac_link_status(hdev);
6075 if (ret == mac_link_ret)
6077 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6079 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
/* Enable or disable a single TQP (@tqp_id) for @stream_id via the
 * CFG_COM_TQP_QUEUE command.
 */
6084 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6085 int stream_id, bool enable)
6087 struct hclge_desc desc;
6088 struct hclge_cfg_com_tqp_queue_cmd *req =
6089 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6092 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6093 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6094 req->stream_id = cpu_to_le16(stream_id);
6096 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6098 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6100 dev_err(&hdev->pdev->dev,
6101 "Tqp enable fail, status =%d.\n", ret);
/* ethtool self-test hook: dispatch to the requested loopback
 * implementation (app or serdes), then toggle every TQP of this vport
 * to match the loopback state.
 */
6105 static int hclge_set_loopback(struct hnae3_handle *handle,
6106 enum hnae3_loop loop_mode, bool en)
6108 struct hclge_vport *vport = hclge_get_vport(handle);
6109 struct hnae3_knic_private_info *kinfo;
6110 struct hclge_dev *hdev = vport->back;
6113 switch (loop_mode) {
6114 case HNAE3_LOOP_APP:
6115 ret = hclge_set_app_loopback(hdev, en);
6117 case HNAE3_LOOP_SERIAL_SERDES:
6118 case HNAE3_LOOP_PARALLEL_SERDES:
6119 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6123 dev_err(&hdev->pdev->dev,
6124 "loop_mode %d is not supported\n", loop_mode);
6131 kinfo = &vport->nic.kinfo;
6132 for (i = 0; i < kinfo->num_tqps; i++) {
/* stream id 0 for all queues */
6133 ret = hclge_tqp_enable(hdev, i, 0, en);
/* Zero the per-queue software statistics of every TQP attached to this
 * handle.
 */
6141 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6143 struct hclge_vport *vport = hclge_get_vport(handle);
6144 struct hnae3_knic_private_info *kinfo;
6145 struct hnae3_queue *queue;
6146 struct hclge_tqp *tqp;
6149 kinfo = &vport->nic.kinfo;
6150 for (i = 0; i < kinfo->num_tqps; i++) {
6151 queue = handle->kinfo.tqp[i];
/* hclge_tqp embeds the generic queue; recover the container */
6152 tqp = container_of(queue, struct hclge_tqp, q);
6153 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
/* Start or stop the periodic service timer. On stop, also cancel any
 * queued service work and clear the scheduled flag so no stale task
 * runs afterwards.
 */
6157 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6159 struct hclge_vport *vport = hclge_get_vport(handle);
6160 struct hclge_dev *hdev = vport->back;
6163 mod_timer(&hdev->service_timer, jiffies + HZ);
6165 del_timer_sync(&hdev->service_timer);
6166 cancel_work_sync(&hdev->service_task);
6167 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
/* Bring the device up: enable the MAC, clear the DOWN state, reset the
 * cached link state and TQP statistics, then start the PHY.
 */
6171 static int hclge_ae_start(struct hnae3_handle *handle)
6173 struct hclge_vport *vport = hclge_get_vport(handle);
6174 struct hclge_dev *hdev = vport->back;
6177 hclge_cfg_mac_mode(hdev, true);
6178 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
/* link will be re-evaluated by the service task */
6179 hdev->hw.mac.link = 0;
6181 /* reset tqp stats */
6182 hclge_reset_tqp_stats(handle);
6184 hclge_mac_start_phy(hdev);
/* Bring the device down: mark DOWN, flush aRFS rules, stop queues, MAC
 * and PHY, and refresh the link status. During a non-function reset the
 * firmware disables the MAC itself, so only the PHY is stopped then.
 */
6189 static void hclge_ae_stop(struct hnae3_handle *handle)
6191 struct hclge_vport *vport = hclge_get_vport(handle);
6192 struct hclge_dev *hdev = vport->back;
6195 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6197 hclge_clear_arfs_rules(handle);
6199 /* If it is not PF reset, the firmware will disable the MAC,
6200 * so it only need to stop phy here.
6202 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6203 hdev->reset_type != HNAE3_FUNC_RESET) {
6204 hclge_mac_stop_phy(hdev);
6208 for (i = 0; i < handle->kinfo.num_tqps; i++)
6209 hclge_reset_tqp(handle, i);
6212 hclge_cfg_mac_mode(hdev, false);
6214 hclge_mac_stop_phy(hdev);
6216 /* reset tqp stats */
6217 hclge_reset_tqp_stats(handle);
6218 hclge_update_link_status(hdev);
6221 int hclge_vport_start(struct hclge_vport *vport)
6223 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6224 vport->last_active_jiffies = jiffies;
6228 void hclge_vport_stop(struct hclge_vport *vport)
6230 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
/* Client-start hook: forwards to hclge_vport_start() for this handle's
 * vport.
 */
static int hclge_client_start(struct hnae3_handle *handle)
{
	return hclge_vport_start(hclge_get_vport(handle));
}
/* Client-stop hook: forwards to hclge_vport_stop() for this handle's
 * vport.
 */
static void hclge_client_stop(struct hnae3_handle *handle)
{
	hclge_vport_stop(hclge_get_vport(handle));
}
/* Translate a MAC-VLAN table command's firmware response code into an
 * errno, per operation (add / remove / lookup). resp_code semantics
 * differ by @op: e.g. for ADD, 0 or 1 is success and the overflow codes
 * map to -ENOSPC; for REMOVE/LKUP, 1 means "entry not found" (-ENOENT).
 */
6247 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6248 u16 cmdq_resp, u8 resp_code,
6249 enum hclge_mac_vlan_tbl_opcode op)
6251 struct hclge_dev *hdev = vport->back;
/* default to -EIO unless a known code overrides it */
6252 int return_status = -EIO;
6255 dev_err(&hdev->pdev->dev,
6256 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6261 if (op == HCLGE_MAC_VLAN_ADD) {
6262 if ((!resp_code) || (resp_code == 1)) {
6264 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6265 return_status = -ENOSPC;
6266 dev_err(&hdev->pdev->dev,
6267 "add mac addr failed for uc_overflow.\n");
6268 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6269 return_status = -ENOSPC;
6270 dev_err(&hdev->pdev->dev,
6271 "add mac addr failed for mc_overflow.\n");
6273 dev_err(&hdev->pdev->dev,
6274 "add mac addr failed for undefined, code=%d.\n",
6277 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6280 } else if (resp_code == 1) {
6281 return_status = -ENOENT;
6282 dev_dbg(&hdev->pdev->dev,
6283 "remove mac addr failed for miss.\n");
6285 dev_err(&hdev->pdev->dev,
6286 "remove mac addr failed for undefined, code=%d.\n",
6289 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6292 } else if (resp_code == 1) {
6293 return_status = -ENOENT;
6294 dev_dbg(&hdev->pdev->dev,
6295 "lookup mac addr failed for miss.\n");
6297 dev_err(&hdev->pdev->dev,
6298 "lookup mac addr failed for undefined, code=%d.\n",
6302 return_status = -EINVAL;
6303 dev_err(&hdev->pdev->dev,
6304 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6308 return return_status;
/* hclge_update_desc_vfid - set or clear @vfid's bit in the VF bitmap that
 * spans desc[1] (first 192 VFs) and desc[2] (remaining VFs) of a MAC-VLAN
 * table entry.
 * @clr: true to clear the bit, false to set it.
 * Returns non-zero for an out-of-range vfid (> 255 or negative).
 */
6311 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6313 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6315 unsigned int word_num;
6316 unsigned int bit_num;
6318 if (vfid > 255 || vfid < 0)
/* NOTE(review): vfid >= 0 is always true after the range check above */
6321 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6322 word_num = vfid / 32;
6323 bit_num = vfid % 32;
6325 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6327 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
/* VFs 192..255 live in desc[2] */
6329 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6330 bit_num = vfid % 32;
6332 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6334 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
/* hclge_is_all_function_id_zero - return true when no function/VF bit is
 * set in the bitmap words of desc[1..2]; used to decide whether a shared
 * multicast entry can be deleted entirely.
 */
6340 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6342 #define HCLGE_DESC_NUMBER 3
6343 #define HCLGE_FUNC_NUMBER_PER_DESC 6
/* desc[0] holds the entry itself; scan only the bitmap descriptors */
6346 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6347 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6348 if (desc[i].data[j])
/* hclge_prepare_mac_addr - pack a 6-byte MAC address into the hardware
 * MAC-VLAN table entry layout (bytes 0-3 into mac_addr_hi32, bytes 4-5
 * into mac_addr_lo16) and set the entry-valid / multicast flag bits.
 * @is_mc: true when the entry describes a multicast address.
 */
6354 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6355 const u8 *addr, bool is_mc)
6357 const unsigned char *mac_addr = addr;
6358 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6359 (mac_addr[0]) | (mac_addr[1] << 8);
6360 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6362 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
/* multicast-only flags (guarded by is_mc in the elided line above) */
6364 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6365 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6368 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6369 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
/* hclge_remove_mac_vlan_tbl - issue a single-descriptor firmware command
 * to delete the MAC-VLAN table entry described by @req; the firmware's
 * per-entry result is decoded by hclge_get_mac_vlan_cmd_status().
 */
6372 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6373 struct hclge_mac_vlan_tbl_entry_cmd *req)
6375 struct hclge_dev *hdev = vport->back;
6376 struct hclge_desc desc;
6381 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6383 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6385 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6387 dev_err(&hdev->pdev->dev,
6388 "del mac addr failed for cmd_send, ret =%d.\n",
/* operation result is carried in byte 1 of data[0] */
6392 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6393 retval = le16_to_cpu(desc.retval);
6395 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6396 HCLGE_MAC_VLAN_REMOVE);
/* hclge_lookup_mac_vlan_tbl - look up a MAC-VLAN table entry. Multicast
 * lookups use a 3-descriptor chain (the VF bitmap spans desc[1..2]);
 * unicast lookups need only one descriptor.
 * Returns 0 if found, -ENOENT on miss (via cmd-status decoding).
 */
6399 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6400 struct hclge_mac_vlan_tbl_entry_cmd *req,
6401 struct hclge_desc *desc,
6404 struct hclge_dev *hdev = vport->back;
/* multicast path: chain three read descriptors with FLAG_NEXT */
6409 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6411 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6412 memcpy(desc[0].data,
6414 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6415 hclge_cmd_setup_basic_desc(&desc[1],
6416 HCLGE_OPC_MAC_VLAN_ADD,
6418 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6419 hclge_cmd_setup_basic_desc(&desc[2],
6420 HCLGE_OPC_MAC_VLAN_ADD,
6422 ret = hclge_cmd_send(&hdev->hw, desc, 3);
/* unicast path: single descriptor */
6424 memcpy(desc[0].data,
6426 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6427 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6430 dev_err(&hdev->pdev->dev,
6431 "lookup mac addr failed for cmd_send, ret =%d.\n",
6435 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6436 retval = le16_to_cpu(desc[0].retval);
6438 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6439 HCLGE_MAC_VLAN_LKUP);
/* hclge_add_mac_vlan_tbl - write a MAC-VLAN table entry to the firmware.
 * @mc_desc: NULL for a unicast entry (single descriptor); otherwise the
 * 3-descriptor multicast chain previously filled by lookup, which is
 * reused so the VF bitmap in desc[1..2] is written back together with
 * the entry in desc[0].
 */
6442 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6443 struct hclge_mac_vlan_tbl_entry_cmd *req,
6444 struct hclge_desc *mc_desc)
6446 struct hclge_dev *hdev = vport->back;
6453 struct hclge_desc desc;
6455 hclge_cmd_setup_basic_desc(&desc,
6456 HCLGE_OPC_MAC_VLAN_ADD,
6458 memcpy(desc.data, req,
6459 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6460 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6461 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6462 retval = le16_to_cpu(desc.retval);
6464 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6466 HCLGE_MAC_VLAN_ADD);
/* multicast path: re-arm the chained descriptors as a write command */
6468 hclge_cmd_reuse_desc(&mc_desc[0], false);
6469 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6470 hclge_cmd_reuse_desc(&mc_desc[1], false);
6471 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6472 hclge_cmd_reuse_desc(&mc_desc[2], false);
/* last descriptor terminates the chain */
6473 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6474 memcpy(mc_desc[0].data, req,
6475 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6476 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6477 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6478 retval = le16_to_cpu(mc_desc[0].retval);
6480 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6482 HCLGE_MAC_VLAN_ADD);
6486 dev_err(&hdev->pdev->dev,
6487 "add mac addr failed for cmd_send, ret =%d.\n",
/* hclge_init_umv_space - ask the firmware to allocate unicast MAC-VLAN
 * (UMV) table space, then split it into a private quota per function and
 * a shared pool.
 */
6495 static int hclge_init_umv_space(struct hclge_dev *hdev)
6497 u16 allocated_size = 0;
6500 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
/* firmware may grant less than requested; warn but continue */
6505 if (allocated_size < hdev->wanted_umv_size)
6506 dev_warn(&hdev->pdev->dev,
6507 "Alloc umv space failed, want %d, get %d\n",
6508 hdev->wanted_umv_size, allocated_size);
6510 mutex_init(&hdev->umv_mutex);
6511 hdev->max_umv_size = allocated_size;
6512 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6513 * preserve some unicast mac vlan table entries shared by pf
6516 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
/* the division remainder tops up the shared pool */
6517 hdev->share_umv_size = hdev->priv_umv_size +
6518 hdev->max_umv_size % (hdev->num_req_vfs + 2);
/* hclge_uninit_umv_space - return the allocated UMV space to the firmware
 * and destroy the protecting mutex.
 */
6523 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6527 if (hdev->max_umv_size > 0) {
6528 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6532 hdev->max_umv_size = 0;
6534 mutex_destroy(&hdev->umv_mutex);
/* hclge_set_umv_space - firmware command to allocate or free UMV space.
 * @space_size: entries to allocate (or return).
 * @allocated_size: out-param receiving the granted size on allocation;
 * may be NULL when freeing.
 * @is_alloc: true = allocate, false = free.
 */
6539 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6540 u16 *allocated_size, bool is_alloc)
6542 struct hclge_umv_spc_alc_cmd *req;
6543 struct hclge_desc desc;
6546 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6547 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6549 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6551 req->space_size = cpu_to_le32(space_size);
6553 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6555 dev_err(&hdev->pdev->dev,
6556 "%s umv space failed for cmd_send, ret =%d\n",
6557 is_alloc ? "allocate" : "free", ret);
/* firmware reports the actually granted size in data[1] */
6561 if (is_alloc && allocated_size)
6562 *allocated_size = le32_to_cpu(desc.data[1]);
/* hclge_reset_umv_space - reset UMV accounting after a device reset:
 * zero every vport's usage counter and restore the shared pool size.
 */
6567 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6569 struct hclge_vport *vport;
6572 for (i = 0; i < hdev->num_alloc_vport; i++) {
6573 vport = &hdev->vport[i];
6574 vport->used_umv_num = 0;
/* same split as hclge_init_umv_space(): remainder goes to shared pool */
6577 mutex_lock(&hdev->umv_mutex);
6578 hdev->share_umv_size = hdev->priv_umv_size +
6579 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6580 mutex_unlock(&hdev->umv_mutex);
/* hclge_is_umv_space_full - true when the vport has exhausted its private
 * UMV quota and the shared pool is empty too. Serialized by umv_mutex.
 */
6583 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6585 struct hclge_dev *hdev = vport->back;
6588 mutex_lock(&hdev->umv_mutex);
6589 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6590 hdev->share_umv_size == 0);
6591 mutex_unlock(&hdev->umv_mutex);
/* hclge_update_umv_space - adjust UMV usage accounting after an entry is
 * added (@is_free == false) or removed (@is_free == true). Entries above
 * the vport's private quota are charged to / refunded from the shared
 * pool. Serialized by umv_mutex.
 */
6596 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6598 struct hclge_dev *hdev = vport->back;
6600 mutex_lock(&hdev->umv_mutex);
/* free path: refund the shared pool first if we had borrowed from it */
6602 if (vport->used_umv_num > hdev->priv_umv_size)
6603 hdev->share_umv_size++;
6605 if (vport->used_umv_num > 0)
6606 vport->used_umv_num--;
/* alloc path: borrow from the shared pool once the private quota is used */
6608 if (vport->used_umv_num >= hdev->priv_umv_size &&
6609 hdev->share_umv_size > 0)
6610 hdev->share_umv_size--;
6611 vport->used_umv_num++;
6613 mutex_unlock(&hdev->umv_mutex);
/* hclge_add_uc_addr - hnae3 hook: add a unicast MAC for this handle's
 * vport.
 */
6616 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6617 const unsigned char *addr)
6619 struct hclge_vport *vport = hclge_get_vport(handle);
6621 return hclge_add_uc_addr_common(vport, addr);
/* hclge_add_uc_addr_common - validate and add a unicast MAC address to
 * the hardware MAC-VLAN table for @vport.
 * Rejects zero/broadcast/multicast addresses; duplicates are detected by
 * an explicit lookup first (the table forbids repeated unicast entries)
 * and only logged. Successful adds update the UMV usage accounting.
 */
6624 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6625 const unsigned char *addr)
6627 struct hclge_dev *hdev = vport->back;
6628 struct hclge_mac_vlan_tbl_entry_cmd req;
6629 struct hclge_desc desc;
6630 u16 egress_port = 0;
6633 /* mac addr check */
6634 if (is_zero_ether_addr(addr) ||
6635 is_broadcast_ether_addr(addr) ||
6636 is_multicast_ether_addr(addr)) {
6637 dev_err(&hdev->pdev->dev,
6638 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6639 addr, is_zero_ether_addr(addr),
6640 is_broadcast_ether_addr(addr),
6641 is_multicast_ether_addr(addr));
6645 memset(&req, 0, sizeof(req));
/* stamp the owning function id into the egress port field */
6647 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6648 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6650 req.egress_port = cpu_to_le16(egress_port);
6652 hclge_prepare_mac_addr(&req, addr, false);
6654 /* Lookup the mac address in the mac_vlan table, and add
6655 * it if the entry is inexistent. Repeated unicast entry
6656 * is not allowed in the mac vlan table.
6658 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6659 if (ret == -ENOENT) {
6660 if (!hclge_is_umv_space_full(vport)) {
6661 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
/* account the new entry against this vport's UMV quota */
6663 hclge_update_umv_space(vport, false);
6667 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6668 hdev->priv_umv_size);
6673 /* check if we just hit the duplicate */
6675 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6676 vport->vport_id, addr);
6680 dev_err(&hdev->pdev->dev,
6681 "PF failed to add unicast entry(%pM) in the MAC table\n",
/* hclge_rm_uc_addr - hnae3 hook: remove a unicast MAC for this handle's
 * vport.
 */
6687 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6688 const unsigned char *addr)
6690 struct hclge_vport *vport = hclge_get_vport(handle);
6692 return hclge_rm_uc_addr_common(vport, addr);
/* hclge_rm_uc_addr_common - remove a unicast MAC from the hardware table
 * and refund the vport's UMV accounting on success.
 * Zero/broadcast/multicast addresses are rejected (debug log only).
 */
6695 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6696 const unsigned char *addr)
6698 struct hclge_dev *hdev = vport->back;
6699 struct hclge_mac_vlan_tbl_entry_cmd req;
6702 /* mac addr check */
6703 if (is_zero_ether_addr(addr) ||
6704 is_broadcast_ether_addr(addr) ||
6705 is_multicast_ether_addr(addr)) {
6706 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
6711 memset(&req, 0, sizeof(req));
6712 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6713 hclge_prepare_mac_addr(&req, addr, false);
6714 ret = hclge_remove_mac_vlan_tbl(vport, &req);
/* is_free=true: give the slot back to the private/shared UMV pool */
6716 hclge_update_umv_space(vport, true);
/* hclge_add_mc_addr - hnae3 hook: add a multicast MAC for this handle's
 * vport.
 */
6721 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6722 const unsigned char *addr)
6724 struct hclge_vport *vport = hclge_get_vport(handle);
6726 return hclge_add_mc_addr_common(vport, addr);
/* hclge_add_mc_addr_common - add a multicast MAC address for @vport.
 * Looks the entry up first: on a miss the 3-descriptor entry is zeroed
 * and created fresh; in both cases this vport's bit is set in the shared
 * VF bitmap before writing the entry back.
 * Returns -ENOSPC (with a log message) when the MC table is full.
 */
6729 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6730 const unsigned char *addr)
6732 struct hclge_dev *hdev = vport->back;
6733 struct hclge_mac_vlan_tbl_entry_cmd req;
6734 struct hclge_desc desc[3];
6737 /* mac addr check */
6738 if (!is_multicast_ether_addr(addr)) {
6739 dev_err(&hdev->pdev->dev,
6740 "Add mc mac err! invalid mac:%pM.\n",
6744 memset(&req, 0, sizeof(req));
6745 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6746 hclge_prepare_mac_addr(&req, addr, true);
6747 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6749 /* This mac addr do not exist, add new entry for it */
6750 memset(desc[0].data, 0, sizeof(desc[0].data));
6751 memset(desc[1].data, 0, sizeof(desc[0].data));
6752 memset(desc[2].data, 0, sizeof(desc[0].data));
/* mark this vport in the entry's VF bitmap (clr=false => set the bit) */
6754 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6757 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6759 if (status == -ENOSPC)
6760 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
/* hclge_rm_mc_addr - hnae3 hook: remove a multicast MAC for this handle's
 * vport.
 */
6765 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6766 const unsigned char *addr)
6768 struct hclge_vport *vport = hclge_get_vport(handle);
6770 return hclge_rm_mc_addr_common(vport, addr);
/* hclge_rm_mc_addr_common - remove this vport from a shared multicast
 * entry: clear its bit in the VF bitmap, then either delete the entry
 * entirely (no vport references it any more) or write back the updated
 * bitmap.
 */
6773 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6774 const unsigned char *addr)
6776 struct hclge_dev *hdev = vport->back;
6777 struct hclge_mac_vlan_tbl_entry_cmd req;
6778 enum hclge_cmd_status status;
6779 struct hclge_desc desc[3];
6781 /* mac addr check */
6782 if (!is_multicast_ether_addr(addr)) {
6783 dev_dbg(&hdev->pdev->dev,
6784 "Remove mc mac err! invalid mac:%pM.\n",
6789 memset(&req, 0, sizeof(req));
6790 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6791 hclge_prepare_mac_addr(&req, addr, true);
6792 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6794 /* This mac addr exist, remove this handle's VFID for it */
6795 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6799 if (hclge_is_all_function_id_zero(desc))
6800 /* All the vfid is zero, so need to delete this entry */
6801 status = hclge_remove_mac_vlan_tbl(vport, &req);
6803 /* Not all the vfid is zero, update the vfid */
6804 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6807 /* Maybe this mac address is in mta table, but it cannot be
6808 * deleted here because an entry of mta represents an address
6809 * range rather than a specific address. the delete action to
6810 * all entries will take effect in update_mta_status called by
6811 * hns3_nic_set_rx_mode.
/* hclge_add_vport_mac_table - record a VF MAC address in the per-vport
 * software list (uc_mac_list or mc_mac_list) so it can be restored or
 * cleaned up later. PF (vport_id 0) addresses are not tracked here.
 */
6819 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6820 enum HCLGE_MAC_ADDR_TYPE mac_type)
6822 struct hclge_vport_mac_addr_cfg *mac_cfg;
6823 struct list_head *list;
/* only VF vports are tracked in the software tables */
6825 if (!vport->vport_id)
6828 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6832 mac_cfg->hd_tbl_status = true;
6833 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6835 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6836 &vport->uc_mac_list : &vport->mc_mac_list;
6838 list_add_tail(&mac_cfg->node, list);
6841 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6843 enum HCLGE_MAC_ADDR_TYPE mac_type)
6845 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6846 struct list_head *list;
6847 bool uc_flag, mc_flag;
6849 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6850 &vport->uc_mac_list : &vport->mc_mac_list;
6852 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6853 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6855 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6856 if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
6857 if (uc_flag && mac_cfg->hd_tbl_status)
6858 hclge_rm_uc_addr_common(vport, mac_addr);
6860 if (mc_flag && mac_cfg->hd_tbl_status)
6861 hclge_rm_mc_addr_common(vport, mac_addr);
6863 list_del(&mac_cfg->node);
/* hclge_rm_vport_all_mac_table - flush the vport's software MAC list of
 * the given type, removing each live entry from the hardware table.
 * @is_del_list: true frees the list nodes; false only marks entries as
 * no longer present in hardware (so they can be re-added later).
 */
6870 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6871 enum HCLGE_MAC_ADDR_TYPE mac_type)
6873 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6874 struct list_head *list;
6876 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6877 &vport->uc_mac_list : &vport->mc_mac_list;
6879 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6880 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6881 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6883 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6884 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6886 mac_cfg->hd_tbl_status = false;
6888 list_del(&mac_cfg->node);
/* hclge_uninit_vport_mac_table - teardown helper: free every tracked MAC
 * list node of every vport. Hardware entries are not touched here.
 * Serialized by vport_cfg_mutex.
 */
6894 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6896 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6897 struct hclge_vport *vport;
6900 mutex_lock(&hdev->vport_cfg_mutex);
6901 for (i = 0; i < hdev->num_alloc_vport; i++) {
6902 vport = &hdev->vport[i];
6903 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6904 list_del(&mac->node);
6908 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6909 list_del(&mac->node);
6913 mutex_unlock(&hdev->vport_cfg_mutex);
/* hclge_get_mac_ethertype_cmd_status - translate the firmware response of
 * a MAC-ethertype manager-table command into an errno.
 * "already added" is treated as success; overflow/conflict/unknown codes
 * map to -EIO with a log message.
 */
6916 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6917 u16 cmdq_resp, u8 resp_code)
6919 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6920 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6921 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6922 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6927 dev_err(&hdev->pdev->dev,
6928 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6933 switch (resp_code) {
6934 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6935 case HCLGE_ETHERTYPE_ALREADY_ADD:
6938 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6939 dev_err(&hdev->pdev->dev,
6940 "add mac ethertype failed for manager table overflow.\n");
6941 return_status = -EIO;
6943 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6944 dev_err(&hdev->pdev->dev,
6945 "add mac ethertype failed for key conflict.\n");
6946 return_status = -EIO;
6949 dev_err(&hdev->pdev->dev,
6950 "add mac ethertype failed for undefined, code=%d.\n",
6952 return_status = -EIO;
6955 return return_status;
/* hclge_add_mgr_tbl - push one MAC-ethertype manager table entry to the
 * firmware and decode the per-entry result.
 */
6958 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6959 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6961 struct hclge_desc desc;
6966 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6967 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6969 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6971 dev_err(&hdev->pdev->dev,
6972 "add mac ethertype failed for cmd_send, ret =%d.\n",
/* operation result is carried in byte 1 of data[0] */
6977 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6978 retval = le16_to_cpu(desc.retval);
6980 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
/* init_mgr_tbl - load the static hclge_mgr_table entries into the
 * firmware manager table; aborts on the first failure.
 */
6983 static int init_mgr_tbl(struct hclge_dev *hdev)
6988 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6989 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6991 dev_err(&hdev->pdev->dev,
6992 "add mac ethertype failed, ret =%d.\n",
/* hclge_get_mac_addr - copy the current PF MAC address into @p
 * (caller provides at least ETH_ALEN bytes).
 */
7001 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7003 struct hclge_vport *vport = hclge_get_vport(handle);
7004 struct hclge_dev *hdev = vport->back;
7006 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7009 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7012 const unsigned char *new_addr = (const unsigned char *)p;
7013 struct hclge_vport *vport = hclge_get_vport(handle);
7014 struct hclge_dev *hdev = vport->back;
7017 /* mac addr check */
7018 if (is_zero_ether_addr(new_addr) ||
7019 is_broadcast_ether_addr(new_addr) ||
7020 is_multicast_ether_addr(new_addr)) {
7021 dev_err(&hdev->pdev->dev,
7022 "Change uc mac err! invalid mac:%p.\n",
7027 if ((!is_first || is_kdump_kernel()) &&
7028 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7029 dev_warn(&hdev->pdev->dev,
7030 "remove old uc mac address fail.\n");
7032 ret = hclge_add_uc_addr(handle, new_addr);
7034 dev_err(&hdev->pdev->dev,
7035 "add uc mac address fail, ret =%d.\n",
7039 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7040 dev_err(&hdev->pdev->dev,
7041 "restore uc mac address fail.\n");
7046 ret = hclge_pause_addr_cfg(hdev, new_addr);
7048 dev_err(&hdev->pdev->dev,
7049 "configure mac pause address fail, ret =%d.\n",
7054 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
/* hclge_do_ioctl - forward MII ioctls to the attached PHY, if any. */
7059 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7062 struct hclge_vport *vport = hclge_get_vport(handle);
7063 struct hclge_dev *hdev = vport->back;
/* no PHY attached (e.g. fibre port): ioctl not supported */
7065 if (!hdev->hw.mac.phydev)
7068 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
/* hclge_set_vlan_filter_ctrl - enable/disable a hardware VLAN filter.
 * @vlan_type: VF- or port-level filter; @fe_type: which filter engines
 * (bit mask); @filter_en: false writes 0, disabling all engines.
 */
7071 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7072 u8 fe_type, bool filter_en, u8 vf_id)
7074 struct hclge_vlan_filter_ctrl_cmd *req;
7075 struct hclge_desc desc;
7078 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7080 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7081 req->vlan_type = vlan_type;
7082 req->vlan_fe = filter_en ? fe_type : 0;
7085 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7087 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
/* VLAN filter selector constants: filter scope (VF vs port) and the
 * per-engine enable bits; _V1_ applies to revision 0x20 hardware, the
 * NIC/RoCE ingress+egress combinations to revision 0x21+.
 */
7093 #define HCLGE_FILTER_TYPE_VF 0
7094 #define HCLGE_FILTER_TYPE_PORT 1
7095 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7096 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7097 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7098 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7099 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7100 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7101 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7102 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7103 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
/* hclge_enable_vlan_filter - toggle VLAN filtering for this handle and
 * mirror the state in netdev_flags. Revision 0x21 configures egress and
 * ingress separately; 0x20 has only the single V1 egress engine.
 */
7105 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7107 struct hclge_vport *vport = hclge_get_vport(handle);
7108 struct hclge_dev *hdev = vport->back;
7110 if (hdev->pdev->revision >= 0x21) {
7111 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7112 HCLGE_FILTER_FE_EGRESS, enable, 0);
7113 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7114 HCLGE_FILTER_FE_INGRESS, enable, 0);
7116 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7117 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7121 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7123 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
/* hclge_set_vf_vlan_common - add or kill a VLAN id in the per-VF VLAN
 * filter via a two-descriptor firmware command (the 256-bit VF bitmap
 * spans both descriptors).
 * When the firmware reports the VF VLAN table full, the vf_vlan_full bit
 * is latched so later adds are skipped; deletes of never-added entries
 * return 0 silently to avoid log spam at unload.
 */
7126 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7127 bool is_kill, u16 vlan, u8 qos,
7130 #define HCLGE_MAX_VF_BYTES 16
7131 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7132 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7133 struct hclge_desc desc[2];
7138 /* if vf vlan table is full, firmware will close vf vlan filter, it
7139 * is unable and unnecessary to add new vlan id to vf vlan filter
7141 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7144 hclge_cmd_setup_basic_desc(&desc[0],
7145 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7146 hclge_cmd_setup_basic_desc(&desc[1],
7147 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7149 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
/* locate this VF's bit in the byte-wise bitmap */
7151 vf_byte_off = vfid / 8;
7152 vf_byte_val = 1 << (vfid % 8);
7154 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7155 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7157 req0->vlan_id = cpu_to_le16(vlan);
7158 req0->vlan_cfg = is_kill;
/* first 16 bitmap bytes live in desc[0], the rest in desc[1] */
7160 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7161 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7163 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7165 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7167 dev_err(&hdev->pdev->dev,
7168 "Send vf vlan command fail, ret =%d.\n",
7174 #define HCLGE_VF_VLAN_NO_ENTRY 2
7175 if (!req0->resp_code || req0->resp_code == 1)
7178 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7179 set_bit(vfid, hdev->vf_vlan_full);
7180 dev_warn(&hdev->pdev->dev,
7181 "vf vlan table is full, vf vlan filter is disabled\n");
7185 dev_err(&hdev->pdev->dev,
7186 "Add vf vlan filter fail, ret =%d.\n",
7189 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7190 if (!req0->resp_code)
7193 /* vf vlan filter is disabled when vf vlan table is full,
7194 * then new vlan id will not be added into vf vlan table.
7195 * Just return 0 without warning, avoid massive verbose
7196 * print logs when unload.
7198 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7201 dev_err(&hdev->pdev->dev,
7202 "Kill vf vlan filter fail, ret =%d.\n",
/* hclge_set_port_vlan_filter - set/clear one VLAN id bit in the port
 * VLAN filter. The 4096-entry table is addressed as 160-bit rows:
 * vlan_offset selects the row, then a byte offset and bit value within
 * that row select the VLAN.
 */
7209 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7210 u16 vlan_id, bool is_kill)
7212 struct hclge_vlan_filter_pf_cfg_cmd *req;
7213 struct hclge_desc desc;
7214 u8 vlan_offset_byte_val;
7215 u8 vlan_offset_byte;
7219 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7221 vlan_offset_160 = vlan_id / 160;
7222 vlan_offset_byte = (vlan_id % 160) / 8;
7223 vlan_offset_byte_val = 1 << (vlan_id % 8);
7225 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7226 req->vlan_offset = vlan_offset_160;
7227 req->vlan_cfg = is_kill;
7228 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7230 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7232 dev_err(&hdev->pdev->dev,
7233 "port vlan command, send fail, ret =%d.\n", ret);
/* hclge_set_vlan_filter_hw - add or kill a VLAN for a vport: update the
 * per-VF filter first, track membership in the software vlan_table
 * bitmap, and touch the shared port filter only when this vport is the
 * first member (add) or the last one leaving (kill).
 */
7237 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7238 u16 vport_id, u16 vlan_id, u8 qos,
7241 u16 vport_idx, vport_num = 0;
/* killing vlan 0 is a no-op; vlan 0 is never actually filtered out */
7244 if (is_kill && !vlan_id)
7247 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7250 dev_err(&hdev->pdev->dev,
7251 "Set %d vport vlan filter config fail, ret =%d.\n",
7256 /* vlan 0 may be added twice when 8021q module is enabled */
7257 if (!is_kill && !vlan_id &&
7258 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7261 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7262 dev_err(&hdev->pdev->dev,
7263 "Add port vlan failed, vport %d is already in vlan %d\n",
7269 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7270 dev_err(&hdev->pdev->dev,
7271 "Delete port vlan failed, vport %d is not in vlan %d\n",
/* count remaining members to decide on the shared port filter */
7276 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7279 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7280 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
/* hclge_set_vlan_tx_offload_cfg - program the vport's TX VLAN offload
 * settings (accept/insert tag1/tag2, default tags) from txvlan_cfg into
 * hardware, addressing the vport via the per-command VF bitmap.
 */
7286 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7288 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7289 struct hclge_vport_vtag_tx_cfg_cmd *req;
7290 struct hclge_dev *hdev = vport->back;
7291 struct hclge_desc desc;
7294 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7296 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7297 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7298 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7299 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7300 vcfg->accept_tag1 ? 1 : 0);
7301 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7302 vcfg->accept_untag1 ? 1 : 0);
7303 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7304 vcfg->accept_tag2 ? 1 : 0);
7305 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7306 vcfg->accept_untag2 ? 1 : 0);
7307 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7308 vcfg->insert_tag1_en ? 1 : 0);
7309 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7310 vcfg->insert_tag2_en ? 1 : 0);
7311 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
/* select this vport in the command's VF bitmap */
7313 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7314 req->vf_bitmap[req->vf_offset] =
7315 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7317 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7319 dev_err(&hdev->pdev->dev,
7320 "Send port txvlan cfg command fail, ret =%d\n",
/* hclge_set_vlan_rx_offload_cfg - program the vport's RX VLAN offload
 * settings (tag strip / priority-only show bits) from rxvlan_cfg into
 * hardware, addressing the vport via the per-command VF bitmap.
 */
7326 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7328 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7329 struct hclge_vport_vtag_rx_cfg_cmd *req;
7330 struct hclge_dev *hdev = vport->back;
7331 struct hclge_desc desc;
7334 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7336 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7337 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7338 vcfg->strip_tag1_en ? 1 : 0);
7339 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7340 vcfg->strip_tag2_en ? 1 : 0);
7341 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7342 vcfg->vlan1_vlan_prionly ? 1 : 0);
7343 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7344 vcfg->vlan2_vlan_prionly ? 1 : 0);
/* select this vport in the command's VF bitmap */
7346 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7347 req->vf_bitmap[req->vf_offset] =
7348 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7350 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7352 dev_err(&hdev->pdev->dev,
7353 "Send port rxvlan cfg command fail, ret =%d\n",
/* hclge_vlan_offload_cfg - derive the vport's TX/RX VLAN offload policy
 * from the port-base-VLAN state and push both to hardware.
 * With port-base VLAN disabled the host's tag1 passes through; with it
 * enabled, hardware inserts @vlan_tag as tag1 and strips it on receive.
 */
7359 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7360 u16 port_base_vlan_state,
7365 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7366 vport->txvlan_cfg.accept_tag1 = true;
7367 vport->txvlan_cfg.insert_tag1_en = false;
7368 vport->txvlan_cfg.default_tag1 = 0;
7370 vport->txvlan_cfg.accept_tag1 = false;
7371 vport->txvlan_cfg.insert_tag1_en = true;
7372 vport->txvlan_cfg.default_tag1 = vlan_tag;
7375 vport->txvlan_cfg.accept_untag1 = true;
7377 /* accept_tag2 and accept_untag2 are not supported on
7378 * pdev revision(0x20), new revision support them,
7379 * this two fields can not be configured by user.
7381 vport->txvlan_cfg.accept_tag2 = true;
7382 vport->txvlan_cfg.accept_untag2 = true;
7383 vport->txvlan_cfg.insert_tag2_en = false;
7384 vport->txvlan_cfg.default_tag2 = 0;
7386 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7387 vport->rxvlan_cfg.strip_tag1_en = false;
7388 vport->rxvlan_cfg.strip_tag2_en =
7389 vport->rxvlan_cfg.rx_vlan_offload_en;
7391 vport->rxvlan_cfg.strip_tag1_en =
7392 vport->rxvlan_cfg.rx_vlan_offload_en;
7393 vport->rxvlan_cfg.strip_tag2_en = true;
7395 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7396 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7398 ret = hclge_set_vlan_tx_offload_cfg(vport);
7402 return hclge_set_vlan_rx_offload_cfg(vport);
/* hclge_set_vlan_protocol_type - program the RX (outer/inner, first and
 * second tag) and TX (outer/inner) VLAN TPIDs from vlan_type_cfg into
 * hardware via two separate commands.
 */
7405 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7407 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7408 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7409 struct hclge_desc desc;
7412 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7413 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7414 rx_req->ot_fst_vlan_type =
7415 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7416 rx_req->ot_sec_vlan_type =
7417 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7418 rx_req->in_fst_vlan_type =
7419 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7420 rx_req->in_sec_vlan_type =
7421 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7423 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7425 dev_err(&hdev->pdev->dev,
7426 "Send rxvlan protocol type command fail, ret =%d\n",
/* second command: TX insert TPIDs */
7431 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7433 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7434 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7435 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7437 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7439 dev_err(&hdev->pdev->dev,
7440 "Send txvlan protocol type command fail, ret =%d\n",
/* hclge_init_vlan_config - initialize VLAN handling at probe/reset:
 * enable the appropriate filter engines per hardware revision, set
 * default 802.1Q TPIDs, apply per-vport offload policy, and finally add
 * VLAN 0 to the filter.
 */
7446 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7448 #define HCLGE_DEF_VLAN_TYPE 0x8100
7450 struct hnae3_handle *handle = &hdev->vport[0].nic;
7451 struct hclge_vport *vport;
7455 if (hdev->pdev->revision >= 0x21) {
7456 /* for revision 0x21, vf vlan filter is per function */
7457 for (i = 0; i < hdev->num_alloc_vport; i++) {
7458 vport = &hdev->vport[i];
7459 ret = hclge_set_vlan_filter_ctrl(hdev,
7460 HCLGE_FILTER_TYPE_VF,
7461 HCLGE_FILTER_FE_EGRESS,
7468 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7469 HCLGE_FILTER_FE_INGRESS, true,
/* revision 0x20: only the V1 egress engine exists */
7474 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7475 HCLGE_FILTER_FE_EGRESS_V1_B,
7481 handle->netdev_flags |= HNAE3_VLAN_FLTR;
/* all six TPID slots default to standard 802.1Q (0x8100) */
7483 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7484 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7485 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7486 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7487 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7488 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7490 ret = hclge_set_vlan_protocol_type(hdev);
7494 for (i = 0; i < hdev->num_alloc_vport; i++) {
7497 vport = &hdev->vport[i];
7498 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7500 ret = hclge_vlan_offload_cfg(vport,
7501 vport->port_base_vlan_cfg.state,
7507 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
/* hclge_add_vport_vlan_table - append a VLAN id to the vport's software
 * VLAN list, recording whether it was already written to the hw table.
 * NOTE(review): the allocation-failure check after kzalloc is missing in
 * this extraction; confirm against the original.
 */
7510 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7513 struct hclge_vport_vlan_cfg *vlan;
7515 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7519 vlan->hd_tbl_status = writen_to_tbl;
7520 vlan->vlan_id = vlan_id;
7522 list_add_tail(&vlan->node, &vport->vlan_list);
/* hclge_add_vport_all_vlan_table - write every not-yet-programmed entry of
 * the vport's software VLAN list into the hw VLAN filter table, marking
 * each entry programmed on success.
 * NOTE(review): the error-return path after hclge_set_vlan_filter_hw() is
 * partially missing from this extraction.
 */
7525 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7527 struct hclge_vport_vlan_cfg *vlan, *tmp;
7528 struct hclge_dev *hdev = vport->back;
7531 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7532 if (!vlan->hd_tbl_status) {
7533 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7535 vlan->vlan_id, 0, false);
7537 dev_err(&hdev->pdev->dev,
7538 "restore vport vlan list failed, ret=%d\n",
7543 vlan->hd_tbl_status = true;
/* hclge_rm_vport_vlan_table - drop one VLAN id from the vport's software
 * list; if requested and the entry was programmed, also remove it from the
 * hw filter table.
 */
7549 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7552 struct hclge_vport_vlan_cfg *vlan, *tmp;
7553 struct hclge_dev *hdev = vport->back;
7555 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7556 if (vlan->vlan_id == vlan_id) {
7557 if (is_write_tbl && vlan->hd_tbl_status)
7558 hclge_set_vlan_filter_hw(hdev,
7564 list_del(&vlan->node);
/* hclge_rm_vport_all_vlan_table - remove every programmed entry of the
 * vport's VLAN list from the hw filter table; when is_del_list is set the
 * software list nodes are freed too, otherwise only hd_tbl_status is
 * cleared so the entries can be restored later.
 */
7571 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7573 struct hclge_vport_vlan_cfg *vlan, *tmp;
7574 struct hclge_dev *hdev = vport->back;
7576 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7577 if (vlan->hd_tbl_status)
7578 hclge_set_vlan_filter_hw(hdev,
7584 vlan->hd_tbl_status = false;
7586 list_del(&vlan->node);
/* hclge_uninit_vport_vlan_table - free the software VLAN list of every
 * vport, under vport_cfg_mutex. NOTE(review): the kfree() of each node is
 * missing from this extraction; confirm against the original.
 */
7592 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7594 struct hclge_vport_vlan_cfg *vlan, *tmp;
7595 struct hclge_vport *vport;
7598 mutex_lock(&hdev->vport_cfg_mutex);
7599 for (i = 0; i < hdev->num_alloc_vport; i++) {
7600 vport = &hdev->vport[i];
7601 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7602 list_del(&vlan->node);
7606 mutex_unlock(&hdev->vport_cfg_mutex);
/* hclge_restore_vlan_table - after a reset, re-program the hw VLAN filter
 * from software state: the port-based VLAN (when enabled) plus every entry
 * of each vport's VLAN list. Runs under vport_cfg_mutex.
 */
7609 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7611 struct hclge_vport *vport = hclge_get_vport(handle);
7612 struct hclge_vport_vlan_cfg *vlan, *tmp;
7613 struct hclge_dev *hdev = vport->back;
7614 u16 vlan_proto, qos;
7618 mutex_lock(&hdev->vport_cfg_mutex);
7619 for (i = 0; i < hdev->num_alloc_vport; i++) {
7620 vport = &hdev->vport[i];
7621 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7622 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7623 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7624 state = vport->port_base_vlan_cfg.state;
/* Port-based VLAN active: restore that single hw entry. */
7626 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7627 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7628 vport->vport_id, vlan_id, qos,
/* Otherwise restore each previously-programmed list entry. */
7633 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7634 if (vlan->hd_tbl_status)
7635 hclge_set_vlan_filter_hw(hdev,
7643 mutex_unlock(&hdev->vport_cfg_mutex);
/* hclge_en_hw_strip_rxvtag - toggle hw RX VLAN tag stripping. Which tag is
 * stripped depends on whether a port-based VLAN is active: with it disabled
 * only tag2 follows 'enable'; with it enabled tag1 follows 'enable' and
 * tag2 is always stripped.
 */
7646 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7648 struct hclge_vport *vport = hclge_get_vport(handle);
7650 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7651 vport->rxvlan_cfg.strip_tag1_en = false;
7652 vport->rxvlan_cfg.strip_tag2_en = enable;
7654 vport->rxvlan_cfg.strip_tag1_en = enable;
7655 vport->rxvlan_cfg.strip_tag2_en = true;
7657 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7658 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7659 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7661 return hclge_set_vlan_rx_offload_cfg(vport);
/* hclge_update_vlan_filter_entries - switch the hw filter between "port
 * based VLAN" mode (single entry from new_info, list entries removed) and
 * list mode (old port-based entry deleted, list entries re-added).
 */
7664 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7665 u16 port_base_vlan_state,
7666 struct hclge_vlan_info *new_info,
7667 struct hclge_vlan_info *old_info)
7669 struct hclge_dev *hdev = vport->back;
7672 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7673 hclge_rm_vport_all_vlan_table(vport, false);
7674 return hclge_set_vlan_filter_hw(hdev,
7675 htons(new_info->vlan_proto),
7678 new_info->qos, false);
/* Disabling: delete the old port-based entry, then restore the list. */
7681 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7682 vport->vport_id, old_info->vlan_tag,
7683 old_info->qos, true);
7687 return hclge_add_vport_all_vlan_table(vport);
/* hclge_update_port_base_vlan_cfg - apply a new port-based VLAN state for a
 * vport: reconfigure offload, swap the hw filter entry (on MODIFY the new
 * tag is added before the old one is removed), update filter entries, then
 * record the resulting state and vlan_info in the vport.
 * NOTE(review): several error-return checks are missing in this extraction.
 */
7690 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7691 struct hclge_vlan_info *vlan_info)
7693 struct hnae3_handle *nic = &vport->nic;
7694 struct hclge_vlan_info *old_vlan_info;
7695 struct hclge_dev *hdev = vport->back;
7698 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7700 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7704 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7705 /* add new VLAN tag */
7706 ret = hclge_set_vlan_filter_hw(hdev,
7707 htons(vlan_info->vlan_proto),
7709 vlan_info->vlan_tag,
7710 vlan_info->qos, false);
7714 /* remove old VLAN tag */
7715 ret = hclge_set_vlan_filter_hw(hdev,
7716 htons(old_vlan_info->vlan_proto),
7718 old_vlan_info->vlan_tag,
7719 old_vlan_info->qos, true);
7726 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7731 /* update state only when disable/enable port based VLAN */
7732 vport->port_base_vlan_cfg.state = state;
7733 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7734 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7736 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
/* Cache the now-active vlan_info on the vport. */
7739 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7740 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7741 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
/* hclge_get_port_base_vlan_state - decide the state transition for a
 * requested port-based VLAN: NOCHANGE / ENABLE / DISABLE / MODIFY,
 * depending on the current state and whether the requested tag differs
 * from the configured one.
 */
7746 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7747 enum hnae3_port_base_vlan_state state,
7750 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7752 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7754 return HNAE3_PORT_BASE_VLAN_ENABLE;
7757 return HNAE3_PORT_BASE_VLAN_DISABLE;
7758 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7759 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7761 return HNAE3_PORT_BASE_VLAN_MODIFY;
/* hclge_set_vf_vlan_filter - ndo handler for "ip link set vf <n> vlan":
 * validates vfid/vlan/qos/proto, computes the state transition, and applies
 * the port-based VLAN to the PF directly or pushes it to an alive VF via
 * mailbox. Not supported on revision 0x20 hardware.
 */
7765 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7766 u16 vlan, u8 qos, __be16 proto)
7768 struct hclge_vport *vport = hclge_get_vport(handle);
7769 struct hclge_dev *hdev = vport->back;
7770 struct hclge_vlan_info vlan_info;
7774 if (hdev->pdev->revision == 0x20)
7777 /* qos is a 3 bits value, so can not be bigger than 7 */
7778 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7780 if (proto != htons(ETH_P_8021Q))
7781 return -EPROTONOSUPPORT;
7783 vport = &hdev->vport[vfid];
7784 state = hclge_get_port_base_vlan_state(vport,
7785 vport->port_base_vlan_cfg.state,
7787 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7790 vlan_info.vlan_tag = vlan;
7791 vlan_info.qos = qos;
7792 vlan_info.vlan_proto = ntohs(proto);
7794 /* update port based VLAN for PF */
7796 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7797 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7798 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
/* VF not alive: update config only; otherwise push via mailbox. */
7803 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7804 return hclge_update_port_base_vlan_cfg(vport, state,
7807 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
/* hclge_set_vlan_filter - add or kill a VLAN id for a vport. During reset,
 * kills are deferred via vlan_del_fail_bmap. When port-based VLAN is
 * disabled the hw table is updated directly; otherwise only the software
 * list is maintained until port-based VLAN is disabled again.
 */
7815 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7816 u16 vlan_id, bool is_kill)
7818 struct hclge_vport *vport = hclge_get_vport(handle);
7819 struct hclge_dev *hdev = vport->back;
7820 bool writen_to_tbl = false;
7823 /* When device is resetting, firmware is unable to handle
7824 * mailbox. Just record the vlan id, and remove it after
7827 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
7828 set_bit(vlan_id, vport->vlan_del_fail_bmap);
7832 /* When port base vlan enabled, we use port base vlan as the vlan
7833 * filter entry. In this case, we don't update vlan filter table
7834 * when user add new vlan or remove exist vlan, just update the vport
7835 * vlan list. The vlan id in vlan list will be writen in vlan filter
7836 * table until port base vlan disabled
7838 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7839 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7840 vlan_id, 0, is_kill);
7841 writen_to_tbl = true;
7846 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7848 hclge_add_vport_vlan_table(vport, vlan_id,
7850 } else if (is_kill) {
7851 /* When remove hw vlan filter failed, record the vlan id,
7852 * and try to remove it from hw later, to be consistence
7855 set_bit(vlan_id, vport->vlan_del_fail_bmap);
/* hclge_sync_vlan_filter - periodic worker that retries VLAN deletions
 * recorded in each vport's vlan_del_fail_bmap, bounded by
 * HCLGE_MAX_SYNC_COUNT iterations per invocation.
 */
7860 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
7862 #define HCLGE_MAX_SYNC_COUNT 60
7864 int i, ret, sync_cnt = 0;
7867 /* start from vport 1 for PF is always alive */
7868 for (i = 0; i < hdev->num_alloc_vport; i++) {
7869 struct hclge_vport *vport = &hdev->vport[i];
7871 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
7873 while (vlan_id != VLAN_N_VID) {
7874 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7875 vport->vport_id, vlan_id,
/* -EINVAL means the entry is already gone; treat as success. */
7877 if (ret && ret != -EINVAL)
7880 clear_bit(vlan_id, vport->vlan_del_fail_bmap)
7881 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7884 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
7887 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
/* hclge_set_mac_mtu - send the CONFIG_MAX_FRM_SIZE firmware command with
 * the new maximum frame size (and the fixed hw minimum).
 */
7893 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7895 struct hclge_config_max_frm_size_cmd *req;
7896 struct hclge_desc desc;
7898 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7900 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7901 req->max_frm_size = cpu_to_le16(new_mps);
7902 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7904 return hclge_cmd_send(&hdev->hw, &desc, 1);
/* hclge_set_mtu - hnae3 ops wrapper: resolve the vport and delegate to
 * hclge_set_vport_mtu().
 */
7907 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7909 struct hclge_vport *vport = hclge_get_vport(handle);
7911 return hclge_set_vport_mtu(vport, new_mtu);
/* hclge_set_vport_mtu - change a vport's max frame size under vport_lock.
 * A VF may only shrink within the PF's mps; the PF must stay >= every VF's
 * mps, and a PF change reprograms the MAC, reallocates buffers, and cycles
 * the client down/up.
 * NOTE(review): some early-return/goto lines are missing in this
 * extraction; verify lock-release paths against the original.
 */
7914 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7916 struct hclge_dev *hdev = vport->back;
7917 int i, max_frm_size, ret;
7919 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7920 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7921 max_frm_size > HCLGE_MAC_MAX_FRAME)
7924 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7925 mutex_lock(&hdev->vport_lock);
7926 /* VF's mps must fit within hdev->mps */
7927 if (vport->vport_id && max_frm_size > hdev->mps) {
7928 mutex_unlock(&hdev->vport_lock);
7930 } else if (vport->vport_id) {
7931 vport->mps = max_frm_size;
7932 mutex_unlock(&hdev->vport_lock);
7936 /* PF's mps must be greater then VF's mps */
7937 for (i = 1; i < hdev->num_alloc_vport; i++)
7938 if (max_frm_size < hdev->vport[i].mps) {
7939 mutex_unlock(&hdev->vport_lock);
7943 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7945 ret = hclge_set_mac_mtu(hdev, max_frm_size);
7947 dev_err(&hdev->pdev->dev,
7948 "Change mtu fail, ret =%d\n", ret);
7952 hdev->mps = max_frm_size;
7953 vport->mps = max_frm_size;
/* New frame size changes the buffer split; reallocate. */
7955 ret = hclge_buffer_alloc(hdev);
7957 dev_err(&hdev->pdev->dev,
7958 "Allocate buffer fail, ret =%d\n", ret);
7961 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7962 mutex_unlock(&hdev->vport_lock);
/* hclge_send_reset_tqp_cmd - issue the RESET_TQP_QUEUE command for one
 * queue, asserting or deasserting the reset bit.
 */
7966 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7969 struct hclge_reset_tqp_queue_cmd *req;
7970 struct hclge_desc desc;
7973 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7975 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7976 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7978 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
7980 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7982 dev_err(&hdev->pdev->dev,
7983 "Send tqp reset cmd error, status =%d\n", ret);
/* hclge_get_reset_status - query whether a queue's hw reset has completed;
 * returns the ready_to_reset bit from the RESET_TQP_QUEUE read response.
 */
7990 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7992 struct hclge_reset_tqp_queue_cmd *req;
7993 struct hclge_desc desc;
7996 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7998 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7999 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8001 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8003 dev_err(&hdev->pdev->dev,
8004 "Get reset status error, status =%d\n", ret);
8008 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
/* hclge_covert_handle_qid_global - translate a handle-relative queue index
 * to the device-global tqp index. (Function name typo "covert" is kept as
 * it is part of the exported interface.)
 */
8011 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8013 struct hnae3_queue *queue;
8014 struct hclge_tqp *tqp;
8016 queue = handle->kinfo.tqp[queue_id];
8017 tqp = container_of(queue, struct hclge_tqp, q);
/* hclge_reset_tqp - reset one queue pair: disable the tqp, assert the
 * reset command, poll for completion up to HCLGE_TQP_RESET_TRY_TIMES,
 * then deassert the reset.
 */
8022 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8024 struct hclge_vport *vport = hclge_get_vport(handle);
8025 struct hclge_dev *hdev = vport->back;
8026 int reset_try_times = 0;
8031 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8033 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8035 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8039 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8041 dev_err(&hdev->pdev->dev,
8042 "Send reset tqp cmd fail, ret = %d\n", ret);
8046 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8047 /* Wait for tqp hw reset */
8049 reset_status = hclge_get_reset_status(hdev, queue_gid);
8054 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8055 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8059 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8061 dev_err(&hdev->pdev->dev,
8062 "Deassert the soft reset fail, ret = %d\n", ret);
/* hclge_reset_vf_queue - same reset sequence as hclge_reset_tqp but for a
 * VF's queue (no tqp disable step); failures are logged as warnings since
 * the caller cannot propagate an error.
 */
8067 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8069 struct hclge_dev *hdev = vport->back;
8070 int reset_try_times = 0;
8075 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8077 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8079 dev_warn(&hdev->pdev->dev,
8080 "Send reset tqp cmd fail, ret = %d\n", ret);
8084 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8085 /* Wait for tqp hw reset */
8087 reset_status = hclge_get_reset_status(hdev, queue_gid);
8092 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8093 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8097 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8099 dev_warn(&hdev->pdev->dev,
8100 "Deassert the soft reset fail, ret = %d\n", ret);
/* hclge_get_fw_version - report the cached firmware version. */
8103 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8105 struct hclge_vport *vport = hclge_get_vport(handle);
8106 struct hclge_dev *hdev = vport->back;
8108 return hdev->fw_version;
/* hclge_set_flowctrl_adv - advertise pause/asym-pause via phylib when a
 * PHY is attached. NOTE(review): the NULL-phydev guard appears to be
 * dropped by this extraction.
 */
8111 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8113 struct phy_device *phydev = hdev->hw.mac.phydev;
8118 phy_set_asym_pause(phydev, rx_en, tx_en);
/* hclge_cfg_pauseparam - record the requested link flow-control mode
 * (FULL/RX/TX/NONE) and program MAC pause unless PFC is active; on
 * success the tm fc_mode is updated to match.
 */
8121 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8126 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8127 else if (rx_en && !tx_en)
8128 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8129 else if (!rx_en && tx_en)
8130 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8132 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8134 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8137 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8139 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
8144 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
/* hclge_cfg_flowctrl - after PHY autoneg completes, resolve local and
 * link-partner pause advertisements with mii_resolve_flowctrl_fdx() and
 * apply the result; half-duplex links get pause disabled.
 */
8149 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8151 struct phy_device *phydev = hdev->hw.mac.phydev;
8152 u16 remote_advertising = 0;
8153 u16 local_advertising;
8154 u32 rx_pause, tx_pause;
8157 if (!phydev->link || !phydev->autoneg)
8160 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8163 remote_advertising = LPA_PAUSE_CAP;
8165 if (phydev->asym_pause)
8166 remote_advertising |= LPA_PAUSE_ASYM;
8168 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8169 remote_advertising);
8170 tx_pause = flowctl & FLOW_CTRL_TX;
8171 rx_pause = flowctl & FLOW_CTRL_RX;
8173 if (phydev->duplex == HCLGE_MAC_HALF) {
8178 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
/* hclge_get_pauseparam - ethtool get_pauseparam: report autoneg (only when
 * a PHY exists) and translate the tm fc_mode into rx_en/tx_en flags.
 * NOTE(review): the assignments inside each fc_mode branch were dropped by
 * this extraction.
 */
8181 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8182 u32 *rx_en, u32 *tx_en)
8184 struct hclge_vport *vport = hclge_get_vport(handle);
8185 struct hclge_dev *hdev = vport->back;
8186 struct phy_device *phydev = hdev->hw.mac.phydev;
8188 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8190 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8196 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8199 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8202 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8211 }
/* hclge_set_pauseparam - ethtool set_pauseparam: reject autoneg changes
 * here and any change while PFC is active; otherwise update PHY pause
 * advertisement and either program MAC pause directly (no PHY / autoneg
 * off) or restart aneg to negotiate the new setting.
 */
8211 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8212 u32 rx_en, u32 tx_en)
8214 struct hclge_vport *vport = hclge_get_vport(handle);
8215 struct hclge_dev *hdev = vport->back;
8216 struct phy_device *phydev = hdev->hw.mac.phydev;
8220 fc_autoneg = hclge_get_autoneg(handle);
8221 if (auto_neg != fc_autoneg) {
8222 dev_info(&hdev->pdev->dev,
8223 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8228 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8229 dev_info(&hdev->pdev->dev,
8230 "Priority flow control enabled. Cannot set link flow control.\n");
8234 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8237 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8240 return phy_start_aneg(phydev);
/* hclge_get_ksettings_an_result - copy cached MAC speed/duplex/autoneg
 * into the caller-provided out parameters (each optional).
 */
8245 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8246 u8 *auto_neg, u32 *speed, u8 *duplex)
8248 struct hclge_vport *vport = hclge_get_vport(handle);
8249 struct hclge_dev *hdev = vport->back;
8252 *speed = hdev->hw.mac.speed;
8254 *duplex = hdev->hw.mac.duplex;
8256 *auto_neg = hdev->hw.mac.autoneg;
/* hclge_get_media_type - report the cached MAC media type and (optionally)
 * SFP module type.
 */
8259 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8262 struct hclge_vport *vport = hclge_get_vport(handle);
8263 struct hclge_dev *hdev = vport->back;
8266 *media_type = hdev->hw.mac.media_type;
8269 *module_type = hdev->hw.mac.module_type;
/* hclge_get_mdix_mode - read MDI/MDI-X control and status from the PHY's
 * MDIX register page, then restore the copper page; results are mapped to
 * the ETH_TP_MDI* constants (INVALID when unresolved or no PHY).
 */
8272 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8273 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8275 struct hclge_vport *vport = hclge_get_vport(handle);
8276 struct hclge_dev *hdev = vport->back;
8277 struct phy_device *phydev = hdev->hw.mac.phydev;
8278 int mdix_ctrl, mdix, is_resolved;
8279 unsigned int retval;
8282 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8283 *tp_mdix = ETH_TP_MDI_INVALID;
/* Switch to the MDIX register page for the reads below. */
8287 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8289 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8290 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8291 HCLGE_PHY_MDIX_CTRL_S);
8293 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8294 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8295 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
/* Restore the default copper page before decoding results. */
8297 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8299 switch (mdix_ctrl) {
8301 *tp_mdix_ctrl = ETH_TP_MDI;
8304 *tp_mdix_ctrl = ETH_TP_MDI_X;
8307 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8310 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8315 *tp_mdix = ETH_TP_MDI_INVALID;
8317 *tp_mdix = ETH_TP_MDI_X;
8319 *tp_mdix = ETH_TP_MDI;
/* hclge_info_show - dump a summary of the PF configuration (queue counts,
 * buffer sizes, flags) to the kernel log at probe time.
 */
8322 static void hclge_info_show(struct hclge_dev *hdev)
8324 struct device *dev = &hdev->pdev->dev;
8326 dev_info(dev, "PF info begin:\n");
8328 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8329 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8330 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8331 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8332 dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8333 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8334 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8335 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8336 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8337 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8338 dev_info(dev, "This is %s PF\n",
8339 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8340 dev_info(dev, "DCB %s\n",
8341 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8342 dev_info(dev, "MQPRIO %s\n",
8343 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8345 dev_info(dev, "PF info end.\n");
/* hclge_init_nic_client_instance - init the NIC client on a vport; if a
 * reset fired during init (reset counter changed or reset in progress),
 * roll back by waiting for the reset to finish and uninit'ing the
 * instance. Also enables NIC hw-error interrupts.
 */
8348 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8349 struct hclge_vport *vport)
8351 struct hnae3_client *client = vport->nic.client;
8352 struct hclge_dev *hdev = ae_dev->priv;
8356 rst_cnt = hdev->rst_stats.reset_cnt;
8357 ret = client->ops->init_instance(&vport->nic);
8361 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
/* A reset during init invalidates the instance — unwind. */
8362 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8363 rst_cnt != hdev->rst_stats.reset_cnt) {
8368 /* Enable nic hw error interrupts */
8369 ret = hclge_config_nic_hw_error(hdev, true);
8371 dev_err(&ae_dev->pdev->dev,
8372 "fail(%d) to enable hw error interrupts\n", ret);
8376 hnae3_set_client_init_flag(client, ae_dev, 1);
8378 if (netif_msg_drv(&hdev->vport->nic))
8379 hclge_info_show(hdev);
/* Rollback path: wait out any in-flight reset, then uninit. */
8384 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8385 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8386 msleep(HCLGE_WAIT_RESET_DONE);
8388 client->ops->uninit_instance(&vport->nic, 0);
/* hclge_init_roce_client_instance - init the RoCE client on a vport when
 * the device supports RoCE and a roce client is registered; mirrors the
 * NIC path's reset-during-init rollback and enables RoCE RAS interrupts.
 */
8393 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8394 struct hclge_vport *vport)
8396 struct hnae3_client *client = vport->roce.client;
8397 struct hclge_dev *hdev = ae_dev->priv;
8401 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8405 client = hdev->roce_client;
8406 ret = hclge_init_roce_base_info(vport);
8410 rst_cnt = hdev->rst_stats.reset_cnt;
8411 ret = client->ops->init_instance(&vport->roce);
8415 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
/* A reset during init invalidates the instance — unwind. */
8416 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8417 rst_cnt != hdev->rst_stats.reset_cnt) {
8422 /* Enable roce ras interrupts */
8423 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8425 dev_err(&ae_dev->pdev->dev,
8426 "fail(%d) to enable roce ras interrupts\n", ret);
8430 hnae3_set_client_init_flag(client, ae_dev, 1);
/* Rollback path: wait out any in-flight reset, then uninit. */
8435 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8436 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8437 msleep(HCLGE_WAIT_RESET_DONE);
8439 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
/* hclge_init_client_instance - register a KNIC or ROCE client on every
 * vport (vmdq vports + PF). A KNIC client also triggers RoCE init in case
 * a roce client registered earlier. On failure the client pointers are
 * cleared. NOTE(review): error-path labels/gotos are missing in this
 * extraction.
 */
8444 static int hclge_init_client_instance(struct hnae3_client *client,
8445 struct hnae3_ae_dev *ae_dev)
8447 struct hclge_dev *hdev = ae_dev->priv;
8448 struct hclge_vport *vport;
8451 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8452 vport = &hdev->vport[i];
8454 switch (client->type) {
8455 case HNAE3_CLIENT_KNIC:
8457 hdev->nic_client = client;
8458 vport->nic.client = client;
8459 ret = hclge_init_nic_client_instance(ae_dev, vport);
8463 ret = hclge_init_roce_client_instance(ae_dev, vport);
8468 case HNAE3_CLIENT_ROCE:
8469 if (hnae3_dev_roce_supported(hdev)) {
8470 hdev->roce_client = client;
8471 vport->roce.client = client;
8474 ret = hclge_init_roce_client_instance(ae_dev, vport);
/* Error cleanup: detach the client from hdev and vport. */
8487 hdev->nic_client = NULL;
8488 vport->nic.client = NULL;
8491 hdev->roce_client = NULL;
8492 vport->roce.client = NULL;
/* hclge_uninit_client_instance - tear down client instances on every
 * vport: RoCE first (always, when present), then the NIC instance unless
 * the departing client is the RoCE one. Waits for any in-flight reset
 * before each uninit.
 */
8496 static void hclge_uninit_client_instance(struct hnae3_client *client,
8497 struct hnae3_ae_dev *ae_dev)
8499 struct hclge_dev *hdev = ae_dev->priv;
8500 struct hclge_vport *vport;
8503 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8504 vport = &hdev->vport[i];
8505 if (hdev->roce_client) {
8506 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8507 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8508 msleep(HCLGE_WAIT_RESET_DONE);
8510 hdev->roce_client->ops->uninit_instance(&vport->roce,
8512 hdev->roce_client = NULL;
8513 vport->roce.client = NULL;
/* A pure RoCE client removal stops here; NIC stays up. */
8515 if (client->type == HNAE3_CLIENT_ROCE)
8517 if (hdev->nic_client && client->ops->uninit_instance) {
8518 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8519 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8520 msleep(HCLGE_WAIT_RESET_DONE);
8522 client->ops->uninit_instance(&vport->nic, 0);
8523 hdev->nic_client = NULL;
8524 vport->nic.client = NULL;
/* hclge_pci_init - enable the PCI device, set the DMA mask (64-bit with
 * 32-bit fallback), request regions, map BAR2, and record the number of
 * requestable VFs. Unwinds via goto labels on failure.
 * NOTE(review): the "hw = &hdev->hw" style assignment before the iomap is
 * missing in this extraction.
 */
8529 static int hclge_pci_init(struct hclge_dev *hdev)
8531 struct pci_dev *pdev = hdev->pdev;
8532 struct hclge_hw *hw;
8535 ret = pci_enable_device(pdev);
8537 dev_err(&pdev->dev, "failed to enable PCI device\n");
8541 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8543 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8546 "can't set consistent PCI DMA");
8547 goto err_disable_device;
8549 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8552 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8554 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8555 goto err_disable_device;
8558 pci_set_master(pdev);
8560 hw->io_base = pcim_iomap(pdev, 2, 0);
8562 dev_err(&pdev->dev, "Can't map configuration register space\n");
8564 goto err_clr_master;
8567 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
/* Error unwinding labels. */
8571 pci_clear_master(pdev);
8572 pci_release_regions(pdev);
8574 pci_disable_device(pdev);
/* hclge_pci_uninit - reverse of hclge_pci_init: unmap BAR, free IRQ
 * vectors, drop bus mastering, release regions, disable the device.
 */
8579 static void hclge_pci_uninit(struct hclge_dev *hdev)
8581 struct pci_dev *pdev = hdev->pdev;
8583 pcim_iounmap(pdev, hdev->hw.io_base);
8584 pci_free_irq_vectors(pdev);
8585 pci_clear_master(pdev);
8586 pci_release_mem_regions(pdev);
8587 pci_disable_device(pdev);
/* hclge_state_init - set the initial driver state bits after probe:
 * service initialized + device down, all pending-work bits cleared.
 */
8590 static void hclge_state_init(struct hclge_dev *hdev)
8592 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8593 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8594 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8595 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8596 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8597 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
/* hclge_state_uninit - mark the device down/removing and synchronously
 * stop every timer and work item the driver may have armed.
 */
8600 static void hclge_state_uninit(struct hclge_dev *hdev)
8602 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8603 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8605 if (hdev->service_timer.function)
8606 del_timer_sync(&hdev->service_timer);
8607 if (hdev->reset_timer.function)
8608 del_timer_sync(&hdev->reset_timer);
8609 if (hdev->service_task.func)
8610 cancel_work_sync(&hdev->service_task);
8611 if (hdev->rst_service_task.func)
8612 cancel_work_sync(&hdev->rst_service_task);
8613 if (hdev->mbx_service_task.func)
8614 cancel_work_sync(&hdev->mbx_service_task);
/* hclge_flr_prepare - request an FLR reset and busy-wait (up to
 * 50 * 100 ms) for the reset path to bring the function down, logging a
 * timeout otherwise.
 */
8617 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8619 #define HCLGE_FLR_WAIT_MS 100
8620 #define HCLGE_FLR_WAIT_CNT 50
8621 struct hclge_dev *hdev = ae_dev->priv;
8624 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8625 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8626 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8627 hclge_reset_event(hdev->pdev, NULL);
8629 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8630 cnt++ < HCLGE_FLR_WAIT_CNT)
8631 msleep(HCLGE_FLR_WAIT_MS);
8633 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8634 dev_err(&hdev->pdev->dev,
8635 "flr wait down timeout: %d\n", cnt);
/* hclge_flr_done - signal the reset task that the FLR has completed. */
8638 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8640 struct hclge_dev *hdev = ae_dev->priv;
8642 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
/* hclge_clear_resetting_state - clear the FUNC_RST_ING flag for every
 * vport/VF; failures are only logged.
 */
8645 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
8649 for (i = 0; i < hdev->num_alloc_vport; i++) {
8650 struct hclge_vport *vport = &hdev->vport[i];
8653 /* Send cmd to clear VF's FUNC_RST_ING */
8654 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
8656 dev_warn(&hdev->pdev->dev,
8657 "clear vf(%d) rst failed %d!\n",
8658 vport->vport_id, ret);
/* hclge_init_ae_dev - main probe path for the PF: allocate hdev, init PCI,
 * firmware command queue, capabilities, MSI, misc IRQ, tqps/vports, MDIO
 * (copper only), UMV space, MAC, TSO/GRO, VLAN, TM, RSS, manager table,
 * and flow director; then arm timers/work items, clear stale events, and
 * handle any pre-existing hw errors via a delayed reset. Errors unwind
 * through the goto labels at the bottom.
 * NOTE(review): numerous `if (ret)` guard lines were dropped by this
 * extraction; verify against the original before changing control flow.
 */
8662 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8664 struct pci_dev *pdev = ae_dev->pdev;
8665 struct hclge_dev *hdev;
8668 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8675 hdev->ae_dev = ae_dev;
8676 hdev->reset_type = HNAE3_NONE_RESET;
8677 hdev->reset_level = HNAE3_FUNC_RESET;
8678 ae_dev->priv = hdev;
8679 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8681 mutex_init(&hdev->vport_lock);
8682 mutex_init(&hdev->vport_cfg_mutex);
8683 spin_lock_init(&hdev->fd_rule_lock);
8685 ret = hclge_pci_init(hdev);
8687 dev_err(&pdev->dev, "PCI init failed\n");
8691 /* Firmware command queue initialize */
8692 ret = hclge_cmd_queue_init(hdev);
8694 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8695 goto err_pci_uninit;
8698 /* Firmware command initialize */
8699 ret = hclge_cmd_init(hdev);
8701 goto err_cmd_uninit;
8703 ret = hclge_get_cap(hdev);
8705 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8707 goto err_cmd_uninit;
8710 ret = hclge_configure(hdev);
8712 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8713 goto err_cmd_uninit;
8716 ret = hclge_init_msi(hdev);
8718 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8719 goto err_cmd_uninit;
8722 ret = hclge_misc_irq_init(hdev);
8725 "Misc IRQ(vector0) init error, ret = %d.\n",
8727 goto err_msi_uninit;
8730 ret = hclge_alloc_tqps(hdev);
8732 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8733 goto err_msi_irq_uninit;
8736 ret = hclge_alloc_vport(hdev);
8738 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8739 goto err_msi_irq_uninit;
8742 ret = hclge_map_tqp(hdev);
8744 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8745 goto err_msi_irq_uninit;
/* Copper media needs the MDIO bus for the attached PHY. */
8748 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8749 ret = hclge_mac_mdio_config(hdev);
8751 dev_err(&hdev->pdev->dev,
8752 "mdio config fail ret=%d\n", ret);
8753 goto err_msi_irq_uninit;
8757 ret = hclge_init_umv_space(hdev);
8759 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8760 goto err_mdiobus_unreg;
8763 ret = hclge_mac_init(hdev);
8765 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8766 goto err_mdiobus_unreg;
8769 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8771 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8772 goto err_mdiobus_unreg;
8775 ret = hclge_config_gro(hdev, true);
8777 goto err_mdiobus_unreg;
8779 ret = hclge_init_vlan_config(hdev);
8781 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8782 goto err_mdiobus_unreg;
8785 ret = hclge_tm_schd_init(hdev);
8787 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8788 goto err_mdiobus_unreg;
8791 hclge_rss_init_cfg(hdev);
8792 ret = hclge_rss_init_hw(hdev);
8794 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8795 goto err_mdiobus_unreg;
8798 ret = init_mgr_tbl(hdev);
8800 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8801 goto err_mdiobus_unreg;
8804 ret = hclge_init_fd_config(hdev);
8807 "fd table init fail, ret=%d\n", ret);
8808 goto err_mdiobus_unreg;
8811 INIT_KFIFO(hdev->mac_tnl_log);
8813 hclge_dcb_ops_set(hdev);
8815 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8816 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8817 INIT_WORK(&hdev->service_task, hclge_service_task);
8818 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8819 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8821 hclge_clear_all_event_cause(hdev);
8822 hclge_clear_resetting_state(hdev);
8824 /* Log and clear the hw errors those already occurred */
8825 hclge_handle_all_hns_hw_errors(ae_dev);
8827 /* request delayed reset for the error recovery because an immediate
8828 * global reset on a PF affecting pending initialization of other PFs
8830 if (ae_dev->hw_err_reset_req) {
8831 enum hnae3_reset_type reset_level;
8833 reset_level = hclge_get_reset_level(ae_dev,
8834 &ae_dev->hw_err_reset_req);
8835 hclge_set_def_reset_request(ae_dev, reset_level);
8836 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
8839 /* Enable MISC vector(vector0) */
8840 hclge_enable_vector(&hdev->misc_vector, true);
8842 hclge_state_init(hdev);
8843 hdev->last_reset_time = jiffies;
8845 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
/* Error unwinding labels, in reverse order of acquisition. */
8849 if (hdev->hw.mac.phydev)
8850 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8852 hclge_misc_irq_uninit(hdev);
8854 pci_free_irq_vectors(pdev);
8856 hclge_cmd_uninit(hdev);
8858 pcim_iounmap(pdev, hdev->hw.io_base);
8859 pci_clear_master(pdev);
8860 pci_release_regions(pdev);
8861 pci_disable_device(pdev);
/* hclge_stats_clear - zero the accumulated hardware statistics. */
8866 static void hclge_stats_clear(struct hclge_dev *hdev)
8868 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
/* hclge_reset_vport_state - stop every vport after a device reset.
 * NOTE(review): the per-iteration vport increment appears to be missing
 * from this extraction; confirm against the original.
 */
8871 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8873 struct hclge_vport *vport = hdev->vport;
8876 for (i = 0; i < hdev->num_alloc_vport; i++) {
8877 hclge_vport_stop(vport);
/* hclge_reset_ae_dev - re-initialize the device after a hw reset: clear
 * stats and VLAN tables, re-run command/tqp/UMV/MAC/TSO/GRO/VLAN/TM/RSS/FD
 * init, re-enable hw-error (and, when present, RoCE RAS) interrupts, and
 * stop all vports. NOTE(review): `if (ret)` guards between steps were
 * dropped by this extraction.
 */
8882 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8884 struct hclge_dev *hdev = ae_dev->priv;
8885 struct pci_dev *pdev = ae_dev->pdev;
8888 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8890 hclge_stats_clear(hdev);
8891 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8892 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8894 ret = hclge_cmd_init(hdev);
8896 dev_err(&pdev->dev, "Cmd queue init failed\n");
8900 ret = hclge_map_tqp(hdev);
8902 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8906 hclge_reset_umv_space(hdev);
8908 ret = hclge_mac_init(hdev);
8910 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8914 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8916 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8920 ret = hclge_config_gro(hdev, true);
8924 ret = hclge_init_vlan_config(hdev);
8926 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8930 ret = hclge_tm_init_hw(hdev, true);
8932 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8936 ret = hclge_rss_init_hw(hdev);
8938 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8942 ret = hclge_init_fd_config(hdev);
8944 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
8948 /* Re-enable the hw error interrupts because
8949 * the interrupts get disabled on global reset.
8951 ret = hclge_config_nic_hw_error(hdev, true);
8954 "fail(%d) to re-enable NIC hw error interrupts\n",
8959 if (hdev->roce_client) {
8960 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8963 "fail(%d) to re-enable roce ras interrupts\n",
8969 hclge_reset_vport_state(hdev);
8971 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
/* Tear down the PF: stop state machinery, unregister MDIO, disable all
 * interrupt sources, then release command queue, IRQs, PCI resources and
 * software tables. The order is deliberate — interrupts are quiesced
 * before the command queue and IRQ vectors are freed.
 *
 * NOTE(review): upstream guards mdiobus_unregister() with an
 * `if (mac->phydev)` check; that line appears lost in this extraction —
 * confirm against the unmangled file.
 */
8977 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8979 struct hclge_dev *hdev = ae_dev->priv;
8980 struct hclge_mac *mac = &hdev->hw.mac;
8982 hclge_state_uninit(hdev);
8985 mdiobus_unregister(mac->mdio_bus);
8987 hclge_uninit_umv_space(hdev);
8989 /* Disable MISC vector(vector0) */
8990 hclge_enable_vector(&hdev->misc_vector, false);
/* Wait for any in-flight misc interrupt handler to finish */
8991 synchronize_irq(hdev->misc_vector.vector_irq);
8993 /* Disable all hw interrupts */
8994 hclge_config_mac_tnl_int(hdev, false);
8995 hclge_config_nic_hw_error(hdev, false);
8996 hclge_config_rocee_ras_interrupt(hdev, false);
8998 hclge_cmd_uninit(hdev);
8999 hclge_misc_irq_uninit(hdev);
9000 hclge_pci_uninit(hdev);
9001 mutex_destroy(&hdev->vport_lock);
9002 hclge_uninit_vport_mac_table(hdev);
9003 hclge_uninit_vport_vlan_table(hdev);
9004 mutex_destroy(&hdev->vport_cfg_mutex);
9005 ae_dev->priv = NULL;
9008 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9010 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9011 struct hclge_vport *vport = hclge_get_vport(handle);
9012 struct hclge_dev *hdev = vport->back;
9014 return min_t(u32, hdev->rss_size_max,
9015 vport->alloc_tqps / kinfo->num_tc);
9018 static void hclge_get_channels(struct hnae3_handle *handle,
9019 struct ethtool_channels *ch)
9021 ch->max_combined = hclge_get_max_channels(handle);
9022 ch->other_count = 1;
9024 ch->combined_count = handle->kinfo.rss_size;
9027 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9028 u16 *alloc_tqps, u16 *max_rss_size)
9030 struct hclge_vport *vport = hclge_get_vport(handle);
9031 struct hclge_dev *hdev = vport->back;
9033 *alloc_tqps = vport->alloc_tqps;
9034 *max_rss_size = hdev->rss_size_max;
/* Change the number of combined channels: update the requested RSS size,
 * re-map the vport's TQPs, reprogram the RSS TC mode, and (unless the
 * user configured the indirection table) rebuild the RSS indirect table
 * for the new queue count.
 *
 * NOTE(review): this extraction has lost several declarations
 * (`u32 roundup_size; u32 *rss_indir; int ret, i;`), the `if (ret)`
 * guards after each step, the allocation-failure check for rss_indir,
 * and the kfree()/out-label tail — confirm against the unmangled file.
 */
9037 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9038 bool rxfh_configured)
9040 struct hclge_vport *vport = hclge_get_vport(handle);
9041 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9042 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9043 struct hclge_dev *hdev = vport->back;
9044 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
/* Remember the old sizes for the change report at the end */
9045 int cur_rss_size = kinfo->rss_size;
9046 int cur_tqps = kinfo->num_tqps;
9047 u16 tc_valid[HCLGE_MAX_TC_NUM];
9053 kinfo->req_rss_size = new_tqps_num;
/* Re-map TQPs; this updates kinfo->rss_size to the granted value */
9055 ret = hclge_tm_vport_map_update(hdev);
9057 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
/* HW takes the TC size as log2 of the rounded-up RSS size */
9061 roundup_size = roundup_pow_of_two(kinfo->rss_size);
9062 roundup_size = ilog2(roundup_size);
9063 /* Set the RSS TC mode according to the new RSS size */
9064 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
/* Skip TCs not present in the hardware TC map */
9067 if (!(hdev->hw_tc_map & BIT(i)))
9071 tc_size[i] = roundup_size;
9072 tc_offset[i] = kinfo->rss_size * i;
9074 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9078 /* RSS indirection table has been configuared by user */
9079 if (rxfh_configured)
9082 /* Reinitializes the rss indirect table according to the new RSS size */
9083 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9087 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9088 rss_indir[i] = i % kinfo->rss_size;
9090 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9092 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9099 dev_info(&hdev->pdev->dev,
9100 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
9101 cur_rss_size, kinfo->rss_size,
9102 cur_tqps, kinfo->rss_size * kinfo->num_tc);
9107 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9108 u32 *regs_num_64_bit)
9110 struct hclge_desc desc;
9114 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9115 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9117 dev_err(&hdev->pdev->dev,
9118 "Query register number cmd failed, ret = %d.\n", ret);
9122 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
9123 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
9125 total_num = *regs_num_32_bit + *regs_num_64_bit;
/* Read @regs_num 32-bit registers from firmware into @data. The values
 * arrive packed in an array of command descriptors: the first descriptor
 * carries header words (nodata_num) before the payload, subsequent ones
 * are payload-only.
 *
 * NOTE(review): the extraction has lost the declarations
 * (`__le32 *desc_data; int nodata_num, cmd_num, i, k, n, ret;`), the
 * regs_num==0 early return, the kcalloc failure check, the per-word
 * `if (!--regs_num) break;` guard and the kfree()/return tail —
 * confirm against the unmangled file.
 */
9132 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9135 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
9136 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
9138 struct hclge_desc *desc;
9139 u32 *reg_val = data;
9149 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
/* Descriptors needed to carry regs_num words plus the header words */
9150 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9151 HCLGE_32_BIT_REG_RTN_DATANUM);
9152 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9156 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9157 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9159 dev_err(&hdev->pdev->dev,
9160 "Query 32 bit register cmd failed, ret = %d.\n", ret);
9165 for (i = 0; i < cmd_num; i++) {
/* First descriptor: payload starts after the header words */
9167 desc_data = (__le32 *)(&desc[i].data[0]);
9168 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
/* Later descriptors: the whole descriptor body is payload */
9170 desc_data = (__le32 *)(&desc[i]);
9171 n = HCLGE_32_BIT_REG_RTN_DATANUM;
9173 for (k = 0; k < n; k++) {
9174 *reg_val++ = le32_to_cpu(*desc_data++);
/* Read @regs_num 64-bit registers from firmware into @data. Same packing
 * scheme as hclge_get_32_bit_regs(), but with 64-bit words: the first
 * descriptor carries one header word (nodata_len) before the payload.
 *
 * NOTE(review): as with the 32-bit variant, declarations, the
 * regs_num==0 early return, allocation failure check, per-word
 * break guard and kfree()/return tail were lost in this extraction —
 * confirm against the unmangled file.
 */
9186 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9189 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9190 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9192 struct hclge_desc *desc;
9193 u64 *reg_val = data;
9203 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
/* Descriptors needed to carry regs_num words plus the header word */
9204 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9205 HCLGE_64_BIT_REG_RTN_DATANUM);
9206 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9210 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9211 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9213 dev_err(&hdev->pdev->dev,
9214 "Query 64 bit register cmd failed, ret = %d.\n", ret);
9219 for (i = 0; i < cmd_num; i++) {
/* First descriptor: payload starts after the header word */
9221 desc_data = (__le64 *)(&desc[i].data[0]);
9222 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
/* Later descriptors: the whole descriptor body is payload */
9224 desc_data = (__le64 *)(&desc[i]);
9225 n = HCLGE_64_BIT_REG_RTN_DATANUM;
9227 for (k = 0; k < n; k++) {
9228 *reg_val++ = le64_to_cpu(*desc_data++);
9240 #define MAX_SEPARATE_NUM 4
9241 #define SEPARATOR_VALUE 0xFFFFFFFF
9242 #define REG_NUM_PER_LINE 4
9243 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
9245 static int hclge_get_regs_len(struct hnae3_handle *handle)
9247 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9248 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9249 struct hclge_vport *vport = hclge_get_vport(handle);
9250 struct hclge_dev *hdev = vport->back;
9251 u32 regs_num_32_bit, regs_num_64_bit;
9254 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
9256 dev_err(&hdev->pdev->dev,
9257 "Get register number failed, ret = %d.\n", ret);
9261 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9262 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9263 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9264 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9266 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9267 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9268 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9271 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9274 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9275 struct hclge_vport *vport = hclge_get_vport(handle);
9276 struct hclge_dev *hdev = vport->back;
9277 u32 regs_num_32_bit, regs_num_64_bit;
9278 int i, j, reg_um, separator_num;
9282 *version = hdev->fw_version;
9284 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
9286 dev_err(&hdev->pdev->dev,
9287 "Get register number failed, ret = %d.\n", ret);
9291 /* fetching per-PF registers valus from PF PCIe register space */
9292 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9293 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9294 for (i = 0; i < reg_um; i++)
9295 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9296 for (i = 0; i < separator_num; i++)
9297 *reg++ = SEPARATOR_VALUE;
9299 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9300 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9301 for (i = 0; i < reg_um; i++)
9302 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9303 for (i = 0; i < separator_num; i++)
9304 *reg++ = SEPARATOR_VALUE;
9306 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9307 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9308 for (j = 0; j < kinfo->num_tqps; j++) {
9309 for (i = 0; i < reg_um; i++)
9310 *reg++ = hclge_read_dev(&hdev->hw,
9311 ring_reg_addr_list[i] +
9313 for (i = 0; i < separator_num; i++)
9314 *reg++ = SEPARATOR_VALUE;
9317 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9318 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9319 for (j = 0; j < hdev->num_msi_used - 1; j++) {
9320 for (i = 0; i < reg_um; i++)
9321 *reg++ = hclge_read_dev(&hdev->hw,
9322 tqp_intr_reg_addr_list[i] +
9324 for (i = 0; i < separator_num; i++)
9325 *reg++ = SEPARATOR_VALUE;
9328 /* fetching PF common registers values from firmware */
9329 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9331 dev_err(&hdev->pdev->dev,
9332 "Get 32 bit register failed, ret = %d.\n", ret);
9336 reg += regs_num_32_bit;
9337 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9339 dev_err(&hdev->pdev->dev,
9340 "Get 64 bit register failed, ret = %d.\n", ret);
9343 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9345 struct hclge_set_led_state_cmd *req;
9346 struct hclge_desc desc;
9349 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9351 req = (struct hclge_set_led_state_cmd *)desc.data;
9352 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9353 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9355 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9357 dev_err(&hdev->pdev->dev,
9358 "Send set led state cmd error, ret =%d\n", ret);
/* Locate-LED states passed to hclge_set_led_status().
 * NOTE(review): the OFF/ON enumerators were lost in the extraction;
 * they are referenced by hclge_set_led_id() below and restored here.
 */
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
9369 static int hclge_set_led_id(struct hnae3_handle *handle,
9370 enum ethtool_phys_id_state status)
9372 struct hclge_vport *vport = hclge_get_vport(handle);
9373 struct hclge_dev *hdev = vport->back;
9376 case ETHTOOL_ID_ACTIVE:
9377 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9378 case ETHTOOL_ID_INACTIVE:
9379 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9385 static void hclge_get_link_mode(struct hnae3_handle *handle,
9386 unsigned long *supported,
9387 unsigned long *advertising)
9389 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9390 struct hclge_vport *vport = hclge_get_vport(handle);
9391 struct hclge_dev *hdev = vport->back;
9392 unsigned int idx = 0;
9394 for (; idx < size; idx++) {
9395 supported[idx] = hdev->hw.mac.supported[idx];
9396 advertising[idx] = hdev->hw.mac.advertising[idx];
9400 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9402 struct hclge_vport *vport = hclge_get_vport(handle);
9403 struct hclge_dev *hdev = vport->back;
9405 return hclge_config_gro(hdev, enable);
/* hclge_ops - the PF's implementation of the hnae3 AE operations table,
 * consumed by the hns3 ENET/client layers. Grouping comments added for
 * navigation; entries are unchanged.
 * NOTE(review): the closing `};` of this initializer appears lost in the
 * extraction — confirm against the unmangled file.
 */
9408 static const struct hnae3_ae_ops hclge_ops = {
/* device / client lifecycle */
9409 .init_ae_dev = hclge_init_ae_dev,
9410 .uninit_ae_dev = hclge_uninit_ae_dev,
9411 .flr_prepare = hclge_flr_prepare,
9412 .flr_done = hclge_flr_done,
9413 .init_client_instance = hclge_init_client_instance,
9414 .uninit_client_instance = hclge_uninit_client_instance,
/* interrupt vector management */
9415 .map_ring_to_vector = hclge_map_ring_to_vector,
9416 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9417 .get_vector = hclge_get_vector,
9418 .put_vector = hclge_put_vector,
9419 .set_promisc_mode = hclge_set_promisc_mode,
9420 .set_loopback = hclge_set_loopback,
9421 .start = hclge_ae_start,
9422 .stop = hclge_ae_stop,
9423 .client_start = hclge_client_start,
9424 .client_stop = hclge_client_stop,
/* link settings / media / FEC */
9425 .get_status = hclge_get_status,
9426 .get_ksettings_an_result = hclge_get_ksettings_an_result,
9427 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9428 .get_media_type = hclge_get_media_type,
9429 .check_port_speed = hclge_check_port_speed,
9430 .get_fec = hclge_get_fec,
9431 .set_fec = hclge_set_fec,
/* RSS */
9432 .get_rss_key_size = hclge_get_rss_key_size,
9433 .get_rss_indir_size = hclge_get_rss_indir_size,
9434 .get_rss = hclge_get_rss,
9435 .set_rss = hclge_set_rss,
9436 .set_rss_tuple = hclge_set_rss_tuple,
9437 .get_rss_tuple = hclge_get_rss_tuple,
9438 .get_tc_size = hclge_get_tc_size,
/* MAC address filtering */
9439 .get_mac_addr = hclge_get_mac_addr,
9440 .set_mac_addr = hclge_set_mac_addr,
9441 .do_ioctl = hclge_do_ioctl,
9442 .add_uc_addr = hclge_add_uc_addr,
9443 .rm_uc_addr = hclge_rm_uc_addr,
9444 .add_mc_addr = hclge_add_mc_addr,
9445 .rm_mc_addr = hclge_rm_mc_addr,
/* autoneg / pause */
9446 .set_autoneg = hclge_set_autoneg,
9447 .get_autoneg = hclge_get_autoneg,
9448 .restart_autoneg = hclge_restart_autoneg,
9449 .halt_autoneg = hclge_halt_autoneg,
9450 .get_pauseparam = hclge_get_pauseparam,
9451 .set_pauseparam = hclge_set_pauseparam,
9452 .set_mtu = hclge_set_mtu,
9453 .reset_queue = hclge_reset_tqp,
/* statistics / ethtool info */
9454 .get_stats = hclge_get_stats,
9455 .get_mac_pause_stats = hclge_get_mac_pause_stat,
9456 .update_stats = hclge_update_stats,
9457 .get_strings = hclge_get_strings,
9458 .get_sset_count = hclge_get_sset_count,
9459 .get_fw_version = hclge_get_fw_version,
9460 .get_mdix_mode = hclge_get_mdix_mode,
/* VLAN offload and filtering */
9461 .enable_vlan_filter = hclge_enable_vlan_filter,
9462 .set_vlan_filter = hclge_set_vlan_filter,
9463 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9464 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
/* reset handling */
9465 .reset_event = hclge_reset_event,
9466 .get_reset_level = hclge_get_reset_level,
9467 .set_default_reset_request = hclge_set_def_reset_request,
/* channels / register dump / LED / link modes */
9468 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9469 .set_channels = hclge_set_channels,
9470 .get_channels = hclge_get_channels,
9471 .get_regs_len = hclge_get_regs_len,
9472 .get_regs = hclge_get_regs,
9473 .set_led_id = hclge_set_led_id,
9474 .get_link_mode = hclge_get_link_mode,
/* flow director / aRFS */
9475 .add_fd_entry = hclge_add_fd_entry,
9476 .del_fd_entry = hclge_del_fd_entry,
9477 .del_all_fd_entries = hclge_del_all_fd_entries,
9478 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9479 .get_fd_rule_info = hclge_get_fd_rule_info,
9480 .get_fd_all_rules = hclge_get_all_rules,
9481 .restore_fd_rules = hclge_restore_fd_entries,
9482 .enable_fd = hclge_enable_fd,
9483 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
/* debug / RAS / misc */
9484 .dbg_run_cmd = hclge_dbg_run_cmd,
9485 .handle_hw_ras_error = hclge_handle_hw_ras_error,
9486 .get_hw_reset_stat = hclge_get_hw_reset_stat,
9487 .ae_dev_resetting = hclge_ae_dev_resetting,
9488 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9489 .set_gro_en = hclge_gro_en,
9490 .get_global_queue_id = hclge_covert_handle_qid_global,
9491 .set_timer_task = hclge_set_timer_task,
9492 .mac_connect_phy = hclge_mac_connect_phy,
9493 .mac_disconnect_phy = hclge_mac_disconnect_phy,
9494 .restore_vlan_table = hclge_restore_vlan_table,
9497 static struct hnae3_ae_algo ae_algo = {
9499 .pdev_id_table = ae_algo_pci_tbl,
9502 static int hclge_init(void)
9504 pr_info("%s is initializing\n", HCLGE_NAME);
9506 hnae3_register_ae_algo(&ae_algo);
9511 static void hclge_exit(void)
9513 hnae3_unregister_ae_algo(&ae_algo);
9515 module_init(hclge_init);
9516 module_exit(hclge_exit);
9518 MODULE_LICENSE("GPL");
9519 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9520 MODULE_DESCRIPTION("HCLGE Driver");
9521 MODULE_VERSION(HCLGE_MOD_VERSION);