// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_RST_STATUS_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

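/* Read the MAC statistics block over the fixed-length HCLGE_OPC_STATS_MAC
 * command (HCLGE_MAC_CMD_NUM descriptors). Only the first descriptor
 * carries a command head, so it yields HCLGE_RD_FIRST_STATS_NUM counters
 * while every following descriptor yields HCLGE_RD_OTHER_STATS_NUM; the
 * 64-bit counters are accumulated into hdev->hw_stats.mac_stats in order.
 */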
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

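/* Read the MAC statistics block over HCLGE_OPC_STATS_MAC_ALL. Unlike the
 * defective variant above, the descriptor count is not fixed but comes
 * from hclge_mac_query_reg_num(), so the descriptor array is allocated
 * dynamically.
 */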
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

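/* Ask the firmware how many MAC statistics registers it exposes and
 * derive the number of query descriptors: the first descriptor covers
 * 3 registers and each further descriptor covers 4, so
 * desc_num = 1 + roundup(reg_num - 3, 4) / 4.
 * Worked example: reg_num = 83 gives 1 + (80 >> 2) + 0 = 21 descriptors.
 */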
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

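/* Prefer the complete statistics query; fall back to the defective one
 * when the firmware answers the register-number query with -EOPNOTSUPP.
 */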
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

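/* Accumulate the per-queue RX and TX packet counters. Each TQP is queried
 * with a single-descriptor HCLGE_OPC_QUERY_RX_STATUS/TX_STATUS command:
 * the queue index goes into data[0] (only 9 bits are valid, hence the
 * 0x1ff mask) and the packet count is read back from data[1].
 */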
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

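/* Copy the cached per-queue counters into the ethtool stats buffer, all
 * TX queues first and then all RX queues, matching the string order laid
 * out by hclge_tqps_get_strings().
 */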
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has TX & RX two queues */
	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

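/* Report the number of ethtool strings in a string set. For ETH_SS_TEST
 * the handle's loopback capability flags are recomputed as a side effect:
 * app loopback is only offered on GE speeds or revision >= 0x21 hardware,
 * while both serdes loopbacks are always offered.
 */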
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

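/* Interpret the function-status response: fail with -EINVAL until the PF
 * reset is reported done, then record in hdev->flag whether this PF is
 * the main PF.
 */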
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

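/* Read the PF's resource limits (TQP count, packet/TX/DV buffer sizes,
 * MSI-X vector counts) from the firmware. Buffer sizes are reported in
 * units of (1 << HCLGE_BUF_UNIT_S) bytes and rounded up to
 * HCLGE_BUF_SIZE_UNIT. With RoCE support, the NIC vectors are laid out
 * before the RoCE vectors, so num_msi covers both ranges.
 */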
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

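/* Translate the firmware speed code into an HCLGE_MAC_SPEED_* value;
 * hclge_cfg_mac_speed_dup_hw() below applies the reverse mapping.
 */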
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

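/* Advertise the FEC modes usable at the current speed: 10G/40G offer
 * BaseR, 25G/50G offer BaseR and RS, and 100G offers RS only; AUTO is
 * reported whenever any FEC mode is available.
 */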
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

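/* Unpack the HCLGE_OPC_GET_CFG_PARAM response (two descriptors) into
 * struct hclge_cfg. The 48-bit MAC address is split across param[2]
 * (low 32 bits) and part of param[3]; the high half is merged back with
 * (high << 31) << 1, a two-step shift equivalent to << 32.
 */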
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length is expressed in units of 4 bytes when sent to HW */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

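/* When running as a kdump capture kernel, shrink resource usage to the
 * minimum: one TQP per vport and the smallest supported descriptor
 * counts, so the crash kernel can still bring the device up.
 */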
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* the minimal number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is expressed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	u32 i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

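/* Check whether rx_all bytes can hold the already-assigned private RX
 * buffers plus a big-enough shared buffer. On success the shared buffer
 * size and its global and per-TC high/low waterlines are filled in;
 * returns false when the remaining space is insufficient.
 */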
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

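/* Try a layout that spends the entire RX budget on private per-TC
 * buffers and leaves the shared buffer empty; it succeeds only if every
 * enabled TC can be given at least min_rx_priv bytes.
 */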
static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				      struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
		      COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);

	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

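/* Top-level buffer provisioning: compute and program the TX buffers, the
 * RX private buffers, then (on DCB-capable hardware) the per-TC
 * waterlines and common thresholds, and finally the common waterline of
 * the shared buffer.
 */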
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	/* only 10M and 100M support half duplex */
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

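/* The firmware encodes link speed as a small selector value rather than a
 * raw rate; the mapping used below is 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G,
 * 4 = 50G, 5 = 100G, 6 = 10M and 7 = 100M (taken from the switch cases).
 */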
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	if (duplex)
		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.support_autoneg) {
		if (enable) {
			dev_err(&hdev->pdev->dev,
				"autoneg is not supported by current port\n");
			return -EOPNOTSUPP;
		} else {
			return 0;
		}
	}

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_restart_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;
	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
		return hclge_set_autoneg_en(hdev, !halt);

	return 0;
}

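/* Program the requested FEC mode(s) into the MAC. fec_mode is a bitmap of
 * HNAE3_FEC_* bits, so the AUTO enable bit can be combined with an explicit
 * RS or BaseR mode preference in a single command.
 */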
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
	struct hclge_config_fec_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);

	req = (struct hclge_config_fec_cmd *)desc.data;
	if (fec_mode & BIT(HNAE3_FEC_AUTO))
		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
	if (fec_mode & BIT(HNAE3_FEC_RS))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
	if (fec_mode & BIT(HNAE3_FEC_BASER))
		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);

	return ret;
}

static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	if (fec_mode && !(mac->fec_ability & fec_mode)) {
		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
		return -EINVAL;
	}

	ret = hclge_set_fec_hw(hdev, fec_mode);
	if (ret)
		return ret;

	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
	return 0;
}

static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
			  u8 *fec_mode)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac *mac = &hdev->hw.mac;

	if (fec_ability)
		*fec_ability = mac->fec_ability;
	if (fec_mode)
		*fec_mode = mac->fec_mode;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->support_sfp_query = true;
	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	if (hdev->hw.mac.support_autoneg) {
		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Config mac autoneg fail ret=%d\n", ret);
			return ret;
		}
	}

	mac->link = 0;

	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Fec mode init fail, ret = %d\n", ret);
			return ret;
		}
	}

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}

static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	unsigned int mac_state;
	int link_stat;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *rclient = hdev->roce_client;
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *rhandle;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
			hclge_config_mac_tnl_int(hdev, state);
			rhandle = &hdev->vport[i].roce;
			if (rclient && rclient->ops->link_status_change)
				rclient->ops->link_status_change(rhandle,
								 state);
		}
		hdev->hw.mac.link = state;
	}
}

static void hclge_update_port_capability(struct hclge_mac *mac)
{
	/* update fec ability by speed */
	hclge_convert_setting_fec(mac);

	/* firmware can not identify backplane type, the media type
	 * read from configuration can help deal with it
	 */
	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
		mac->module_type = HNAE3_MODULE_TYPE_KR;
	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		mac->module_type = HNAE3_MODULE_TYPE_TP;

	if (mac->support_autoneg) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
		linkmode_copy(mac->advertising, mac->supported);
	} else {
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				   mac->supported);
		linkmode_zero(mac->advertising);
	}
}

static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}

	*speed = le32_to_cpu(resp->speed);

	return 0;
}

static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
{
	struct hclge_sfp_info_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
	resp = (struct hclge_sfp_info_cmd *)desc.data;

	resp->query_type = QUERY_ACTIVE_SPEED;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP info %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
		return ret;
	}

	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, it means it's an old version
	 * firmware, do not update these params
	 */
	if (resp->speed_ability) {
		mac->module_type = le32_to_cpu(resp->module_type);
		mac->speed_ability = le32_to_cpu(resp->speed_ability);
		mac->autoneg = resp->autoneg;
		mac->support_autoneg = resp->autoneg_ability;
		mac->speed_type = QUERY_ACTIVE_SPEED;
		if (!resp->active_fec)
			mac->fec_mode = 0;
		else
			mac->fec_mode = BIT(resp->active_fec);
	} else {
		mac->speed_type = QUERY_SFP_SPEED;
	}

	return 0;
}

static int hclge_update_port_info(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int speed = HCLGE_MAC_SPEED_UNKNOWN;
	int ret;

	/* get the port info from SFP cmd if not copper port */
	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		return 0;

	/* if IMP does not support get SFP/qSFP info, return directly */
	if (!hdev->support_sfp_query)
		return 0;

	if (hdev->pdev->revision >= 0x21)
		ret = hclge_get_sfp_info(hdev, mac);
	else
		ret = hclge_get_sfp_speed(hdev, &speed);

	if (ret == -EOPNOTSUPP) {
		hdev->support_sfp_query = false;
		return ret;
	} else if (ret) {
		return ret;
	}

	if (hdev->pdev->revision >= 0x21) {
		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
			hclge_update_port_capability(mac);
			return 0;
		}
		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
					       HCLGE_MAC_FULL);
	} else {
		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
			return 0; /* do nothing if no SFP */

		/* must config full duplex for SFP */
		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
	}
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hdev->fd_arfs_expire_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}

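/* Decode the vector0 interrupt source. Reset events (IMP first, then
 * global) take priority over MSI-X error and mailbox events, matching the
 * check order below; only one event type is reported per invocation.
 */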
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw,
				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process the reset event in this go and
	 * will defer the processing of the mailbox events. Since we would not
	 * have cleared the RX CMDQ event this time, we would receive another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		hdev->rst_stats.imp_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		hdev->rst_stats.global_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 msix event source */
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
			msix_src_reg);
		return HCLGE_VECTOR0_EVENT_ERR;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
		cmdq_src_reg, msix_src_reg);

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 clearval = 0;
	u32 event_cause;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This could
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we will do below for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have a deferred type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type
		 *    it will fetch the correct type of reset. This would be
		 *    done by first decoding the types of errors.
		 */
		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
		/* fall through */
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	/* clear the source of interrupt if it is not caused by reset */
	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
		hclge_clear_event_cause(hdev, event_cause, clearval);
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}

int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify nic client failed %d(%d)\n", type, ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	int ret = 0;
	u16 i;

	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].roce;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)\n",
				type, ret);
			return ret;
		}
	}

	return ret;
}

static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WAIT_MS	100
#define HCLGE_RESET_WAIT_CNT	200
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	case HNAE3_FLR_RESET:
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	if (hdev->reset_type == HNAE3_FLR_RESET) {
		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
		       cnt++ < HCLGE_RESET_WAIT_CNT)
			msleep(HCLGE_RESET_WAIT_MS);

		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
			dev_err(&hdev->pdev->dev,
				"flr wait timeout: %d\n", cnt);
			return -EBUSY;
		}

		return 0;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}

static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
	struct hclge_vf_rst_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_vf_rst_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;

	if (reset)
		req->vf_rst = 0x1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
	int i;

	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to set/clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%d) rst failed %d!\n",
				vport->vport_id, ret);
			return ret;
		}

		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			continue;

		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
		ret = hclge_inform_reset_assert_to_vf(vport);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "inform reset to vf(%d) failed %d!\n",
				 vport->vport_id, ret);
	}

	return 0;
}

int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}

static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "Hardware reset not finished\n");
		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
		return;
	}

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	case HNAE3_FLR_RESET:
		dev_info(&pdev->dev, "FLR requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}

static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
	struct hclge_dev *hdev = ae_dev->priv;

	/* first, resolve any unknown reset type to the known type(s) */
	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
		hclge_handle_hw_msix_error(hdev, addr);
		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused
		 * the interrupt since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced
		 * the new UNKNOWN reset type). Now that the errors have been
		 * handled and cleared in hardware, we can safely enable
		 * interrupts. This is an exception to the norm.
		 */
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_IMP_RESET, addr)) {
		rst_level = HNAE3_IMP_RESET;
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
		rst_level = HNAE3_GLOBAL_RESET;
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
		rst_level = HNAE3_FUNC_RESET;
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	if (hdev->reset_type != HNAE3_NONE_RESET &&
	    rst_level < hdev->reset_type)
		return HNAE3_NONE_RESET;

	return rst_level;
}

static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);
}

static int hclge_reset_prepare_down(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, true);
		break;
	default:
		break;
	}

	return ret;
}

static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_SYNC_TIME 100

	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(HCLGE_RESET_SYNC_TIME);
		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing the pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		hdev->rst_stats.pf_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(HCLGE_RESET_SYNC_TIME);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		hdev->rst_stats.flr_rst_cnt++;
		break;
	case HNAE3_IMP_RESET:
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	/* inform hardware that preparatory work is done */
	msleep(HCLGE_RESET_SYNC_TIME);
	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
			HCLGE_NIC_CMQ_ENABLE);
	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}

static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
{
#define MAX_RESET_FAIL_CNT 5

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
		    BIT(HCLGE_IMP_RESET_BIT))) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because IMP Reset is pending\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->reset_fail_cnt++;
		if (is_timeout) {
			set_bit(hdev->reset_type, &hdev->reset_pending);
			dev_info(&hdev->pdev->dev,
				 "re-schedule to wait for hw reset done\n");
			return true;
		}

		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
		hclge_clear_reset_cause(hdev);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
		mod_timer(&hdev->reset_timer,
			  jiffies + HCLGE_RESET_INTERVAL);

		return false;
	}

	hclge_clear_reset_cause(hdev);
	dev_err(&hdev->pdev->dev, "Reset fail!\n");
	return false;
}

static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	default:
		break;
	}

	return ret;
}

static int hclge_reset_stack(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}

static void hclge_reset(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	bool is_timeout = false;
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->rst_stats.reset_cnt++;
	/* perform reset of the stack & ae device for a client */
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset;

	ret = hclge_reset_prepare_down(hdev);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	if (hclge_reset_wait(hdev)) {
		is_timeout = true;
		goto err_reset;
	}

	hdev->rst_stats.hw_reset_done_cnt++;

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_reset_stack(hdev);
	if (ret)
		goto err_reset_lock;

	hclge_clear_reset_cause(hdev);

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
	if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset;

	hdev->last_reset_time = jiffies;
	hdev->reset_fail_cnt = 0;
	hdev->rst_stats.reset_done_cnt++;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	del_timer(&hdev->reset_timer);

	return;

err_reset_lock:
	rtnl_unlock();
err_reset:
	if (hclge_reset_err_handle(hdev, is_timeout))
		hclge_reset_task_schedule(hdev);
}

static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* We might end up getting called broadly because of 2 below cases:
	 * 1. Recoverable error was conveyed through APEI and only way to bring
	 *    normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check if this is a new reset request and we are not here
	 * just because the last reset attempt did not succeed and the watchdog
	 * hit us again. We will know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, let us check after a
	 * sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. And if it is a repeat reset
	 * request of the most recent one then we want to make sure we throttle
	 * the reset request; therefore, we will not allow it again before
	 * 3*HZ has elapsed.
	 */
	if (!handle)
		handle = &hdev->vport[0].nic;

	if (time_before(jiffies, (hdev->last_reset_time +
				  HCLGE_RESET_INTERVAL)))
		return;
	else if (hdev->default_reset_request)
		hdev->reset_level =
			hclge_get_reset_level(ae_dev,
					      &hdev->default_reset_request);
	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
		hdev->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}

static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	dev_info(&hdev->pdev->dev,
		 "triggering reset in reset timer\n");
	hclge_reset_event(hdev->pdev, NULL);
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
	int i;

	/* start from vport 1 for PF is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);

		/* If vf is not alive, set to default value */
		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
	}
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_port_info(hdev);
	hclge_update_link_status(hdev);
	hclge_update_vport_alive(hdev);
	hclge_sync_vlan_filter(hdev);
	if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
		hclge_rfs_filter_expire(hdev);
		hdev->fd_arfs_expire_timer = 0;
	}
	hclge_service_complete(hdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

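/* Hand out up to vector_num unused MSI-X vectors to the requesting vport.
 * Vector 0 is reserved for the misc (reset/mailbox) interrupt, so the
 * search starts at index 1. Returns the number of vectors actually
 * allocated, which may be fewer than requested.
 */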
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}

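/* The RSS hash key is longer than a single command descriptor can carry,
 * so it is written in HCLGE_RSS_HASH_KEY_NUM sized chunks, each command
 * tagged with its key_offset so firmware can reassemble the full key.
 */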
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclge_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGE_RSS_KEY_SIZE;
	req = (struct hclge_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}

static void hclge_get_rss_type(struct hclge_vport *vport)
{
	if (vport->rss_tuple_sets.ipv4_tcp_en ||
	    vport->rss_tuple_sets.ipv4_udp_en ||
	    vport->rss_tuple_sets.ipv4_sctp_en ||
	    vport->rss_tuple_sets.ipv6_tcp_en ||
	    vport->rss_tuple_sets.ipv6_udp_en ||
	    vport->rss_tuple_sets.ipv6_sctp_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
		 vport->rss_tuple_sets.ipv6_fragment_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
	else
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
}

static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	hclge_get_rss_type(&hdev->vport[0]);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc) {
		switch (vport->rss_algo) {
		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		switch (hfunc) {
		case ETH_RSS_HASH_TOP:
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
			break;
		case ETH_RSS_HASH_XOR:
			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
			break;
		case ETH_RSS_HASH_NO_CHANGE:
			hash_algo = vport->rss_algo;
			break;
		default:
			return -EINVAL;
		}

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}

static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}

static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	hclge_get_rss_type(vport);
	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}

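/* Example of the tc_size encoding used below: with rss_size = 24,
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so 5 is written to
 * hardware while the real queue count stays bounded by the indirection
 * table contents.
 */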
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	unsigned int i;
	int ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}

void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}

static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;

	if (hdev->pdev->revision >= 0x21)
		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = rss_algo;

		memcpy(vport[i].rss_hash_key, hclge_hash_key,
		       HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}

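/* Walk the ring chain and map (or unmap, when en is false) each TQP to the
 * given vector. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD rings are packed into
 * one descriptor; longer chains are flushed and continued in a fresh
 * command, and any remainder is sent after the loop.
 */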
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}

static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}

int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
	 * on pdev revision(0x20); newer revisions support them. Setting
	 * these two fields does not cause an error when the driver sends
	 * the command to firmware on revision(0x20).
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;
	bool en_bc_pmc = true;

	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
	 * is always bypassed. So broadcast promisc should stay disabled until
	 * the user enables promisc mode.
	 */
	if (handle->pdev->revision == 0x20)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
				 vport->vport_id);
	return hclge_cmd_set_promisc_mode(hdev, &param);
}

static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}

static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}

static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS		0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %d\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	hdev->fd_cfg.proto_support =
		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If use max 400bit key, we can support tuples for ether type */
	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
		hdev->fd_cfg.proto_support |= ETHER_FLOW;
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}
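
/* Flow director key layout recap (derived from the code above): the key is
 * 400 or 200 bits wide depending on fd_mode.  Active tuples fill the LSB
 * region and meta data (roce_type, dst_vport) occupies the MSB region; only
 * the 400-bit key leaves room for the DST_MAC/SRC_MAC tuples, which is why
 * ETHER_FLOW support is conditional on max_key_length == MAX_KEY_LENGTH.
 */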
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}
static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}
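
/* ad_data is a 64-bit action word: the rule id and its write-enable bit are
 * packed into the low 32 bits first, then "ad_data <<= 32" moves them to
 * the high word so the drop/queue/counter fields can fill the low word.
 */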
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}
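
/* Note on the helpers above: calc_x()/calc_y() (defined elsewhere in this
 * file) derive the x/y patterns the TCAM expects from each (value, mask)
 * pair, and MAC addresses are written byte-reversed (key[ETH_ALEN - 1 - i])
 * to match the hardware byte order.  A tuple flagged in rule->unused_tuple
 * is skipped entirely, so its key bytes stay zero.
 */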
static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}
static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}
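
/* Worked example (illustrative): with ROCE_TYPE (1 bit) and DST_VPORT
 * active, cur_pos ends up at 1 + the DST_VPORT tuple_size, so the packed
 * meta_data is left-shifted by 32 - cur_pos bits.  That parks the meta data
 * at the most significant end of its 32-bit key word, matching the
 * "meta data at MSB" layout described before hclge_config_key() below.
 */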
/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region, unused bits will be filled 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	int i;
	int ret, tuple_size;
	u8 meta_data_region;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->location, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->location, ret);

	return ret;
}
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs, u32 *unused)
{
	struct ethtool_tcpip4_spec *tcp_ip4_spec;
	struct ethtool_usrip4_spec *usr_ip4_spec;
	struct ethtool_tcpip6_spec *tcp_ip6_spec;
	struct ethtool_usrip6_spec *usr_ip6_spec;
	struct ethhdr *ether_spec;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
		return -EOPNOTSUPP;

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

		if (!tcp_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip4_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip4_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (!tcp_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		break;
	case IP_USER_FLOW:
		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

		if (!usr_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		if (!usr_ip4_spec->proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip4_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
			return -EOPNOTSUPP;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS);

		/* check whether src/dst ip address used */
		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip6_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip6_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (tcp_ip6_spec->tclass)
			return -EOPNOTSUPP;

		break;
	case IPV6_USER_FLOW:
		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
			BIT(INNER_DST_PORT);

		/* check whether src/dst ip address used */
		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip6_spec->l4_proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip6_spec->tclass)
			return -EOPNOTSUPP;

		if (usr_ip6_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		break;
	case ETHER_FLOW:
		ether_spec = &fs->h_u.ether_spec;
		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

		if (is_zero_ether_addr(ether_spec->h_source))
			*unused |= BIT(INNER_SRC_MAC);

		if (is_zero_ether_addr(ether_spec->h_dest))
			*unused |= BIT(INNER_DST_MAC);

		if (!ether_spec->h_proto)
			*unused |= BIT(INNER_ETH_TYPE);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		if (fs->h_ext.vlan_etype)
			return -EOPNOTSUPP;
		if (!fs->h_ext.vlan_tci)
			*unused |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci) {
			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	} else {
		*unused |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
			return -EOPNOTSUPP;

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused |= BIT(INNER_DST_MAC);
		else
			*unused &= ~(BIT(INNER_DST_MAC));
	}

	return 0;
}
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return rule && rule->location == location;
}
/* make sure being called after lock up with fd_rule_lock */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add) {
			if (!hdev->hclge_fd_rule_num)
				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
			clear_bit(location, hdev->fd_bmap);

			return 0;
		}
	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %d does not exist\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	set_bit(location, hdev->fd_bmap);
	hdev->hclge_fd_rule_num++;
	hdev->fd_active_type = new_rule->rule_type;

	return 0;
}
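
/* The rule list is kept sorted by rule->location: the walk above stops at
 * the first node whose location is >= the target, so an insert lands behind
 * "parent" and a delete/overwrite reuses the matched node.  Callers must
 * hold fd_rule_lock, as the header comment says; fd_bmap and
 * hclge_fd_rule_num are updated under the same lock to stay consistent.
 */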
static int hclge_fd_get_tuple(struct hclge_dev *hdev,
			      struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IP_USER_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case ETHER_FLOW:
		ether_addr_copy(rule->tuples.src_mac,
				fs->h_u.ether_spec.h_source);
		ether_addr_copy(rule->tuples_mask.src_mac,
				fs->m_u.ether_spec.h_source);

		ether_addr_copy(rule->tuples.dst_mac,
				fs->h_u.ether_spec.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac,
				fs->m_u.ether_spec.h_dest);

		rule->tuples.ether_proto =
				be16_to_cpu(fs->h_u.ether_spec.h_proto);
		rule->tuples_mask.ether_proto =
				be16_to_cpu(fs->m_u.ether_spec.h_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_SCTP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_TCP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_UDP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	default:
		break;
	}

	if (fs->flow_type & FLOW_EXT) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}
/* make sure being called after lock up with fd_rule_lock */
static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	if (!rule) {
		dev_err(&hdev->pdev->dev,
			"The flow director rule is NULL\n");
		return -EINVAL;
	}

	/* it will never fail here, so no need to check the return value */
	hclge_fd_update_rule_list(hdev, rule, rule->location, true);

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	return 0;

clear_rule:
	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
	return ret;
}
static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	if (!hdev->fd_en) {
		dev_warn(&hdev->pdev->dev,
			 "Please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
		return ret;
	}

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		u16 tqps;

		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%d) > max vf num (%d)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%d) > max tqp num (%d)\n",
				ring, tqps - 1);
			return -EINVAL;
		}

		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
		q_index = ring;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;

	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	/* to avoid rule conflict, when user configure rule by ethtool,
	 * we need to clear all arfs rules
	 */
	hclge_clear_arfs_rules(handle);

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}
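
/* ring_cookie decoding sketch (illustrative): for an "ethtool -N <dev>
 * flow-type ... action N" request, the low bits select the ring and the VF
 * index is packed above ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF, i.e.
 *
 *	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
 *	vf   = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
 *
 * vf == 0 means the PF itself, so the PF's own vport id and tqp count are
 * used for the range checks above.
 */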
static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %d does not exist\n", fs->location);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
				   NULL, false);
	if (ret)
		return ret;

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
				     bool clear_list)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	if (!hnae3_dev_fd_supported(hdev))
		return;

	spin_lock_bh(&hdev->fd_rule_lock);
	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}

	spin_unlock_bh(&hdev->fd_rule_lock);
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	/* Return ok here, because reset error handling will check this
	 * return value. If error is returned here, the reset process will
	 * fail.
	 */
	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	/* if fd is disabled, should not restore it when reset */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
		if (!ret)
			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);

		if (ret) {
			dev_warn(&hdev->pdev->dev,
				 "Restore rule %d failed, remove it\n",
				 rule->location);
			clear_bit(rule->location, hdev->fd_bmap);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	}

	if (hdev->hclge_fd_rule_num)
		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		fs->h_u.tcp_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.tcp_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip4_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip4_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.tcp_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		break;
	case IP_USER_FLOW:
		fs->h_u.usr_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4src =
			rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4dst =
			rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.usr_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip4_spec.proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip6_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip6_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		break;
	case IPV6_USER_FLOW:
		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
				  rule->tuples.src_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, IPV6_SIZE);

		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, IPV6_SIZE);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
			       sizeof(int) * IPV6_SIZE);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, IPV6_SIZE);

		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip6_spec.l4_proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		break;
	case ETHER_FLOW:
		ether_addr_copy(fs->h_u.ether_spec.h_source,
				rule->tuples.src_mac);
		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_source);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_source,
					rule->tuples_mask.src_mac);

		ether_addr_copy(fs->h_u.ether_spec.h_dest,
				rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);

		fs->h_u.ether_spec.h_proto =
				cpu_to_be16(rule->tuples.ether_proto);
		fs->m_u.ether_spec.h_proto =
				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
				0 : cpu_to_be16(rule->tuples_mask.ether_proto);

		break;
	default:
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
			rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
			cpu_to_be16(VLAN_VID_MASK) :
			cpu_to_be16(rule->tuples_mask.vlan_tag1);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
	}

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}
static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		memcpy(tuples->src_ip,
		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
		       sizeof(tuples->src_ip));
		memcpy(tuples->dst_ip,
		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
		       sizeof(tuples->dst_ip));
	}
}
/* traverse all rules, check whether an existing rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}
static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}
static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples;
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 tmp_queue_id;
	u16 bit_id;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	memset(&new_tuples, 0, sizeof(new_tuples));
	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	spin_lock_bh(&hdev->fd_rule_lock);

	/* when there is already fd rule existed add by user,
	 * arfs should not work
	 */
	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -EOPNOTSUPP;
	}

	/* check is there flow director filter existed for this flow,
	 * if not, create a new filter for it;
	 * if filter exist with different queue id, modify the filter;
	 * if filter exist with same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOSPC;
		}

		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOMEM;
		}

		set_bit(bit_id, hdev->fd_bmap);
		rule->location = bit_id;
		rule->flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		ret = hclge_fd_config_rule(hdev, rule);

		spin_unlock_bh(&hdev->fd_rule_lock);

		if (ret)
			return ret;

		return rule->location;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (rule->queue_id == queue_id)
		return rule->location;

	tmp_queue_id = rule->queue_id;
	rule->queue_id = queue_id;
	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret) {
		rule->queue_id = tmp_queue_id;
		return ret;
	}

	return rule->location;
}
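
/* aRFS rules share the TCAM with ethtool rules but never mix with them:
 * an active ethtool (HCLGE_FD_EP_ACTIVE) configuration blocks new aRFS
 * entries here, and hclge_clear_arfs_rules() flushes aRFS state before an
 * ethtool rule is installed.  Expired aRFS flows are reclaimed later by
 * hclge_rfs_filter_expire() below.
 */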
static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	HLIST_HEAD(del_list);

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->flow_id, rule->location)) {
			hlist_del_init(&rule->rule_node);
			hlist_add_head(&rule->rule_node, &del_list);
			hdev->hclge_fd_rule_num--;
			clear_bit(rule->location, hdev->fd_bmap);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				     rule->location, NULL, false);
		kfree(rule);
	}
#endif
}
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool clear;

	hdev->fd_en = enable;
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
	if (!enable)
		hclge_del_all_fd_entries(handle, clear);
	else
		hclge_restore_fd_entries(handle);
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100

#define HCLGE_MAC_LINK_STATUS_MS   10
#define HCLGE_MAC_LINK_STATUS_NUM  100
#define HCLGE_MAC_LINK_STATUS_DOWN 0
#define HCLGE_MAC_LINK_STATUS_UP   1

	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int mac_link_ret = 0;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_serdes_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported serdes loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
	} else {
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get fail, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}

	hclge_cfg_mac_mode(hdev, en);

	i = 0;
	do {
		/* serdes internal loopback, independent of the network cable */
		msleep(HCLGE_MAC_LINK_STATUS_MS);
		ret = hclge_get_mac_link_status(hdev);
		if (ret == mac_link_ret)
			return 0;
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);

	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");

	return -EBUSY;
}
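
/* Polling budget above: both loops wait 10 ms per try for up to 100 tries,
 * i.e. roughly one second each for the serdes "done" flag and for the MAC
 * link state to reach the expected up/down value before giving up.
 */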
static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	if (enable)
		req->enable |= 1U << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}
static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}
static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
	}
}
static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}
static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_clear_arfs_rules(handle);

	/* If it is not PF reset, the firmware will disable the MAC,
	 * so it only needs to stop the phy here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		return;
	}

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclge_reset_tqp(handle, i);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}
int hclge_vport_start(struct hclge_vport *vport)
{
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EINVAL;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n",
			op);
	}

	return return_status;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
#define HCLGE_VF_NUM_IN_FIRST_DESC 192
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}
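
/* Worked example: vfid 200 falls past the first 192 function ids, so it is
 * addressed in desc[2] at word (200 - 192) / 32 = 0, bit 200 % 32 = 8;
 * vfid 50 stays in desc[1] at word 50 / 32 = 1, bit 50 % 32 = 18.
 */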
static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
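
/* Worked example: for addr 00:11:22:33:44:55 the packing above yields
 * high_val = 0x33221100 (bytes 3..0) and low_val = 0x5544 (bytes 5..4),
 * i.e. the address is stored little-endian across the two table fields.
 */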
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}
static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
				  true);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "Alloc umv space failed, want %d, get %d\n",
			 hdev->wanted_umv_size, allocated_size);

	mutex_init(&hdev->umv_mutex);
	hdev->max_umv_size = allocated_size;
	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
	 * preserve some unicast mac vlan table entries shared by pf
	 * and its vfs.
	 */
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);
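
	return 0;
}

/* Sizing example (illustrative): with max_umv_size = 256 and
 * num_req_vfs = 6, the divisor is num_req_vfs + 2 = 8, so each function
 * gets priv_umv_size = 256 / 8 = 32 private entries, and share_umv_size
 * starts at 32 + (256 % 8) = 32 entries pooled for whoever exhausts its
 * private quota first.
 */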
static int hclge_uninit_umv_space(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->max_umv_size > 0) {
		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
					  false);
		if (ret)
			return ret;

		/* free umv space */
		hdev->max_umv_size = 0;
	}
	mutex_destroy(&hdev->umv_mutex);

	return 0;
}
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
	if (!is_alloc)
		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);

	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"%s umv space failed for cmd_send, ret =%d\n",
			is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}
static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->umv_mutex);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);
	mutex_unlock(&hdev->umv_mutex);
}
static bool hclge_is_umv_space_full(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	mutex_lock(&hdev->umv_mutex);
	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);
	mutex_unlock(&hdev->umv_mutex);

	return is_full;
}
static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->umv_mutex);
	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
	mutex_unlock(&hdev->umv_mutex);
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}
int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			addr, is_zero_ether_addr(addr),
			is_broadcast_ether_addr(addr),
			is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Repeated unicast entries
	 * are not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		if (!hclge_is_umv_space_full(vport)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			return ret;
		}

		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
			hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}
6687 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6688 const unsigned char *addr)
6690 struct hclge_dev *hdev = vport->back;
6691 struct hclge_mac_vlan_tbl_entry_cmd req;
6694 /* mac addr check */
6695 if (is_zero_ether_addr(addr) ||
6696 is_broadcast_ether_addr(addr) ||
6697 is_multicast_ether_addr(addr)) {
6698 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
6699 addr);
6700 return -EINVAL;
6701 }
6703 memset(&req, 0, sizeof(req));
6704 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6705 hclge_prepare_mac_addr(&req, addr, false);
6706 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6707 if (!ret)
6708 hclge_update_umv_space(vport, true);
6710 return ret;
6713 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6714 const unsigned char *addr)
6716 struct hclge_vport *vport = hclge_get_vport(handle);
6718 return hclge_add_mc_addr_common(vport, addr);
6721 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6722 const unsigned char *addr)
6724 struct hclge_dev *hdev = vport->back;
6725 struct hclge_mac_vlan_tbl_entry_cmd req;
6726 struct hclge_desc desc[3];
6729 /* mac addr check */
6730 if (!is_multicast_ether_addr(addr)) {
6731 dev_err(&hdev->pdev->dev,
6732 "Add mc mac err! invalid mac:%pM.\n",
6736 memset(&req, 0, sizeof(req));
6737 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6738 hclge_prepare_mac_addr(&req, addr, true);
6739 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6740 if (status) {
6741 /* This mac addr does not exist, add a new entry for it */
6742 memset(desc[0].data, 0, sizeof(desc[0].data));
6743 memset(desc[1].data, 0, sizeof(desc[0].data));
6744 memset(desc[2].data, 0, sizeof(desc[0].data));
6745 }
6746 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6747 if (status)
6748 return status;
6749 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6751 if (status == -ENOSPC)
6752 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6754 return status;
6757 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6758 const unsigned char *addr)
6760 struct hclge_vport *vport = hclge_get_vport(handle);
6762 return hclge_rm_mc_addr_common(vport, addr);
6765 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6766 const unsigned char *addr)
6768 struct hclge_dev *hdev = vport->back;
6769 struct hclge_mac_vlan_tbl_entry_cmd req;
6770 enum hclge_cmd_status status;
6771 struct hclge_desc desc[3];
6773 /* mac addr check */
6774 if (!is_multicast_ether_addr(addr)) {
6775 dev_dbg(&hdev->pdev->dev,
6776 "Remove mc mac err! invalid mac:%pM.\n",
6781 memset(&req, 0, sizeof(req));
6782 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6783 hclge_prepare_mac_addr(&req, addr, true);
6784 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6785 if (!status) {
6786 /* This mac addr exists, remove this handle's VFID for it */
6787 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6788 if (status)
6789 return status;
6791 if (hclge_is_all_function_id_zero(desc))
6792 /* All the vfids are zero, so delete this entry */
6793 status = hclge_remove_mac_vlan_tbl(vport, &req);
6794 else
6795 /* Not all the vfids are zero, update the vfid */
6796 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6797 } else {
6799 /* Maybe this mac address is in mta table, but it cannot be
6800 * deleted here because an entry of mta represents an address
6801 * range rather than a specific address. the delete action to
6802 * all entries will take effect in update_mta_status called by
6803 * hns3_nic_set_rx_mode.
6804 */
6805 status = 0;
6806 }
6808 return status;
6811 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6812 enum HCLGE_MAC_ADDR_TYPE mac_type)
6814 struct hclge_vport_mac_addr_cfg *mac_cfg;
6815 struct list_head *list;
6817 if (!vport->vport_id)
6818 return;
6820 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6821 if (!mac_cfg)
6822 return;
6824 mac_cfg->hd_tbl_status = true;
6825 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6827 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6828 &vport->uc_mac_list : &vport->mc_mac_list;
6830 list_add_tail(&mac_cfg->node, list);
6833 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6835 enum HCLGE_MAC_ADDR_TYPE mac_type)
6837 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6838 struct list_head *list;
6839 bool uc_flag, mc_flag;
6841 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6842 &vport->uc_mac_list : &vport->mc_mac_list;
6844 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6845 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6847 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6848 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6849 if (uc_flag && mac_cfg->hd_tbl_status)
6850 hclge_rm_uc_addr_common(vport, mac_addr);
6852 if (mc_flag && mac_cfg->hd_tbl_status)
6853 hclge_rm_mc_addr_common(vport, mac_addr);
6855 list_del(&mac_cfg->node);
6856 kfree(mac_cfg);
6857 break;
6858 }
6862 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6863 enum HCLGE_MAC_ADDR_TYPE mac_type)
6865 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6866 struct list_head *list;
6868 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6869 &vport->uc_mac_list : &vport->mc_mac_list;
6871 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6872 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6873 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6875 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6876 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6878 mac_cfg->hd_tbl_status = false;
6879 if (is_del_list) {
6880 list_del(&mac_cfg->node);
6881 kfree(mac_cfg);
6882 }
6886 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6888 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6889 struct hclge_vport *vport;
6892 mutex_lock(&hdev->vport_cfg_mutex);
6893 for (i = 0; i < hdev->num_alloc_vport; i++) {
6894 vport = &hdev->vport[i];
6895 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6896 list_del(&mac->node);
6897 kfree(mac);
6900 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6901 list_del(&mac->node);
6902 kfree(mac);
6905 mutex_unlock(&hdev->vport_cfg_mutex);
6908 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6909 u16 cmdq_resp, u8 resp_code)
6911 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6912 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6913 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6914 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6916 int return_status = 0;
6918 if (cmdq_resp) {
6919 dev_err(&hdev->pdev->dev,
6920 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6921 cmdq_resp);
6922 return -EIO;
6923 }
6925 switch (resp_code) {
6926 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6927 case HCLGE_ETHERTYPE_ALREADY_ADD:
6928 return_status = 0;
6929 break;
6930 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6931 dev_err(&hdev->pdev->dev,
6932 "add mac ethertype failed for manager table overflow.\n");
6933 return_status = -EIO;
6934 break;
6935 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6936 dev_err(&hdev->pdev->dev,
6937 "add mac ethertype failed for key conflict.\n");
6938 return_status = -EIO;
6939 break;
6940 default:
6941 dev_err(&hdev->pdev->dev,
6942 "add mac ethertype failed for undefined, code=%d.\n",
6943 resp_code);
6944 return_status = -EIO;
6945 break;
6946 }
6947 return return_status;
6950 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6951 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6953 struct hclge_desc desc;
6958 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6959 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6961 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6963 dev_err(&hdev->pdev->dev,
6964 "add mac ethertype failed for cmd_send, ret =%d.\n",
6969 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6970 retval = le16_to_cpu(desc.retval);
6972 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6975 static int init_mgr_tbl(struct hclge_dev *hdev)
6980 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6981 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6983 dev_err(&hdev->pdev->dev,
6984 "add mac ethertype failed, ret =%d.\n",
6993 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6995 struct hclge_vport *vport = hclge_get_vport(handle);
6996 struct hclge_dev *hdev = vport->back;
6998 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7001 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7004 const unsigned char *new_addr = (const unsigned char *)p;
7005 struct hclge_vport *vport = hclge_get_vport(handle);
7006 struct hclge_dev *hdev = vport->back;
7009 /* mac addr check */
7010 if (is_zero_ether_addr(new_addr) ||
7011 is_broadcast_ether_addr(new_addr) ||
7012 is_multicast_ether_addr(new_addr)) {
7013 dev_err(&hdev->pdev->dev,
7014 "Change uc mac err! invalid mac:%pM.\n",
7015 new_addr);
7016 return -EINVAL;
7017 }
7019 if ((!is_first || is_kdump_kernel()) &&
7020 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7021 dev_warn(&hdev->pdev->dev,
7022 "remove old uc mac address fail.\n");
7024 ret = hclge_add_uc_addr(handle, new_addr);
7025 if (ret) {
7026 dev_err(&hdev->pdev->dev,
7027 "add uc mac address fail, ret =%d.\n",
7028 ret);
7030 if (!is_first &&
7031 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7032 dev_err(&hdev->pdev->dev,
7033 "restore uc mac address fail.\n");
7035 return -EIO;
7036 }
7038 ret = hclge_pause_addr_cfg(hdev, new_addr);
7039 if (ret) {
7040 dev_err(&hdev->pdev->dev,
7041 "configure mac pause address fail, ret =%d.\n",
7042 ret);
7043 return -EIO;
7044 }
7046 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7048 return 0;
7051 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7054 struct hclge_vport *vport = hclge_get_vport(handle);
7055 struct hclge_dev *hdev = vport->back;
7057 if (!hdev->hw.mac.phydev)
7058 return -EOPNOTSUPP;
7060 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7063 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7064 u8 fe_type, bool filter_en, u8 vf_id)
7066 struct hclge_vlan_filter_ctrl_cmd *req;
7067 struct hclge_desc desc;
7070 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7072 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7073 req->vlan_type = vlan_type;
7074 req->vlan_fe = filter_en ? fe_type : 0;
7075 req->vf_id = vf_id;
7077 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7078 if (ret)
7079 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7080 ret);
7082 return ret;
7085 #define HCLGE_FILTER_TYPE_VF 0
7086 #define HCLGE_FILTER_TYPE_PORT 1
7087 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7088 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7089 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7090 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7091 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7092 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7093 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7094 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7095 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7097 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7099 struct hclge_vport *vport = hclge_get_vport(handle);
7100 struct hclge_dev *hdev = vport->back;
7102 if (hdev->pdev->revision >= 0x21) {
7103 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7104 HCLGE_FILTER_FE_EGRESS, enable, 0);
7105 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7106 HCLGE_FILTER_FE_INGRESS, enable, 0);
7107 } else {
7108 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7109 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7110 0);
7111 }
7112 if (enable)
7113 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7114 else
7115 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7118 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7119 bool is_kill, u16 vlan, u8 qos,
7122 #define HCLGE_MAX_VF_BYTES 16
7123 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7124 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7125 struct hclge_desc desc[2];
7130 /* if vf vlan table is full, firmware will close vf vlan filter, so
7131 * it is pointless and unnecessary to add a new vlan id to the vf vlan filter
7132 */
7133 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7136 hclge_cmd_setup_basic_desc(&desc[0],
7137 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7138 hclge_cmd_setup_basic_desc(&desc[1],
7139 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7141 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
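/* The VF bitmap spans the two descriptors as one array of
 * HCLGE_MAX_VF_BYTES (16) bytes each: vfid / 8 selects the byte and
 * vfid % 8 the bit. E.g. vfid 10 sets bit 2 of byte 1 in desc[0];
 * vfids of 128 and above fall through to desc[1] below.
 */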
7143 vf_byte_off = vfid / 8;
7144 vf_byte_val = 1 << (vfid % 8);
7146 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7147 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7149 req0->vlan_id = cpu_to_le16(vlan);
7150 req0->vlan_cfg = is_kill;
7152 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7153 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7155 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7157 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7159 dev_err(&hdev->pdev->dev,
7160 "Send vf vlan command fail, ret =%d.\n",
7166 #define HCLGE_VF_VLAN_NO_ENTRY 2
7167 if (!req0->resp_code || req0->resp_code == 1)
7170 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7171 set_bit(vfid, hdev->vf_vlan_full);
7172 dev_warn(&hdev->pdev->dev,
7173 "vf vlan table is full, vf vlan filter is disabled\n");
7177 dev_err(&hdev->pdev->dev,
7178 "Add vf vlan filter fail, ret =%d.\n",
7181 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7182 if (!req0->resp_code)
7185 /* vf vlan filter is disabled when vf vlan table is full,
7186 * then new vlan id will not be added into vf vlan table.
7187 * Just return 0 without warning, avoid massive verbose
7188 * print logs when unload.
7190 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7193 dev_err(&hdev->pdev->dev,
7194 "Kill vf vlan filter fail, ret =%d.\n",
7201 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7202 u16 vlan_id, bool is_kill)
7204 struct hclge_vlan_filter_pf_cfg_cmd *req;
7205 struct hclge_desc desc;
7206 u8 vlan_offset_byte_val;
7207 u8 vlan_offset_byte;
7211 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7213 vlan_offset_160 = vlan_id / 160;
7214 vlan_offset_byte = (vlan_id % 160) / 8;
7215 vlan_offset_byte_val = 1 << (vlan_id % 8);
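/* Each PF vlan config command addresses a window of 160 vlan ids as a
 * bitmap. E.g. vlan_id 100: vlan_offset_160 = 100 / 160 = 0, byte
 * (100 % 160) / 8 = 12, and bit value 1 << (100 % 8) = 0x10.
 */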
7217 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7218 req->vlan_offset = vlan_offset_160;
7219 req->vlan_cfg = is_kill;
7220 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7222 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7223 if (ret)
7224 dev_err(&hdev->pdev->dev,
7225 "port vlan command, send fail, ret =%d.\n", ret);
7227 return ret;
7229 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7230 u16 vport_id, u16 vlan_id, u8 qos,
7233 u16 vport_idx, vport_num = 0;
7236 if (is_kill && !vlan_id)
7237 return 0;
7239 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7240 qos, proto);
7241 if (ret) {
7242 dev_err(&hdev->pdev->dev,
7243 "Set %d vport vlan filter config fail, ret =%d.\n",
7244 vport_id, ret);
7245 return ret;
7246 }
7248 /* vlan 0 may be added twice when 8021q module is enabled */
7249 if (!is_kill && !vlan_id &&
7250 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7251 return 0;
7253 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7254 dev_err(&hdev->pdev->dev,
7255 "Add port vlan failed, vport %d is already in vlan %d\n",
7256 vport_id, vlan_id);
7257 return -EINVAL;
7258 }
7260 if (is_kill &&
7261 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7262 dev_err(&hdev->pdev->dev,
7263 "Delete port vlan failed, vport %d is not in vlan %d\n",
7264 vport_id, vlan_id);
7265 return -EINVAL;
7266 }
7268 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7269 vport_num++;
7271 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7272 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7273 is_kill);
7275 return ret;
7278 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7280 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7281 struct hclge_vport_vtag_tx_cfg_cmd *req;
7282 struct hclge_dev *hdev = vport->back;
7283 struct hclge_desc desc;
7286 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7288 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7289 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7290 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7291 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7292 vcfg->accept_tag1 ? 1 : 0);
7293 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7294 vcfg->accept_untag1 ? 1 : 0);
7295 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7296 vcfg->accept_tag2 ? 1 : 0);
7297 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7298 vcfg->accept_untag2 ? 1 : 0);
7299 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7300 vcfg->insert_tag1_en ? 1 : 0);
7301 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7302 vcfg->insert_tag2_en ? 1 : 0);
7303 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7305 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7306 req->vf_bitmap[req->vf_offset] =
7307 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7309 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7310 if (status)
7311 dev_err(&hdev->pdev->dev,
7312 "Send port txvlan cfg command fail, ret =%d\n",
7313 status);
7315 return status;
7318 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7320 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7321 struct hclge_vport_vtag_rx_cfg_cmd *req;
7322 struct hclge_dev *hdev = vport->back;
7323 struct hclge_desc desc;
7326 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7328 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7329 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7330 vcfg->strip_tag1_en ? 1 : 0);
7331 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7332 vcfg->strip_tag2_en ? 1 : 0);
7333 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7334 vcfg->vlan1_vlan_prionly ? 1 : 0);
7335 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7336 vcfg->vlan2_vlan_prionly ? 1 : 0);
7338 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7339 req->vf_bitmap[req->vf_offset] =
7340 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7342 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7343 if (status)
7344 dev_err(&hdev->pdev->dev,
7345 "Send port rxvlan cfg command fail, ret =%d\n",
7346 status);
7348 return status;
7351 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7352 u16 port_base_vlan_state,
7357 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7358 vport->txvlan_cfg.accept_tag1 = true;
7359 vport->txvlan_cfg.insert_tag1_en = false;
7360 vport->txvlan_cfg.default_tag1 = 0;
7362 vport->txvlan_cfg.accept_tag1 = false;
7363 vport->txvlan_cfg.insert_tag1_en = true;
7364 vport->txvlan_cfg.default_tag1 = vlan_tag;
7367 vport->txvlan_cfg.accept_untag1 = true;
7369 /* accept_tag2 and accept_untag2 are not supported on
7370 * pdev revision(0x20); newer revisions support them,
7371 * but these two fields cannot be configured by the user.
7372 */
7373 vport->txvlan_cfg.accept_tag2 = true;
7374 vport->txvlan_cfg.accept_untag2 = true;
7375 vport->txvlan_cfg.insert_tag2_en = false;
7376 vport->txvlan_cfg.default_tag2 = 0;
7378 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7379 vport->rxvlan_cfg.strip_tag1_en = false;
7380 vport->rxvlan_cfg.strip_tag2_en =
7381 vport->rxvlan_cfg.rx_vlan_offload_en;
7383 vport->rxvlan_cfg.strip_tag1_en =
7384 vport->rxvlan_cfg.rx_vlan_offload_en;
7385 vport->rxvlan_cfg.strip_tag2_en = true;
7387 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7388 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7390 ret = hclge_set_vlan_tx_offload_cfg(vport);
7391 if (ret)
7392 return ret;
7394 return hclge_set_vlan_rx_offload_cfg(vport);
7397 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7399 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7400 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7401 struct hclge_desc desc;
7404 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7405 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7406 rx_req->ot_fst_vlan_type =
7407 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7408 rx_req->ot_sec_vlan_type =
7409 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7410 rx_req->in_fst_vlan_type =
7411 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7412 rx_req->in_sec_vlan_type =
7413 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7415 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7416 if (status) {
7417 dev_err(&hdev->pdev->dev,
7418 "Send rxvlan protocol type command fail, ret =%d\n",
7419 status);
7420 return status;
7421 }
7423 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7425 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7426 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7427 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7429 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7430 if (status)
7431 dev_err(&hdev->pdev->dev,
7432 "Send txvlan protocol type command fail, ret =%d\n",
7433 status);
7435 return status;
7438 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7440 #define HCLGE_DEF_VLAN_TYPE 0x8100
7442 struct hnae3_handle *handle = &hdev->vport[0].nic;
7443 struct hclge_vport *vport;
7447 if (hdev->pdev->revision >= 0x21) {
7448 /* for revision 0x21, vf vlan filter is per function */
7449 for (i = 0; i < hdev->num_alloc_vport; i++) {
7450 vport = &hdev->vport[i];
7451 ret = hclge_set_vlan_filter_ctrl(hdev,
7452 HCLGE_FILTER_TYPE_VF,
7453 HCLGE_FILTER_FE_EGRESS,
7454 true,
7455 vport->vport_id);
7456 if (ret)
7457 return ret;
7458 }
7460 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7461 HCLGE_FILTER_FE_INGRESS, true,
7462 0);
7463 if (ret)
7464 return ret;
7465 } else {
7466 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7467 HCLGE_FILTER_FE_EGRESS_V1_B,
7468 true, 0);
7469 if (ret)
7470 return ret;
7471 }
7473 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7475 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7476 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7477 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7478 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7479 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7480 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7482 ret = hclge_set_vlan_protocol_type(hdev);
7483 if (ret)
7484 return ret;
7486 for (i = 0; i < hdev->num_alloc_vport; i++) {
7489 vport = &hdev->vport[i];
7490 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7492 ret = hclge_vlan_offload_cfg(vport,
7493 vport->port_base_vlan_cfg.state,
7494 vlan_tag);
7495 if (ret)
7496 return ret;
7497 }
7499 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7502 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7505 struct hclge_vport_vlan_cfg *vlan;
7507 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7508 if (!vlan)
7509 return;
7511 vlan->hd_tbl_status = writen_to_tbl;
7512 vlan->vlan_id = vlan_id;
7514 list_add_tail(&vlan->node, &vport->vlan_list);
7517 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7519 struct hclge_vport_vlan_cfg *vlan, *tmp;
7520 struct hclge_dev *hdev = vport->back;
7523 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7524 if (!vlan->hd_tbl_status) {
7525 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7526 vport->vport_id,
7527 vlan->vlan_id, 0, false);
7528 if (ret) {
7529 dev_err(&hdev->pdev->dev,
7530 "restore vport vlan list failed, ret=%d\n",
7531 ret);
7532 return ret;
7533 }
7534 }
7535 vlan->hd_tbl_status = true;
7541 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7544 struct hclge_vport_vlan_cfg *vlan, *tmp;
7545 struct hclge_dev *hdev = vport->back;
7547 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7548 if (vlan->vlan_id == vlan_id) {
7549 if (is_write_tbl && vlan->hd_tbl_status)
7550 hclge_set_vlan_filter_hw(hdev,
7551 htons(ETH_P_8021Q),
7552 vport->vport_id,
7553 vlan_id, 0,
7554 true);
7556 list_del(&vlan->node);
7557 kfree(vlan);
7558 break;
7563 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7565 struct hclge_vport_vlan_cfg *vlan, *tmp;
7566 struct hclge_dev *hdev = vport->back;
7568 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7569 if (vlan->hd_tbl_status)
7570 hclge_set_vlan_filter_hw(hdev,
7571 htons(ETH_P_8021Q),
7572 vport->vport_id,
7573 vlan->vlan_id, 0,
7574 true);
7576 vlan->hd_tbl_status = false;
7577 if (is_del_list) {
7578 list_del(&vlan->node);
7579 kfree(vlan);
7580 }
7584 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7586 struct hclge_vport_vlan_cfg *vlan, *tmp;
7587 struct hclge_vport *vport;
7590 mutex_lock(&hdev->vport_cfg_mutex);
7591 for (i = 0; i < hdev->num_alloc_vport; i++) {
7592 vport = &hdev->vport[i];
7593 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7594 list_del(&vlan->node);
7598 mutex_unlock(&hdev->vport_cfg_mutex);
7601 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7603 struct hclge_vport *vport = hclge_get_vport(handle);
7604 struct hclge_vport_vlan_cfg *vlan, *tmp;
7605 struct hclge_dev *hdev = vport->back;
7606 u16 vlan_proto, qos;
7610 mutex_lock(&hdev->vport_cfg_mutex);
7611 for (i = 0; i < hdev->num_alloc_vport; i++) {
7612 vport = &hdev->vport[i];
7613 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7614 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7615 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7616 state = vport->port_base_vlan_cfg.state;
7618 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7619 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7620 vport->vport_id, vlan_id, qos,
7621 false);
7622 }
7625 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7626 if (vlan->hd_tbl_status)
7627 hclge_set_vlan_filter_hw(hdev,
7628 htons(ETH_P_8021Q),
7629 vport->vport_id,
7630 vlan->vlan_id, 0,
7631 false);
7632 }
7633 }
7635 mutex_unlock(&hdev->vport_cfg_mutex);
7638 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7640 struct hclge_vport *vport = hclge_get_vport(handle);
7642 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7643 vport->rxvlan_cfg.strip_tag1_en = false;
7644 vport->rxvlan_cfg.strip_tag2_en = enable;
7646 vport->rxvlan_cfg.strip_tag1_en = enable;
7647 vport->rxvlan_cfg.strip_tag2_en = true;
7649 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7650 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7651 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7653 return hclge_set_vlan_rx_offload_cfg(vport);
7656 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7657 u16 port_base_vlan_state,
7658 struct hclge_vlan_info *new_info,
7659 struct hclge_vlan_info *old_info)
7661 struct hclge_dev *hdev = vport->back;
7664 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7665 hclge_rm_vport_all_vlan_table(vport, false);
7666 return hclge_set_vlan_filter_hw(hdev,
7667 htons(new_info->vlan_proto),
7668 vport->vport_id,
7669 new_info->vlan_tag,
7670 new_info->qos, false);
7671 }
7673 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7674 vport->vport_id, old_info->vlan_tag,
7675 old_info->qos, true);
7676 if (ret)
7677 return ret;
7679 return hclge_add_vport_all_vlan_table(vport);
7682 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7683 struct hclge_vlan_info *vlan_info)
7685 struct hnae3_handle *nic = &vport->nic;
7686 struct hclge_vlan_info *old_vlan_info;
7687 struct hclge_dev *hdev = vport->back;
7690 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7692 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7696 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7697 /* add new VLAN tag */
7698 ret = hclge_set_vlan_filter_hw(hdev,
7699 htons(vlan_info->vlan_proto),
7700 vport->vport_id,
7701 vlan_info->vlan_tag,
7702 vlan_info->qos, false);
7703 if (ret)
7704 return ret;
7706 /* remove old VLAN tag */
7707 ret = hclge_set_vlan_filter_hw(hdev,
7708 htons(old_vlan_info->vlan_proto),
7709 vport->vport_id,
7710 old_vlan_info->vlan_tag,
7711 old_vlan_info->qos, true);
7712 if (ret)
7713 return ret;
7715 goto update;
7716 }
7718 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7719 old_vlan_info);
7720 if (ret)
7721 return ret;
7722 update:
7723 /* update state only when disabling/enabling port based VLAN */
7724 vport->port_base_vlan_cfg.state = state;
7725 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7726 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7728 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7731 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7732 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7733 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7735 return 0;
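/* Decision table implemented by hclge_get_port_base_vlan_state()
 * below (current state vs. requested vlan):
 *   DISABLE + vlan 0   -> NOCHANGE
 *   DISABLE + vlan X   -> ENABLE
 *   ENABLE  + vlan 0   -> DISABLE
 *   ENABLE  + same tag -> NOCHANGE
 *   ENABLE  + new tag  -> MODIFY
 */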
7738 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7739 enum hnae3_port_base_vlan_state state,
7742 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7743 if (!vlan)
7744 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7745 else
7746 return HNAE3_PORT_BASE_VLAN_ENABLE;
7747 } else {
7748 if (!vlan)
7749 return HNAE3_PORT_BASE_VLAN_DISABLE;
7750 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7751 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7752 else
7753 return HNAE3_PORT_BASE_VLAN_MODIFY;
7754 }
7757 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7758 u16 vlan, u8 qos, __be16 proto)
7760 struct hclge_vport *vport = hclge_get_vport(handle);
7761 struct hclge_dev *hdev = vport->back;
7762 struct hclge_vlan_info vlan_info;
7766 if (hdev->pdev->revision == 0x20)
7769 /* qos is a 3-bit value, so it cannot be bigger than 7 */
7770 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7772 if (proto != htons(ETH_P_8021Q))
7773 return -EPROTONOSUPPORT;
7775 vport = &hdev->vport[vfid];
7776 state = hclge_get_port_base_vlan_state(vport,
7777 vport->port_base_vlan_cfg.state,
7779 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7782 vlan_info.vlan_tag = vlan;
7783 vlan_info.qos = qos;
7784 vlan_info.vlan_proto = ntohs(proto);
7786 /* update port based VLAN for PF */
7787 if (!vfid) {
7788 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7789 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7790 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7792 return ret;
7793 }
7795 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7796 return hclge_update_port_base_vlan_cfg(vport, state,
7797 &vlan_info);
7798 } else {
7799 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7800 vport->vport_id, state,
7801 vlan, qos, ntohs(proto));
7802 return ret;
7803 }
7807 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7808 u16 vlan_id, bool is_kill)
7810 struct hclge_vport *vport = hclge_get_vport(handle);
7811 struct hclge_dev *hdev = vport->back;
7812 bool writen_to_tbl = false;
7815 /* When device is resetting, firmware is unable to handle
7816 * mailbox. Just record the vlan id, and remove it after
7817 * reset finished.
7818 */
7819 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
7820 set_bit(vlan_id, vport->vlan_del_fail_bmap);
7824 /* When port based vlan is enabled, we use the port based vlan as the
7825 * vlan filter entry. In this case, we don't update the vlan filter table
7826 * when the user adds a new vlan or removes an existing one, just update
7827 * the vport vlan list. The vlan ids in the vlan list are not written to
7828 * the vlan filter table until port based vlan is disabled
7829 */
7830 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7831 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7832 vlan_id, 0, is_kill);
7833 writen_to_tbl = true;
7834 }
7836 if (!ret) {
7837 if (is_kill)
7838 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7839 else
7840 hclge_add_vport_vlan_table(vport, vlan_id,
7841 writen_to_tbl);
7842 } else if (is_kill) {
7843 /* When removing the hw vlan filter failed, record the vlan id,
7844 * and try to remove it from hw later, to stay consistent
7845 * with the vlan list.
7846 */
7847 set_bit(vlan_id, vport->vlan_del_fail_bmap);
7852 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
7854 #define HCLGE_MAX_SYNC_COUNT 60
7856 int i, ret, sync_cnt = 0;
7859 /* start from vport 1 for PF is always alive */
7860 for (i = 0; i < hdev->num_alloc_vport; i++) {
7861 struct hclge_vport *vport = &hdev->vport[i];
7863 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
7864 VLAN_N_VID);
7865 while (vlan_id != VLAN_N_VID) {
7866 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7867 vport->vport_id, vlan_id,
7868 0, true);
7869 if (ret && ret != -EINVAL)
7870 return;
7872 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
7873 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7875 sync_cnt++;
7876 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
7877 return;
7879 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
7880 VLAN_N_VID);
7881 }
7882 }
7885 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7887 struct hclge_config_max_frm_size_cmd *req;
7888 struct hclge_desc desc;
7890 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7892 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7893 req->max_frm_size = cpu_to_le16(new_mps);
7894 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7896 return hclge_cmd_send(&hdev->hw, &desc, 1);
7899 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7901 struct hclge_vport *vport = hclge_get_vport(handle);
7903 return hclge_set_vport_mtu(vport, new_mtu);
7906 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7908 struct hclge_dev *hdev = vport->back;
7909 int i, max_frm_size, ret;
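/* The hardware is programmed with a maximum frame size rather than an
 * MTU, so the layer 2 overhead is added back on: e.g. an MTU of 1500
 * becomes 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2 * 4 (VLAN_HLEN,
 * allowing for double tagging) = 1526 bytes.
 */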
7911 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7912 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7913 max_frm_size > HCLGE_MAC_MAX_FRAME)
7916 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7917 mutex_lock(&hdev->vport_lock);
7918 /* VF's mps must fit within hdev->mps */
7919 if (vport->vport_id && max_frm_size > hdev->mps) {
7920 mutex_unlock(&hdev->vport_lock);
7921 return -EINVAL;
7922 } else if (vport->vport_id) {
7923 vport->mps = max_frm_size;
7924 mutex_unlock(&hdev->vport_lock);
7925 return 0;
7926 }
7928 /* PF's mps must be greater than VF's mps */
7929 for (i = 1; i < hdev->num_alloc_vport; i++)
7930 if (max_frm_size < hdev->vport[i].mps) {
7931 mutex_unlock(&hdev->vport_lock);
7932 return -EINVAL;
7933 }
7935 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7937 ret = hclge_set_mac_mtu(hdev, max_frm_size);
7938 if (ret) {
7939 dev_err(&hdev->pdev->dev,
7940 "Change mtu fail, ret =%d\n", ret);
7941 goto out;
7942 }
7944 hdev->mps = max_frm_size;
7945 vport->mps = max_frm_size;
7947 ret = hclge_buffer_alloc(hdev);
7948 if (ret)
7949 dev_err(&hdev->pdev->dev,
7950 "Allocate buffer fail, ret =%d\n", ret);
7952 out:
7953 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7954 mutex_unlock(&hdev->vport_lock);
7955 return ret;
7958 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7961 struct hclge_reset_tqp_queue_cmd *req;
7962 struct hclge_desc desc;
7965 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7967 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7968 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7969 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7971 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7973 dev_err(&hdev->pdev->dev,
7974 "Send tqp reset cmd error, status =%d\n", ret);
7981 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7983 struct hclge_reset_tqp_queue_cmd *req;
7984 struct hclge_desc desc;
7987 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7989 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7990 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7992 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7994 dev_err(&hdev->pdev->dev,
7995 "Get reset status error, status =%d\n", ret);
7999 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
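/* A TQP reset is a three step sequence, used by hclge_reset_tqp() and
 * hclge_reset_vf_queue() below: assert the reset via
 * hclge_send_reset_tqp_cmd(..., true), poll hclge_get_reset_status()
 * until the hardware reports ready, then deassert with
 * hclge_send_reset_tqp_cmd(..., false).
 */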
8002 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8004 struct hnae3_queue *queue;
8005 struct hclge_tqp *tqp;
8007 queue = handle->kinfo.tqp[queue_id];
8008 tqp = container_of(queue, struct hclge_tqp, q);
8010 return tqp->index;
8013 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8015 struct hclge_vport *vport = hclge_get_vport(handle);
8016 struct hclge_dev *hdev = vport->back;
8017 int reset_try_times = 0;
8022 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8024 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8026 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8030 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8031 if (ret) {
8032 dev_err(&hdev->pdev->dev,
8033 "Send reset tqp cmd fail, ret = %d\n", ret);
8034 return ret;
8035 }
8037 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8038 /* Wait for tqp hw reset */
8039 msleep(20);
8040 reset_status = hclge_get_reset_status(hdev, queue_gid);
8041 if (reset_status)
8042 break;
8043 }
8045 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8046 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8050 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8052 dev_err(&hdev->pdev->dev,
8053 "Deassert the soft reset fail, ret = %d\n", ret);
8058 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8060 struct hclge_dev *hdev = vport->back;
8061 int reset_try_times = 0;
8066 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8068 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8069 if (ret) {
8070 dev_warn(&hdev->pdev->dev,
8071 "Send reset tqp cmd fail, ret = %d\n", ret);
8072 return;
8073 }
8075 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8076 /* Wait for tqp hw reset */
8077 msleep(20);
8078 reset_status = hclge_get_reset_status(hdev, queue_gid);
8079 if (reset_status)
8080 break;
8081 }
8083 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8084 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8088 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8090 dev_warn(&hdev->pdev->dev,
8091 "Deassert the soft reset fail, ret = %d\n", ret);
8094 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8096 struct hclge_vport *vport = hclge_get_vport(handle);
8097 struct hclge_dev *hdev = vport->back;
8099 return hdev->fw_version;
8102 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8104 struct phy_device *phydev = hdev->hw.mac.phydev;
8106 if (!phydev)
8107 return;
8109 phy_set_asym_pause(phydev, rx_en, tx_en);
8112 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8116 if (rx_en && tx_en)
8117 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8118 else if (rx_en && !tx_en)
8119 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8120 else if (!rx_en && tx_en)
8121 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8123 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8125 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8126 return 0;
8128 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8129 if (ret) {
8130 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
8131 ret);
8132 return ret;
8133 }
8135 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8137 return 0;
8140 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8142 struct phy_device *phydev = hdev->hw.mac.phydev;
8143 u16 remote_advertising = 0;
8144 u16 local_advertising;
8145 u32 rx_pause, tx_pause;
8148 if (!phydev->link || !phydev->autoneg)
8149 return 0;
8151 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8153 if (phydev->pause)
8154 remote_advertising = LPA_PAUSE_CAP;
8156 if (phydev->asym_pause)
8157 remote_advertising |= LPA_PAUSE_ASYM;
8159 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8160 remote_advertising);
8161 tx_pause = flowctl & FLOW_CTRL_TX;
8162 rx_pause = flowctl & FLOW_CTRL_RX;
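/* mii_resolve_flowctrl_fdx() applies the standard 802.3 pause
 * resolution to the two advertisements: e.g. if both ends advertise
 * symmetric pause, flowctl is FLOW_CTRL_TX | FLOW_CTRL_RX; with
 * asymmetric advertisements only one direction may survive.
 */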
8164 if (phydev->duplex == HCLGE_MAC_HALF) {
8165 tx_pause = 0;
8166 rx_pause = 0;
8167 }
8169 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8172 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8173 u32 *rx_en, u32 *tx_en)
8175 struct hclge_vport *vport = hclge_get_vport(handle);
8176 struct hclge_dev *hdev = vport->back;
8178 *auto_neg = hclge_get_autoneg(handle);
8180 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8181 *rx_en = 0;
8182 *tx_en = 0;
8183 return;
8184 }
8186 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8187 *rx_en = 1;
8188 *tx_en = 0;
8189 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8190 *tx_en = 1;
8191 *rx_en = 0;
8192 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8193 *rx_en = 1;
8194 *tx_en = 1;
8195 } else {
8196 *rx_en = 0;
8197 *tx_en = 0;
8198 }
8201 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8202 u32 rx_en, u32 tx_en)
8204 struct hclge_vport *vport = hclge_get_vport(handle);
8205 struct hclge_dev *hdev = vport->back;
8206 struct phy_device *phydev = hdev->hw.mac.phydev;
8209 fc_autoneg = hclge_get_autoneg(handle);
8210 if (auto_neg != fc_autoneg) {
8211 dev_info(&hdev->pdev->dev,
8212 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8216 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8217 dev_info(&hdev->pdev->dev,
8218 "Priority flow control enabled. Cannot set link flow control.\n");
8222 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8224 if (!auto_neg)
8225 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8227 if (phydev)
8228 return phy_start_aneg(phydev);
8230 if (hdev->pdev->revision == 0x20)
8231 return -EOPNOTSUPP;
8233 return hclge_restart_autoneg(handle);
8236 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8237 u8 *auto_neg, u32 *speed, u8 *duplex)
8239 struct hclge_vport *vport = hclge_get_vport(handle);
8240 struct hclge_dev *hdev = vport->back;
8243 *speed = hdev->hw.mac.speed;
8245 *duplex = hdev->hw.mac.duplex;
8247 *auto_neg = hdev->hw.mac.autoneg;
8250 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8253 struct hclge_vport *vport = hclge_get_vport(handle);
8254 struct hclge_dev *hdev = vport->back;
8257 *media_type = hdev->hw.mac.media_type;
8260 *module_type = hdev->hw.mac.module_type;
8263 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8264 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8266 struct hclge_vport *vport = hclge_get_vport(handle);
8267 struct hclge_dev *hdev = vport->back;
8268 struct phy_device *phydev = hdev->hw.mac.phydev;
8269 int mdix_ctrl, mdix, is_resolved;
8270 unsigned int retval;
8273 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8274 *tp_mdix = ETH_TP_MDI_INVALID;
8275 return;
8276 }
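/* The MDI-X control/status bits live in a paged register set: select
 * the MDIX page, read HCLGE_PHY_CSC_REG/HCLGE_PHY_CSS_REG, then
 * switch back to the copper page so later PHY accesses hit the
 * default registers again.
 */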
8278 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8280 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8281 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8282 HCLGE_PHY_MDIX_CTRL_S);
8284 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8285 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8286 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8288 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8290 switch (mdix_ctrl) {
8291 case 0x0:
8292 *tp_mdix_ctrl = ETH_TP_MDI;
8293 break;
8294 case 0x1:
8295 *tp_mdix_ctrl = ETH_TP_MDI_X;
8296 break;
8297 case 0x3:
8298 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8299 break;
8300 default:
8301 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8302 break;
8303 }
8305 if (!is_resolved)
8306 *tp_mdix = ETH_TP_MDI_INVALID;
8307 else if (mdix)
8308 *tp_mdix = ETH_TP_MDI_X;
8309 else
8310 *tp_mdix = ETH_TP_MDI;
8313 static void hclge_info_show(struct hclge_dev *hdev)
8315 struct device *dev = &hdev->pdev->dev;
8317 dev_info(dev, "PF info begin:\n");
8319 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8320 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8321 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8322 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8323 dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8324 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8325 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8326 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8327 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8328 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8329 dev_info(dev, "This is %s PF\n",
8330 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8331 dev_info(dev, "DCB %s\n",
8332 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8333 dev_info(dev, "MQPRIO %s\n",
8334 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8336 dev_info(dev, "PF info end.\n");
8339 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8340 struct hclge_vport *vport)
8342 struct hnae3_client *client = vport->nic.client;
8343 struct hclge_dev *hdev = ae_dev->priv;
8347 rst_cnt = hdev->rst_stats.reset_cnt;
8348 ret = client->ops->init_instance(&vport->nic);
8352 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8353 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8354 rst_cnt != hdev->rst_stats.reset_cnt) {
8359 /* Enable nic hw error interrupts */
8360 ret = hclge_config_nic_hw_error(hdev, true);
8362 dev_err(&ae_dev->pdev->dev,
8363 "fail(%d) to enable hw error interrupts\n", ret);
8367 hnae3_set_client_init_flag(client, ae_dev, 1);
8369 if (netif_msg_drv(&hdev->vport->nic))
8370 hclge_info_show(hdev);
8375 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8376 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8377 msleep(HCLGE_WAIT_RESET_DONE);
8379 client->ops->uninit_instance(&vport->nic, 0);
8384 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8385 struct hclge_vport *vport)
8387 struct hnae3_client *client = vport->roce.client;
8388 struct hclge_dev *hdev = ae_dev->priv;
8392 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8393 !hdev->nic_client)
8394 return 0;
8396 client = hdev->roce_client;
8397 ret = hclge_init_roce_base_info(vport);
8398 if (ret)
8399 return ret;
8401 rst_cnt = hdev->rst_stats.reset_cnt;
8402 ret = client->ops->init_instance(&vport->roce);
8406 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8407 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8408 rst_cnt != hdev->rst_stats.reset_cnt) {
8413 /* Enable roce ras interrupts */
8414 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8416 dev_err(&ae_dev->pdev->dev,
8417 "fail(%d) to enable roce ras interrupts\n", ret);
8421 hnae3_set_client_init_flag(client, ae_dev, 1);
8426 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8427 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8428 msleep(HCLGE_WAIT_RESET_DONE);
8430 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8435 static int hclge_init_client_instance(struct hnae3_client *client,
8436 struct hnae3_ae_dev *ae_dev)
8438 struct hclge_dev *hdev = ae_dev->priv;
8439 struct hclge_vport *vport;
8442 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8443 vport = &hdev->vport[i];
8445 switch (client->type) {
8446 case HNAE3_CLIENT_KNIC:
8448 hdev->nic_client = client;
8449 vport->nic.client = client;
8450 ret = hclge_init_nic_client_instance(ae_dev, vport);
8454 ret = hclge_init_roce_client_instance(ae_dev, vport);
8459 case HNAE3_CLIENT_ROCE:
8460 if (hnae3_dev_roce_supported(hdev)) {
8461 hdev->roce_client = client;
8462 vport->roce.client = client;
8465 ret = hclge_init_roce_client_instance(ae_dev, vport);
8477 clear_nic:
8478 hdev->nic_client = NULL;
8479 vport->nic.client = NULL;
8480 return ret;
8481 clear_roce:
8482 hdev->roce_client = NULL;
8483 vport->roce.client = NULL;
8484 return ret;
8487 static void hclge_uninit_client_instance(struct hnae3_client *client,
8488 struct hnae3_ae_dev *ae_dev)
8490 struct hclge_dev *hdev = ae_dev->priv;
8491 struct hclge_vport *vport;
8494 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8495 vport = &hdev->vport[i];
8496 if (hdev->roce_client) {
8497 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8498 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8499 msleep(HCLGE_WAIT_RESET_DONE);
8501 hdev->roce_client->ops->uninit_instance(&vport->roce,
8503 hdev->roce_client = NULL;
8504 vport->roce.client = NULL;
8506 if (client->type == HNAE3_CLIENT_ROCE)
8508 if (hdev->nic_client && client->ops->uninit_instance) {
8509 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8510 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8511 msleep(HCLGE_WAIT_RESET_DONE);
8513 client->ops->uninit_instance(&vport->nic, 0);
8514 hdev->nic_client = NULL;
8515 vport->nic.client = NULL;
8520 static int hclge_pci_init(struct hclge_dev *hdev)
8522 struct pci_dev *pdev = hdev->pdev;
8523 struct hclge_hw *hw;
8526 ret = pci_enable_device(pdev);
8528 dev_err(&pdev->dev, "failed to enable PCI device\n");
8532 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8533 if (ret) {
8534 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8535 if (ret) {
8536 dev_err(&pdev->dev,
8537 "can't set consistent PCI DMA");
8538 goto err_disable_device;
8539 }
8540 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8541 }
8543 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8545 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8546 goto err_disable_device;
8549 pci_set_master(pdev);
8550 hw = &hdev->hw;
8551 hw->io_base = pcim_iomap(pdev, 2, 0);
8552 if (!hw->io_base) {
8553 dev_err(&pdev->dev, "Can't map configuration register space\n");
8554 ret = -ENOMEM;
8555 goto err_clr_master;
8556 }
8558 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8560 return 0;
8561 err_clr_master:
8562 pci_clear_master(pdev);
8563 pci_release_regions(pdev);
8564 err_disable_device:
8565 pci_disable_device(pdev);
8567 return ret;
8570 static void hclge_pci_uninit(struct hclge_dev *hdev)
8572 struct pci_dev *pdev = hdev->pdev;
8574 pcim_iounmap(pdev, hdev->hw.io_base);
8575 pci_free_irq_vectors(pdev);
8576 pci_clear_master(pdev);
8577 pci_release_mem_regions(pdev);
8578 pci_disable_device(pdev);
8581 static void hclge_state_init(struct hclge_dev *hdev)
8583 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8584 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8585 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8586 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8587 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8588 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8591 static void hclge_state_uninit(struct hclge_dev *hdev)
8593 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8594 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8596 if (hdev->service_timer.function)
8597 del_timer_sync(&hdev->service_timer);
8598 if (hdev->reset_timer.function)
8599 del_timer_sync(&hdev->reset_timer);
8600 if (hdev->service_task.func)
8601 cancel_work_sync(&hdev->service_task);
8602 if (hdev->rst_service_task.func)
8603 cancel_work_sync(&hdev->rst_service_task);
8604 if (hdev->mbx_service_task.func)
8605 cancel_work_sync(&hdev->mbx_service_task);
8608 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8610 #define HCLGE_FLR_WAIT_MS 100
8611 #define HCLGE_FLR_WAIT_CNT 50
8612 struct hclge_dev *hdev = ae_dev->priv;
8615 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8616 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8617 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8618 hclge_reset_event(hdev->pdev, NULL);
8620 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8621 cnt++ < HCLGE_FLR_WAIT_CNT)
8622 msleep(HCLGE_FLR_WAIT_MS);
8624 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8625 dev_err(&hdev->pdev->dev,
8626 "flr wait down timeout: %d\n", cnt);
8629 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8631 struct hclge_dev *hdev = ae_dev->priv;
8633 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8636 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
8640 for (i = 0; i < hdev->num_alloc_vport; i++) {
8641 struct hclge_vport *vport = &hdev->vport[i];
8644 /* Send cmd to clear VF's FUNC_RST_ING */
8645 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
8647 dev_warn(&hdev->pdev->dev,
8648 "clear vf(%d) rst failed %d!\n",
8649 vport->vport_id, ret);
8653 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8655 struct pci_dev *pdev = ae_dev->pdev;
8656 struct hclge_dev *hdev;
8659 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8666 hdev->ae_dev = ae_dev;
8667 hdev->reset_type = HNAE3_NONE_RESET;
8668 hdev->reset_level = HNAE3_FUNC_RESET;
8669 ae_dev->priv = hdev;
8670 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8672 mutex_init(&hdev->vport_lock);
8673 mutex_init(&hdev->vport_cfg_mutex);
8674 spin_lock_init(&hdev->fd_rule_lock);
8676 ret = hclge_pci_init(hdev);
8678 dev_err(&pdev->dev, "PCI init failed\n");
8682 /* Firmware command queue initialize */
8683 ret = hclge_cmd_queue_init(hdev);
8685 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8686 goto err_pci_uninit;
8689 /* Firmware command initialize */
8690 ret = hclge_cmd_init(hdev);
8691 if (ret)
8692 goto err_cmd_uninit;
8694 ret = hclge_get_cap(hdev);
8696 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8698 goto err_cmd_uninit;
8701 ret = hclge_configure(hdev);
8703 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8704 goto err_cmd_uninit;
8707 ret = hclge_init_msi(hdev);
8709 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8710 goto err_cmd_uninit;
8713 ret = hclge_misc_irq_init(hdev);
8714 if (ret) {
8715 dev_err(&pdev->dev,
8716 "Misc IRQ(vector0) init error, ret = %d.\n",
8717 ret);
8718 goto err_msi_uninit;
8719 }
8721 ret = hclge_alloc_tqps(hdev);
8723 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8724 goto err_msi_irq_uninit;
8727 ret = hclge_alloc_vport(hdev);
8729 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8730 goto err_msi_irq_uninit;
8733 ret = hclge_map_tqp(hdev);
8735 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8736 goto err_msi_irq_uninit;
8739 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8740 ret = hclge_mac_mdio_config(hdev);
8742 dev_err(&hdev->pdev->dev,
8743 "mdio config fail ret=%d\n", ret);
8744 goto err_msi_irq_uninit;
8748 ret = hclge_init_umv_space(hdev);
8750 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8751 goto err_mdiobus_unreg;
8754 ret = hclge_mac_init(hdev);
8756 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8757 goto err_mdiobus_unreg;
8760 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8762 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8763 goto err_mdiobus_unreg;
8766 ret = hclge_config_gro(hdev, true);
8767 if (ret)
8768 goto err_mdiobus_unreg;
8770 ret = hclge_init_vlan_config(hdev);
8772 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8773 goto err_mdiobus_unreg;
8776 ret = hclge_tm_schd_init(hdev);
8778 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8779 goto err_mdiobus_unreg;
8782 hclge_rss_init_cfg(hdev);
8783 ret = hclge_rss_init_hw(hdev);
8785 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8786 goto err_mdiobus_unreg;
8789 ret = init_mgr_tbl(hdev);
8791 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8792 goto err_mdiobus_unreg;
8795 ret = hclge_init_fd_config(hdev);
8796 if (ret) {
8797 dev_err(&pdev->dev,
8798 "fd table init fail, ret=%d\n", ret);
8799 goto err_mdiobus_unreg;
8800 }
8802 INIT_KFIFO(hdev->mac_tnl_log);
8804 hclge_dcb_ops_set(hdev);
8806 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8807 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8808 INIT_WORK(&hdev->service_task, hclge_service_task);
8809 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8810 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8812 hclge_clear_all_event_cause(hdev);
8813 hclge_clear_resetting_state(hdev);
8815 /* Log and clear the hw errors that have already occurred */
8816 hclge_handle_all_hns_hw_errors(ae_dev);
8818 /* request delayed reset for the error recovery, because an immediate
8819 * global reset on a PF would affect the pending initialization of other PFs
8820 */
8821 if (ae_dev->hw_err_reset_req) {
8822 enum hnae3_reset_type reset_level;
8824 reset_level = hclge_get_reset_level(ae_dev,
8825 &ae_dev->hw_err_reset_req);
8826 hclge_set_def_reset_request(ae_dev, reset_level);
8827 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
8830 /* Enable MISC vector(vector0) */
8831 hclge_enable_vector(&hdev->misc_vector, true);
8833 hclge_state_init(hdev);
8834 hdev->last_reset_time = jiffies;
8836 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8838 return 0;
8839 err_mdiobus_unreg:
8840 if (hdev->hw.mac.phydev)
8841 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8842 err_msi_irq_uninit:
8843 hclge_misc_irq_uninit(hdev);
8844 err_msi_uninit:
8845 pci_free_irq_vectors(pdev);
8846 err_cmd_uninit:
8847 hclge_cmd_uninit(hdev);
8848 err_pci_uninit:
8849 pcim_iounmap(pdev, hdev->hw.io_base);
8850 pci_clear_master(pdev);
8851 pci_release_regions(pdev);
8852 pci_disable_device(pdev);
8853 out:
8854 return ret;
8857 static void hclge_stats_clear(struct hclge_dev *hdev)
8859 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8862 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8864 struct hclge_vport *vport = hdev->vport;
8867 for (i = 0; i < hdev->num_alloc_vport; i++) {
8868 hclge_vport_stop(vport);
8869 vport++;
8870 }
8873 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8875 struct hclge_dev *hdev = ae_dev->priv;
8876 struct pci_dev *pdev = ae_dev->pdev;
8879 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8881 hclge_stats_clear(hdev);
8882 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8883 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8885 ret = hclge_cmd_init(hdev);
8887 dev_err(&pdev->dev, "Cmd queue init failed\n");
8891 ret = hclge_map_tqp(hdev);
8893 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8897 hclge_reset_umv_space(hdev);
8899 ret = hclge_mac_init(hdev);
8901 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8905 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8907 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8911 ret = hclge_config_gro(hdev, true);
8915 ret = hclge_init_vlan_config(hdev);
8917 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8921 ret = hclge_tm_init_hw(hdev, true);
8923 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8927 ret = hclge_rss_init_hw(hdev);
8929 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8933 ret = hclge_init_fd_config(hdev);
8935 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
8939 /* Re-enable the hw error interrupts because
8940 * the interrupts get disabled on global reset.
8942 ret = hclge_config_nic_hw_error(hdev, true);
8943 if (ret) {
8944 dev_err(&pdev->dev,
8945 "fail(%d) to re-enable NIC hw error interrupts\n",
8946 ret);
8947 return ret;
8948 }
8950 if (hdev->roce_client) {
8951 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8952 if (ret) {
8953 dev_err(&pdev->dev,
8954 "fail(%d) to re-enable roce ras interrupts\n",
8955 ret);
8956 return ret;
8957 }
8960 hclge_reset_vport_state(hdev);
8962 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8968 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8970 struct hclge_dev *hdev = ae_dev->priv;
8971 struct hclge_mac *mac = &hdev->hw.mac;
8973 hclge_state_uninit(hdev);
8975 if (mac->phydev)
8976 mdiobus_unregister(mac->mdio_bus);
8978 hclge_uninit_umv_space(hdev);
8980 /* Disable MISC vector(vector0) */
8981 hclge_enable_vector(&hdev->misc_vector, false);
8982 synchronize_irq(hdev->misc_vector.vector_irq);
8984 /* Disable all hw interrupts */
8985 hclge_config_mac_tnl_int(hdev, false);
8986 hclge_config_nic_hw_error(hdev, false);
8987 hclge_config_rocee_ras_interrupt(hdev, false);
8989 hclge_cmd_uninit(hdev);
8990 hclge_misc_irq_uninit(hdev);
8991 hclge_pci_uninit(hdev);
8992 mutex_destroy(&hdev->vport_lock);
8993 hclge_uninit_vport_mac_table(hdev);
8994 hclge_uninit_vport_vlan_table(hdev);
8995 mutex_destroy(&hdev->vport_cfg_mutex);
8996 ae_dev->priv = NULL;
8999 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9001 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9002 struct hclge_vport *vport = hclge_get_vport(handle);
9003 struct hclge_dev *hdev = vport->back;
9005 return min_t(u32, hdev->rss_size_max,
9006 vport->alloc_tqps / kinfo->num_tc);
9009 static void hclge_get_channels(struct hnae3_handle *handle,
9010 struct ethtool_channels *ch)
9012 ch->max_combined = hclge_get_max_channels(handle);
9013 ch->other_count = 1;
9014 ch->max_other = 1;
9015 ch->combined_count = handle->kinfo.rss_size;
9018 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9019 u16 *alloc_tqps, u16 *max_rss_size)
9021 struct hclge_vport *vport = hclge_get_vport(handle);
9022 struct hclge_dev *hdev = vport->back;
9024 *alloc_tqps = vport->alloc_tqps;
9025 *max_rss_size = hdev->rss_size_max;
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	kinfo->req_rss_size = new_tqps_num;
	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
		return ret;
	}
	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;
	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;
	/* Reinitialize the RSS indirection table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;
	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret = %d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
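/*
 * Register dump support ("ethtool -d"): the 32-bit and 64-bit register
 * counts are not fixed in the driver but queried from firmware, so the
 * dump length and contents below are computed at runtime.
 */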
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
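/*
 * The 64-bit query below mirrors the 32-bit one with wider elements; the
 * first descriptor carries DATANUM - NODATA_LEN values, the NODATA words
 * apparently being head room reserved by the firmware response format.
 */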
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
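/*
 * Each direct-read register block is padded out to whole dump lines of
 * REG_NUM_PER_LINE u32s plus one separator line, hence the "+ 1" per
 * block below. As a worked example: a block of 14 registers occupies
 * 14 / 4 + 1 = 4 lines (64 bytes) in the dump. The firmware-queried
 * 32/64-bit register sets are appended unpadded at the end.
 */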
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
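/*
 * Fill the dump buffer in the same order hclge_get_regs_len() accounted
 * for it: cmdq block, common block, per-TQP ring blocks, per-vector
 * interrupt blocks, then the firmware-provided 32-bit and 64-bit sets.
 */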
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}
	/* fetch per-PF register values from the PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	/* fetch PF common register values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
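/*
 * Backs "ethtool -p <dev>" (set_phys_id): ETHTOOL_ID_ACTIVE lights the
 * locate LED and ETHTOOL_ID_INACTIVE turns it back off.
 */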
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
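/*
 * Dispatch table gluing this PF driver into the hnae3 framework; the
 * hns3 ENET/client layer drives the PF hardware through these callbacks.
 */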
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}
static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);