1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
23 #include "hclge_err.h"
26 #define HCLGE_NAME "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30 #define HCLGE_BUF_SIZE_UNIT 256U
31 #define HCLGE_BUF_MUL_BY 2
32 #define HCLGE_BUF_DIV_BY 2
34 #define HCLGE_RESET_MAX_FAIL_CNT 5
36 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
37 static int hclge_init_vlan_config(struct hclge_dev *hdev);
38 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
39 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
40 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
41 u16 *allocated_size, bool is_alloc);
42 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
43 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
44 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
47 static struct hnae3_ae_algo ae_algo;
49 static const struct pci_device_id ae_algo_pci_tbl[] = {
50 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
51 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
52 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
53 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
54 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
55 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
56 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
57 /* required last entry */
61 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
63 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
64 HCLGE_CMDQ_TX_ADDR_H_REG,
65 HCLGE_CMDQ_TX_DEPTH_REG,
66 HCLGE_CMDQ_TX_TAIL_REG,
67 HCLGE_CMDQ_TX_HEAD_REG,
68 HCLGE_CMDQ_RX_ADDR_L_REG,
69 HCLGE_CMDQ_RX_ADDR_H_REG,
70 HCLGE_CMDQ_RX_DEPTH_REG,
71 HCLGE_CMDQ_RX_TAIL_REG,
72 HCLGE_CMDQ_RX_HEAD_REG,
73 HCLGE_VECTOR0_CMDQ_SRC_REG,
74 HCLGE_CMDQ_INTR_STS_REG,
75 HCLGE_CMDQ_INTR_EN_REG,
76 HCLGE_CMDQ_INTR_GEN_REG};
78 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
79 HCLGE_VECTOR0_OTER_EN_REG,
80 HCLGE_MISC_RESET_STS_REG,
81 HCLGE_MISC_VECTOR_INT_STS,
82 HCLGE_GLOBAL_RESET_REG,
86 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
87 HCLGE_RING_RX_ADDR_H_REG,
88 HCLGE_RING_RX_BD_NUM_REG,
89 HCLGE_RING_RX_BD_LENGTH_REG,
90 HCLGE_RING_RX_MERGE_EN_REG,
91 HCLGE_RING_RX_TAIL_REG,
92 HCLGE_RING_RX_HEAD_REG,
93 HCLGE_RING_RX_FBD_NUM_REG,
94 HCLGE_RING_RX_OFFSET_REG,
95 HCLGE_RING_RX_FBD_OFFSET_REG,
96 HCLGE_RING_RX_STASH_REG,
97 HCLGE_RING_RX_BD_ERR_REG,
98 HCLGE_RING_TX_ADDR_L_REG,
99 HCLGE_RING_TX_ADDR_H_REG,
100 HCLGE_RING_TX_BD_NUM_REG,
101 HCLGE_RING_TX_PRIORITY_REG,
102 HCLGE_RING_TX_TC_REG,
103 HCLGE_RING_TX_MERGE_EN_REG,
104 HCLGE_RING_TX_TAIL_REG,
105 HCLGE_RING_TX_HEAD_REG,
106 HCLGE_RING_TX_FBD_NUM_REG,
107 HCLGE_RING_TX_OFFSET_REG,
108 HCLGE_RING_TX_EBD_NUM_REG,
109 HCLGE_RING_TX_EBD_OFFSET_REG,
110 HCLGE_RING_TX_BD_ERR_REG,
113 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
114 HCLGE_TQP_INTR_GL0_REG,
115 HCLGE_TQP_INTR_GL1_REG,
116 HCLGE_TQP_INTR_GL2_REG,
117 HCLGE_TQP_INTR_RL_REG};
119 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
121 "Serdes serial Loopback test",
122 "Serdes parallel Loopback test",
126 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
127 {"mac_tx_mac_pause_num",
128 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
129 {"mac_rx_mac_pause_num",
130 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
131 {"mac_tx_control_pkt_num",
132 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
133 {"mac_rx_control_pkt_num",
134 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
135 {"mac_tx_pfc_pkt_num",
136 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
137 {"mac_tx_pfc_pri0_pkt_num",
138 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
139 {"mac_tx_pfc_pri1_pkt_num",
140 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
141 {"mac_tx_pfc_pri2_pkt_num",
142 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
143 {"mac_tx_pfc_pri3_pkt_num",
144 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
145 {"mac_tx_pfc_pri4_pkt_num",
146 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
147 {"mac_tx_pfc_pri5_pkt_num",
148 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
149 {"mac_tx_pfc_pri6_pkt_num",
150 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
151 {"mac_tx_pfc_pri7_pkt_num",
152 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
153 {"mac_rx_pfc_pkt_num",
154 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
155 {"mac_rx_pfc_pri0_pkt_num",
156 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
157 {"mac_rx_pfc_pri1_pkt_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
159 {"mac_rx_pfc_pri2_pkt_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
161 {"mac_rx_pfc_pri3_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
163 {"mac_rx_pfc_pri4_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
165 {"mac_rx_pfc_pri5_pkt_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
167 {"mac_rx_pfc_pri6_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
169 {"mac_rx_pfc_pri7_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
171 {"mac_tx_total_pkt_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
173 {"mac_tx_total_oct_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
175 {"mac_tx_good_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
177 {"mac_tx_bad_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
179 {"mac_tx_good_oct_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
181 {"mac_tx_bad_oct_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
183 {"mac_tx_uni_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
185 {"mac_tx_multi_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
187 {"mac_tx_broad_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
189 {"mac_tx_undersize_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
191 {"mac_tx_oversize_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
193 {"mac_tx_64_oct_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
195 {"mac_tx_65_127_oct_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
197 {"mac_tx_128_255_oct_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
199 {"mac_tx_256_511_oct_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
201 {"mac_tx_512_1023_oct_pkt_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
203 {"mac_tx_1024_1518_oct_pkt_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
205 {"mac_tx_1519_2047_oct_pkt_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
207 {"mac_tx_2048_4095_oct_pkt_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
209 {"mac_tx_4096_8191_oct_pkt_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
211 {"mac_tx_8192_9216_oct_pkt_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
213 {"mac_tx_9217_12287_oct_pkt_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
215 {"mac_tx_12288_16383_oct_pkt_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
217 {"mac_tx_1519_max_good_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
219 {"mac_tx_1519_max_bad_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
221 {"mac_rx_total_pkt_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
223 {"mac_rx_total_oct_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
225 {"mac_rx_good_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
227 {"mac_rx_bad_pkt_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
229 {"mac_rx_good_oct_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
231 {"mac_rx_bad_oct_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
233 {"mac_rx_uni_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
235 {"mac_rx_multi_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
237 {"mac_rx_broad_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
239 {"mac_rx_undersize_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
241 {"mac_rx_oversize_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
243 {"mac_rx_64_oct_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
245 {"mac_rx_65_127_oct_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
247 {"mac_rx_128_255_oct_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
249 {"mac_rx_256_511_oct_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
251 {"mac_rx_512_1023_oct_pkt_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
253 {"mac_rx_1024_1518_oct_pkt_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
255 {"mac_rx_1519_2047_oct_pkt_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
257 {"mac_rx_2048_4095_oct_pkt_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
259 {"mac_rx_4096_8191_oct_pkt_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
261 {"mac_rx_8192_9216_oct_pkt_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
263 {"mac_rx_9217_12287_oct_pkt_num",
264 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
265 {"mac_rx_12288_16383_oct_pkt_num",
266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
267 {"mac_rx_1519_max_good_pkt_num",
268 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
269 {"mac_rx_1519_max_bad_pkt_num",
270 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
272 {"mac_tx_fragment_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
274 {"mac_tx_undermin_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
276 {"mac_tx_jabber_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
278 {"mac_tx_err_all_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
280 {"mac_tx_from_app_good_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
282 {"mac_tx_from_app_bad_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
284 {"mac_rx_fragment_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
286 {"mac_rx_undermin_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
288 {"mac_rx_jabber_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
290 {"mac_rx_fcs_err_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
292 {"mac_rx_send_app_good_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
294 {"mac_rx_send_app_bad_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
298 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
300 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
301 .ethter_type = cpu_to_le16(ETH_P_LLDP),
302 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
303 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
304 .i_port_bitmap = 0x1,
308 static const u8 hclge_hash_key[] = {
309 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
310 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
311 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
312 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
313 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
316 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
318 #define HCLGE_MAC_CMD_NUM 21
320 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
321 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
326 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
327 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
329 dev_err(&hdev->pdev->dev,
330 "Get MAC pkt stats fail, status = %d.\n", ret);
335 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
336 /* for special opcode 0032, only the first desc has the head */
337 if (unlikely(i == 0)) {
338 desc_data = (__le64 *)(&desc[i].data[0]);
339 n = HCLGE_RD_FIRST_STATS_NUM;
341 desc_data = (__le64 *)(&desc[i]);
342 n = HCLGE_RD_OTHER_STATS_NUM;
345 for (k = 0; k < n; k++) {
346 *data += le64_to_cpu(*desc_data);
355 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
357 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
358 struct hclge_desc *desc;
363 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
366 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
367 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
373 for (i = 0; i < desc_num; i++) {
374 /* for special opcode 0034, only the first desc has the head */
376 desc_data = (__le64 *)(&desc[i].data[0]);
377 n = HCLGE_RD_FIRST_STATS_NUM;
379 desc_data = (__le64 *)(&desc[i]);
380 n = HCLGE_RD_OTHER_STATS_NUM;
383 for (k = 0; k < n; k++) {
384 *data += le64_to_cpu(*desc_data);
395 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
397 struct hclge_desc desc;
402 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
403 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
407 desc_data = (__le32 *)(&desc.data[0]);
408 reg_num = le32_to_cpu(*desc_data);
410 *desc_num = 1 + ((reg_num - 3) >> 2) +
411 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
416 static int hclge_mac_update_stats(struct hclge_dev *hdev)
421 ret = hclge_mac_query_reg_num(hdev, &desc_num);
423 /* The firmware supports the new statistics acquisition method */
425 ret = hclge_mac_update_stats_complete(hdev, desc_num);
426 else if (ret == -EOPNOTSUPP)
427 ret = hclge_mac_update_stats_defective(hdev);
429 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
434 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
436 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
437 struct hclge_vport *vport = hclge_get_vport(handle);
438 struct hclge_dev *hdev = vport->back;
439 struct hnae3_queue *queue;
440 struct hclge_desc desc[1];
441 struct hclge_tqp *tqp;
444 for (i = 0; i < kinfo->num_tqps; i++) {
445 queue = handle->kinfo.tqp[i];
446 tqp = container_of(queue, struct hclge_tqp, q);
447 /* command : HCLGE_OPC_QUERY_IGU_STAT */
448 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
451 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
452 ret = hclge_cmd_send(&hdev->hw, desc, 1);
454 dev_err(&hdev->pdev->dev,
455 "Query tqp stat fail, status = %d,queue = %d\n",
459 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
460 le32_to_cpu(desc[0].data[1]);
463 for (i = 0; i < kinfo->num_tqps; i++) {
464 queue = handle->kinfo.tqp[i];
465 tqp = container_of(queue, struct hclge_tqp, q);
466 /* command : HCLGE_OPC_QUERY_IGU_STAT */
467 hclge_cmd_setup_basic_desc(&desc[0],
468 HCLGE_OPC_QUERY_TX_STATUS,
471 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
472 ret = hclge_cmd_send(&hdev->hw, desc, 1);
474 dev_err(&hdev->pdev->dev,
475 "Query tqp stat fail, status = %d,queue = %d\n",
479 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
480 le32_to_cpu(desc[0].data[1]);
486 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
488 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
489 struct hclge_tqp *tqp;
493 for (i = 0; i < kinfo->num_tqps; i++) {
494 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
495 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
498 for (i = 0; i < kinfo->num_tqps; i++) {
499 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
500 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
506 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
508 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
510 /* each tqp has TX & RX two queues */
511 return kinfo->num_tqps * (2);
514 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
516 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
520 for (i = 0; i < kinfo->num_tqps; i++) {
521 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
522 struct hclge_tqp, q);
523 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
525 buff = buff + ETH_GSTRING_LEN;
528 for (i = 0; i < kinfo->num_tqps; i++) {
529 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
530 struct hclge_tqp, q);
531 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
533 buff = buff + ETH_GSTRING_LEN;
539 static u64 *hclge_comm_get_stats(const void *comm_stats,
540 const struct hclge_comm_stats_str strs[],
546 for (i = 0; i < size; i++)
547 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
552 static u8 *hclge_comm_get_strings(u32 stringset,
553 const struct hclge_comm_stats_str strs[],
556 char *buff = (char *)data;
559 if (stringset != ETH_SS_STATS)
562 for (i = 0; i < size; i++) {
563 snprintf(buff, ETH_GSTRING_LEN,
565 buff = buff + ETH_GSTRING_LEN;
571 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
573 struct hnae3_handle *handle;
576 handle = &hdev->vport[0].nic;
577 if (handle->client) {
578 status = hclge_tqps_update_stats(handle);
580 dev_err(&hdev->pdev->dev,
581 "Update TQPS stats fail, status = %d.\n",
586 status = hclge_mac_update_stats(hdev);
588 dev_err(&hdev->pdev->dev,
589 "Update MAC stats fail, status = %d.\n", status);
592 static void hclge_update_stats(struct hnae3_handle *handle,
593 struct net_device_stats *net_stats)
595 struct hclge_vport *vport = hclge_get_vport(handle);
596 struct hclge_dev *hdev = vport->back;
599 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
602 status = hclge_mac_update_stats(hdev);
604 dev_err(&hdev->pdev->dev,
605 "Update MAC stats fail, status = %d.\n",
608 status = hclge_tqps_update_stats(handle);
610 dev_err(&hdev->pdev->dev,
611 "Update TQPS stats fail, status = %d.\n",
614 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
617 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
619 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
620 HNAE3_SUPPORT_PHY_LOOPBACK |\
621 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
622 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
624 struct hclge_vport *vport = hclge_get_vport(handle);
625 struct hclge_dev *hdev = vport->back;
628 /* Loopback test support rules:
629 * mac: only GE mode support
630 * serdes: all mac mode will support include GE/XGE/LGE/CGE
631 * phy: only support when phy device exist on board
633 if (stringset == ETH_SS_TEST) {
634 /* clear loopback bit flags at first */
635 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
636 if (hdev->pdev->revision >= 0x21 ||
637 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
638 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
639 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
641 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
645 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
646 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
647 } else if (stringset == ETH_SS_STATS) {
648 count = ARRAY_SIZE(g_mac_stats_string) +
649 hclge_tqps_get_sset_count(handle, stringset);
655 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
658 u8 *p = (char *)data;
661 if (stringset == ETH_SS_STATS) {
662 size = ARRAY_SIZE(g_mac_stats_string);
663 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
665 p = hclge_tqps_get_strings(handle, p);
666 } else if (stringset == ETH_SS_TEST) {
667 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
668 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
670 p += ETH_GSTRING_LEN;
672 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
673 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
675 p += ETH_GSTRING_LEN;
677 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
679 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
681 p += ETH_GSTRING_LEN;
683 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
684 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
686 p += ETH_GSTRING_LEN;
691 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
693 struct hclge_vport *vport = hclge_get_vport(handle);
694 struct hclge_dev *hdev = vport->back;
697 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
698 ARRAY_SIZE(g_mac_stats_string), data);
699 p = hclge_tqps_get_stats(handle, p);
702 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
705 struct hclge_vport *vport = hclge_get_vport(handle);
706 struct hclge_dev *hdev = vport->back;
708 *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
709 *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
712 static int hclge_parse_func_status(struct hclge_dev *hdev,
713 struct hclge_func_status_cmd *status)
715 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
718 /* Set the pf to main pf */
719 if (status->pf_state & HCLGE_PF_STATE_MAIN)
720 hdev->flag |= HCLGE_FLAG_MAIN;
722 hdev->flag &= ~HCLGE_FLAG_MAIN;
727 static int hclge_query_function_status(struct hclge_dev *hdev)
729 #define HCLGE_QUERY_MAX_CNT 5
731 struct hclge_func_status_cmd *req;
732 struct hclge_desc desc;
736 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
737 req = (struct hclge_func_status_cmd *)desc.data;
740 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
742 dev_err(&hdev->pdev->dev,
743 "query function status failed %d.\n", ret);
747 /* Check pf reset is done */
750 usleep_range(1000, 2000);
751 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
753 ret = hclge_parse_func_status(hdev, req);
758 static int hclge_query_pf_resource(struct hclge_dev *hdev)
760 struct hclge_pf_res_cmd *req;
761 struct hclge_desc desc;
764 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
765 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
767 dev_err(&hdev->pdev->dev,
768 "query pf resource failed %d.\n", ret);
772 req = (struct hclge_pf_res_cmd *)desc.data;
773 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
774 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
776 if (req->tx_buf_size)
778 __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
780 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
782 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
784 if (req->dv_buf_size)
786 __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
788 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
790 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
792 if (hnae3_dev_roce_supported(hdev)) {
793 hdev->roce_base_msix_offset =
794 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
795 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
797 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
798 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
800 /* PF should have NIC vectors and Roce vectors,
801 * NIC vectors are queued before Roce vectors.
803 hdev->num_msi = hdev->num_roce_msi +
804 hdev->roce_base_msix_offset;
807 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
808 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
814 static int hclge_parse_speed(int speed_cmd, int *speed)
818 *speed = HCLGE_MAC_SPEED_10M;
821 *speed = HCLGE_MAC_SPEED_100M;
824 *speed = HCLGE_MAC_SPEED_1G;
827 *speed = HCLGE_MAC_SPEED_10G;
830 *speed = HCLGE_MAC_SPEED_25G;
833 *speed = HCLGE_MAC_SPEED_40G;
836 *speed = HCLGE_MAC_SPEED_50G;
839 *speed = HCLGE_MAC_SPEED_100G;
848 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
850 struct hclge_vport *vport = hclge_get_vport(handle);
851 struct hclge_dev *hdev = vport->back;
852 u32 speed_ability = hdev->hw.mac.speed_ability;
856 case HCLGE_MAC_SPEED_10M:
857 speed_bit = HCLGE_SUPPORT_10M_BIT;
859 case HCLGE_MAC_SPEED_100M:
860 speed_bit = HCLGE_SUPPORT_100M_BIT;
862 case HCLGE_MAC_SPEED_1G:
863 speed_bit = HCLGE_SUPPORT_1G_BIT;
865 case HCLGE_MAC_SPEED_10G:
866 speed_bit = HCLGE_SUPPORT_10G_BIT;
868 case HCLGE_MAC_SPEED_25G:
869 speed_bit = HCLGE_SUPPORT_25G_BIT;
871 case HCLGE_MAC_SPEED_40G:
872 speed_bit = HCLGE_SUPPORT_40G_BIT;
874 case HCLGE_MAC_SPEED_50G:
875 speed_bit = HCLGE_SUPPORT_50G_BIT;
877 case HCLGE_MAC_SPEED_100G:
878 speed_bit = HCLGE_SUPPORT_100G_BIT;
884 if (speed_bit & speed_ability)
890 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
892 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
893 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
895 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
896 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
898 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
899 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
901 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
902 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
904 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
905 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
909 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
911 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
912 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
914 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
915 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
917 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
918 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
920 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
921 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
923 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
924 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
928 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
930 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
931 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
933 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
934 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
936 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
937 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
939 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
940 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
942 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
943 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
947 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
949 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
950 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
952 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
953 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
955 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
956 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
958 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
959 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
961 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
962 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
964 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
965 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
969 static void hclge_convert_setting_fec(struct hclge_mac *mac)
971 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
972 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
974 switch (mac->speed) {
975 case HCLGE_MAC_SPEED_10G:
976 case HCLGE_MAC_SPEED_40G:
977 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
980 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
982 case HCLGE_MAC_SPEED_25G:
983 case HCLGE_MAC_SPEED_50G:
984 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
987 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
990 case HCLGE_MAC_SPEED_100G:
991 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
992 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
995 mac->fec_ability = 0;
1000 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1003 struct hclge_mac *mac = &hdev->hw.mac;
1005 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1006 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1009 hclge_convert_setting_sr(mac, speed_ability);
1010 hclge_convert_setting_lr(mac, speed_ability);
1011 hclge_convert_setting_cr(mac, speed_ability);
1012 if (hdev->pdev->revision >= 0x21)
1013 hclge_convert_setting_fec(mac);
1015 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1016 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1017 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1020 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1023 struct hclge_mac *mac = &hdev->hw.mac;
1025 hclge_convert_setting_kr(mac, speed_ability);
1026 if (hdev->pdev->revision >= 0x21)
1027 hclge_convert_setting_fec(mac);
1028 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1029 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1030 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1033 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1036 unsigned long *supported = hdev->hw.mac.supported;
1038 /* default to support all speed for GE port */
1040 speed_ability = HCLGE_SUPPORT_GE;
1042 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1043 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1046 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1047 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1049 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1053 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1054 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1055 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1058 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1059 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1060 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1063 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1065 u8 media_type = hdev->hw.mac.media_type;
1067 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1068 hclge_parse_fiber_link_mode(hdev, speed_ability);
1069 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1070 hclge_parse_copper_link_mode(hdev, speed_ability);
1071 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1072 hclge_parse_backplane_link_mode(hdev, speed_ability);
/* Decode the raw firmware config descriptors (desc[0]/desc[1]) into the
 * driver's struct hclge_cfg: vport/TC/queue counts, PHY and media info,
 * MAC address, default speed, RSS size, speed ability and UMV table space.
 */
1074 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1076 struct hclge_cfg_param_cmd *req;
1077 u64 mac_addr_tmp_high;
1081 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1083 /* get the configuration */
1084 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1087 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1088 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1089 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1090 HCLGE_CFG_TQP_DESC_N_M,
1091 HCLGE_CFG_TQP_DESC_N_S);
1093 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1094 HCLGE_CFG_PHY_ADDR_M,
1095 HCLGE_CFG_PHY_ADDR_S);
1096 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1097 HCLGE_CFG_MEDIA_TP_M,
1098 HCLGE_CFG_MEDIA_TP_S);
1099 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1100 HCLGE_CFG_RX_BUF_LEN_M,
1101 HCLGE_CFG_RX_BUF_LEN_S);
1102 /* get mac_address */
1103 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1104 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1105 HCLGE_CFG_MAC_ADDR_H_M,
1106 HCLGE_CFG_MAC_ADDR_H_S);
/* Splice the upper 16 bits above the 32-bit low word; the shift is split
 * as (<< 31) << 1 rather than << 32 — presumably to avoid a 32-bit-shift
 * pitfall on narrower types. NOTE(review): verify against original intent.
 */
1108 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1110 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1111 HCLGE_CFG_DEFAULT_SPEED_M,
1112 HCLGE_CFG_DEFAULT_SPEED_S);
1113 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1114 HCLGE_CFG_RSS_SIZE_M,
1115 HCLGE_CFG_RSS_SIZE_S);
/* Unpack the 48-bit MAC address least-significant byte first */
1117 for (i = 0; i < ETH_ALEN; i++)
1118 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1120 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1121 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1123 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1124 HCLGE_CFG_SPEED_ABILITY_M,
1125 HCLGE_CFG_SPEED_ABILITY_S);
1126 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1127 HCLGE_CFG_UMV_TBL_SPACE_M,
1128 HCLGE_CFG_UMV_TBL_SPACE_S);
/* Firmware reporting 0 means "unspecified": fall back to the default
 * per-PF unicast MAC-VLAN table space.
 */
1129 if (!cfg->umv_space)
1130 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1133 /* hclge_get_cfg: query the static parameter from flash
1134 * @hdev: pointer to struct hclge_dev
1135 * @hcfg: the config structure to be filled in
 *
 * Builds HCLGE_PF_CFG_DESC_NUM GET_CFG_PARAM descriptors (each reading
 * HCLGE_CFG_RD_LEN_BYTES at a successive offset), sends them in one batch,
 * then decodes the reply via hclge_parse_cfg().
 */
1137 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1139 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1140 struct hclge_cfg_param_cmd *req;
1144 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1147 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1148 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1150 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1151 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1152 /* Len should be united by 4 bytes when send to hardware */
1153 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1154 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1155 req->offset = cpu_to_le32(offset);
1158 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1160 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1164 hclge_parse_cfg(hcfg, desc);
/* Query device capabilities: first the PF function status, then the PF
 * resource allocation (queues/vectors); errors are logged and propagated.
 */
1169 static int hclge_get_cap(struct hclge_dev *hdev)
1173 ret = hclge_query_function_status(hdev);
1175 dev_err(&hdev->pdev->dev,
1176 "query function status error %d.\n", ret);
1180 /* get pf resource */
1181 ret = hclge_query_pf_resource(hdev);
1183 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
/* When booted as a kdump (crash capture) kernel, shrink resource usage:
 * one queue pair per vport and minimal descriptor ring sizes. No-op in a
 * normal kernel.
 */
1188 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1190 #define HCLGE_MIN_TX_DESC 64
1191 #define HCLGE_MIN_RX_DESC 64
1193 if (!is_kdump_kernel())
1196 dev_info(&hdev->pdev->dev,
1197 "Running kdump kernel. Using minimal resources\n");
1199 /* minimal queue pairs equals to the number of vports */
1200 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1201 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1202 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
/* Read the static configuration from firmware and populate hdev with it:
 * MAC address, media/PHY info, queue/descriptor counts, TC map, speed and
 * link modes. Validates tc_max and degrades to 1 TC when DCB is not
 * supported; finally applies kdump resource limits.
 */
1205 static int hclge_configure(struct hclge_dev *hdev)
1207 struct hclge_cfg cfg;
1211 ret = hclge_get_cfg(hdev, &cfg);
1213 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1217 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1218 hdev->base_tqp_pid = 0;
1219 hdev->rss_size_max = cfg.rss_size_max;
1220 hdev->rx_buf_len = cfg.rx_buf_len;
1221 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1222 hdev->hw.mac.media_type = cfg.media_type;
1223 hdev->hw.mac.phy_addr = cfg.phy_addr;
/* TX and RX rings share the single configured descriptor count */
1224 hdev->num_tx_desc = cfg.tqp_desc_num;
1225 hdev->num_rx_desc = cfg.tqp_desc_num;
1226 hdev->tm_info.num_pg = 1;
1227 hdev->tc_max = cfg.tc_num;
1228 hdev->tm_info.hw_pfc_map = 0;
1229 hdev->wanted_umv_size = cfg.umv_space;
1231 if (hnae3_dev_fd_supported(hdev)) {
1233 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1236 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1238 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1242 hclge_parse_link_mode(hdev, cfg.speed_ability);
/* Clamp an out-of-range TC count reported by firmware */
1244 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1245 (hdev->tc_max < 1)) {
1246 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1251 /* Dev does not support DCB */
1252 if (!hnae3_dev_dcb_supported(hdev)) {
1256 hdev->pfc_max = hdev->tc_max;
1259 hdev->tm_info.num_tc = 1;
1261 /* Currently does not support discontiguous TCs */
1262 for (i = 0; i < hdev->tm_info.num_tc; i++)
1263 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1265 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1267 hclge_init_kdump_kernel_config(hdev);
/* Program the hardware TSO MSS window (min/max) via the generic TSO
 * config command.
 */
1272 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1273 unsigned int tso_mss_max)
1275 struct hclge_cfg_tso_status_cmd *req;
1276 struct hclge_desc desc;
1279 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1281 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1284 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1285 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1286 req->tso_mss_min = cpu_to_le16(tso_mss);
/* NOTE(review): the MIN mask/shift is reused for the max field here —
 * presumably both 16-bit fields share the same bit layout; confirm
 * against the command layout before changing.
 */
1289 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1290 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1291 req->tso_mss_max = cpu_to_le16(tso_mss);
1293 return hclge_cmd_send(&hdev->hw, &desc, 1);
/* Enable or disable hardware GRO; silently succeeds when the device does
 * not support GRO.
 */
1296 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1298 struct hclge_cfg_gro_status_cmd *req;
1299 struct hclge_desc desc;
1302 if (!hnae3_dev_gro_supported(hdev))
1305 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1306 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1308 req->gro_en = cpu_to_le16(en ? 1 : 0);
1310 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1312 dev_err(&hdev->pdev->dev,
1313 "GRO hardware config cmd failed, ret = %d\n", ret);
/* Allocate the per-device TQP (task queue pair) array (devm-managed) and
 * initialize each entry's queue metadata and MMIO base address.
 */
1318 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1320 struct hclge_tqp *tqp;
1323 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1324 sizeof(struct hclge_tqp), GFP_KERNEL);
1330 for (i = 0; i < hdev->num_tqps; i++) {
1331 tqp->dev = &hdev->pdev->dev;
1334 tqp->q.ae_algo = &ae_algo;
1335 tqp->q.buf_size = hdev->rx_buf_len;
1336 tqp->q.tx_desc_num = hdev->num_tx_desc;
1337 tqp->q.rx_desc_num = hdev->num_rx_desc;
/* Each TQP's registers live at a fixed stride from the device io_base */
1338 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1339 i * HCLGE_TQP_REG_SIZE;
/* Tell firmware to map physical queue @tqp_pid to function @func_id as
 * virtual queue @tqp_vid; @is_pf selects PF vs VF mapping type.
 */
1347 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1348 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1350 struct hclge_tqp_map_cmd *req;
1351 struct hclge_desc desc;
1354 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1356 req = (struct hclge_tqp_map_cmd *)desc.data;
1357 req->tqp_id = cpu_to_le16(tqp_pid);
1358 req->tqp_vf = func_id;
1359 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1360 1 << HCLGE_TQP_MAP_EN_B;
1361 req->tqp_vid = cpu_to_le16(tqp_vid);
1363 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1365 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
/* Claim up to @num_tqps unallocated TQPs from the device pool for this
 * vport, wiring each into the vport's kinfo->tqp table, then derive the
 * vport's RSS size from the per-TC queue share.
 */
1370 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1372 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1373 struct hclge_dev *hdev = vport->back;
1376 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1377 alloced < num_tqps; i++) {
1378 if (!hdev->htqp[i].alloced) {
1379 hdev->htqp[i].q.handle = &vport->nic;
1380 hdev->htqp[i].q.tqp_index = alloced;
1381 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1382 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1383 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1384 hdev->htqp[i].alloced = true;
1388 vport->alloc_tqps = alloced;
1389 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1390 vport->alloc_tqps / hdev->tm_info.num_tc);
/* Initialize the vport's KNIC private info (descriptor counts, rx buffer
 * length), allocate its TQP pointer table, and assign queues to it.
 */
1395 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1396 u16 num_tx_desc, u16 num_rx_desc)
1399 struct hnae3_handle *nic = &vport->nic;
1400 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1401 struct hclge_dev *hdev = vport->back;
1404 kinfo->num_tx_desc = num_tx_desc;
1405 kinfo->num_rx_desc = num_rx_desc;
1407 kinfo->rx_buf_len = hdev->rx_buf_len;
1409 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1410 sizeof(struct hnae3_queue *), GFP_KERNEL);
1414 ret = hclge_assign_tqp(vport, num_tqps);
1416 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
/* Map every TQP already assigned to @vport into the function in firmware;
 * vport 0 is the PF itself.
 */
1421 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1422 struct hclge_vport *vport)
1424 struct hnae3_handle *nic = &vport->nic;
1425 struct hnae3_knic_private_info *kinfo;
1428 kinfo = &nic->kinfo;
1429 for (i = 0; i < vport->alloc_tqps; i++) {
1430 struct hclge_tqp *q =
1431 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1435 is_pf = !(vport->vport_id);
1436 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
/* Map the TQPs of every vport (VMDq vports + VFs + the PF itself) into
 * their functions.
 */
1445 static int hclge_map_tqp(struct hclge_dev *hdev)
1447 struct hclge_vport *vport = hdev->vport;
1450 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1451 for (i = 0; i < num_vport; i++) {
1454 ret = hclge_map_tqp_to_vport(hdev, vport);
/* Attach the hnae3 NIC handle of a vport to this driver and set up its
 * KNIC queues with the device-wide descriptor counts.
 */
1464 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1466 struct hnae3_handle *nic = &vport->nic;
1467 struct hclge_dev *hdev = vport->back;
1470 nic->pdev = hdev->pdev;
1471 nic->ae_algo = &ae_algo;
1472 nic->numa_node_mask = hdev->numa_node_mask;
1474 ret = hclge_knic_setup(vport, num_tqps,
1475 hdev->num_tx_desc, hdev->num_rx_desc);
1477 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
/* Allocate and initialize all vports (PF main vport + VMDq vports + VFs).
 * TQPs are split evenly across vports, with the remainder given to the
 * main (PF) vport. Fails when there are fewer TQPs than vports.
 */
1482 static int hclge_alloc_vport(struct hclge_dev *hdev)
1484 struct pci_dev *pdev = hdev->pdev;
1485 struct hclge_vport *vport;
1491 /* We need to alloc a vport for main NIC of PF */
1492 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1494 if (hdev->num_tqps < num_vport) {
1495 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1496 hdev->num_tqps, num_vport);
1500 /* Alloc the same number of TQPs for every vport */
1501 tqp_per_vport = hdev->num_tqps / num_vport;
1502 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1504 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1509 hdev->vport = vport;
1510 hdev->num_alloc_vport = num_vport;
1512 if (IS_ENABLED(CONFIG_PCI_IOV))
1513 hdev->num_alloc_vfs = hdev->num_req_vfs;
1515 for (i = 0; i < num_vport; i++) {
1517 vport->vport_id = i;
1518 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1519 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1520 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1521 INIT_LIST_HEAD(&vport->vlan_list);
1522 INIT_LIST_HEAD(&vport->uc_mac_list);
1523 INIT_LIST_HEAD(&vport->mc_mac_list);
/* vport 0 (the PF) gets the larger share including the remainder */
1526 ret = hclge_vport_setup(vport, tqp_main_vport);
1528 ret = hclge_vport_setup(vport, tqp_per_vport);
1531 "vport setup failed for vport %d, %d\n",
/* Send the calculated per-TC TX packet buffer sizes to firmware; sizes
 * are expressed in 128-byte units with the update-enable bit set.
 */
1542 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1543 struct hclge_pkt_buf_alloc *buf_alloc)
1545 /* TX buffer size is unit by 128 byte */
1546 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1547 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1548 struct hclge_tx_buff_alloc_cmd *req;
1549 struct hclge_desc desc;
1553 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1555 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1556 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1557 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1559 req->tx_pkt_buff[i] =
1560 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1561 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1564 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1566 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
/* Thin wrapper around hclge_cmd_alloc_tx_buff() that logs on failure */
1572 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1573 struct hclge_pkt_buf_alloc *buf_alloc)
1575 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1578 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
/* Count the TCs enabled in the hardware TC bitmap */
1583 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1588 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1589 if (hdev->hw_tc_map & BIT(i))
1594 /* Get the number of pfc enabled TCs, which have private buffer */
1595 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1596 struct hclge_pkt_buf_alloc *buf_alloc)
1598 struct hclge_priv_buf *priv;
1602 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1603 priv = &buf_alloc->priv_buf[i];
/* Counted only when the TC is PFC-enabled (and has a buffer) */
1604 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1612 /* Get the number of pfc disabled TCs, which have private buffer */
1613 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1614 struct hclge_pkt_buf_alloc *buf_alloc)
1616 struct hclge_priv_buf *priv;
1620 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1621 priv = &buf_alloc->priv_buf[i];
/* TC must be enabled in hardware but NOT in the PFC map */
1622 if (hdev->hw_tc_map & BIT(i) &&
1623 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
/* Sum of all per-TC private RX buffer sizes already allocated */
1631 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1633 struct hclge_priv_buf *priv;
1637 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1638 priv = &buf_alloc->priv_buf[i];
1640 rx_priv += priv->buf_size;
/* Sum of all per-TC TX buffer sizes already allocated */
1645 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1647 u32 i, total_tx_size = 0;
1649 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1650 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1652 return total_tx_size;
/* Check whether the remaining packet buffer (rx_all minus already
 * allocated private RX buffers) can hold the required shared buffer, and
 * if so compute the shared buffer size plus its high/low self and per-TC
 * thresholds. DCB-capable devices reserve hdev->dv_buf_size headroom;
 * non-DCB devices use a simpler MPS-based scheme. Returns false when the
 * shared buffer does not fit.
 */
1655 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1656 struct hclge_pkt_buf_alloc *buf_alloc,
1659 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1660 u32 tc_num = hclge_get_tc_num(hdev);
1661 u32 shared_buf, aligned_mps;
/* Work in 256-byte buffer units */
1665 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1667 if (hnae3_dev_dcb_supported(hdev))
1668 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1671 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1672 + hdev->dv_buf_size;
1674 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1675 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1676 HCLGE_BUF_SIZE_UNIT);
1678 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1679 if (rx_all < rx_priv + shared_std)
/* Whatever is left over becomes the shared buffer */
1682 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1683 buf_alloc->s_buf.buf_size = shared_buf;
1684 if (hnae3_dev_dcb_supported(hdev)) {
1685 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1686 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1687 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1688 HCLGE_BUF_SIZE_UNIT);
1690 buf_alloc->s_buf.self.high = aligned_mps +
1691 HCLGE_NON_DCB_ADDITIONAL_BUF;
1692 buf_alloc->s_buf.self.low = aligned_mps;
1695 if (hnae3_dev_dcb_supported(hdev)) {
1697 hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1699 hi_thrd = shared_buf - hdev->dv_buf_size;
1701 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1702 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1703 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1705 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1706 lo_thrd = aligned_mps;
/* All TCs share the same thresholds */
1709 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1710 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1711 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
/* Reserve hdev->tx_buf_size of TX buffer for every enabled TC out of the
 * total packet buffer; disabled TCs get zero. Fails when the total buffer
 * cannot cover an enabled TC's share.
 */
1717 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1718 struct hclge_pkt_buf_alloc *buf_alloc)
1722 total_size = hdev->pkt_buf_size;
1724 /* alloc tx buffer for all enabled tc */
1725 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1726 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1728 if (hdev->hw_tc_map & BIT(i)) {
1729 if (total_size < hdev->tx_buf_size)
1732 priv->tx_buf_size = hdev->tx_buf_size;
1734 priv->tx_buf_size = 0;
1737 total_size -= priv->tx_buf_size;
/* Compute private RX buffer sizes and watermarks for every enabled TC.
 * @max selects generous ("max") vs reduced watermark settings; PFC-enabled
 * TCs get a higher low watermark. Returns whether the resulting layout
 * still leaves room for the shared buffer (hclge_is_rx_buf_ok).
 */
1743 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1744 struct hclge_pkt_buf_alloc *buf_alloc)
1746 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1747 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1750 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1751 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1758 if (!(hdev->hw_tc_map & BIT(i)))
1763 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1764 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1765 priv->wl.high = roundup(priv->wl.low + aligned_mps,
1766 HCLGE_BUF_SIZE_UNIT);
1769 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
/* Private buffer must cover the high watermark plus DV headroom */
1773 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1776 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
/* Free the private RX buffers of non-PFC TCs, highest TC first, until the
 * remaining layout fits (or no such TC is left). Returns the final fit
 * verdict from hclge_is_rx_buf_ok().
 */
1779 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1780 struct hclge_pkt_buf_alloc *buf_alloc)
1782 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1783 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1786 /* let the last to be cleared first */
1787 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1788 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1789 unsigned int mask = BIT((unsigned int)i);
1791 if (hdev->hw_tc_map & mask &&
1792 !(hdev->tm_info.hw_pfc_map & mask)) {
1793 /* Clear the no pfc TC private buffer */
1801 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1802 no_pfc_priv_num == 0)
1806 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
/* Like hclge_drop_nopfc_buf_till_fit() but clears PFC-enabled TCs'
 * private buffers (highest TC first) until the layout fits.
 */
1809 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1810 struct hclge_pkt_buf_alloc *buf_alloc)
1812 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1813 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1816 /* let the last to be cleared first */
1817 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1818 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1819 unsigned int mask = BIT((unsigned int)i);
1821 if (hdev->hw_tc_map & mask &&
1822 hdev->tm_info.hw_pfc_map & mask) {
1823 /* Reduce the number of pfc TC with private buffer */
1831 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1836 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1839 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1840 * @hdev: pointer to struct hclge_dev
1841 * @buf_alloc: pointer to buffer calculation data
1842 * @return: 0: calculate successful, negative: fail
 *
 * Tries progressively less generous layouts: max watermarks, reduced
 * watermarks, then dropping non-PFC and finally PFC private buffers.
1844 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1845 struct hclge_pkt_buf_alloc *buf_alloc)
1847 /* When DCB is not supported, rx private buffer is not allocated. */
1848 if (!hnae3_dev_dcb_supported(hdev)) {
1849 u32 rx_all = hdev->pkt_buf_size;
1851 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1852 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1858 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1861 /* try to decrease the buffer size */
1862 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1865 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1868 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
/* Program the per-TC private RX buffer sizes and the shared buffer size
 * into hardware (sizes in HCLGE_BUF_UNIT_S units, enable bit set).
 */
1874 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1875 struct hclge_pkt_buf_alloc *buf_alloc)
1877 struct hclge_rx_priv_buff_cmd *req;
1878 struct hclge_desc desc;
1882 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1883 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1885 /* Alloc private buffer TCs */
1886 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1887 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1890 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1892 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1896 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1897 (1 << HCLGE_TC0_PRI_BUF_EN_B));
1899 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1901 dev_err(&hdev->pdev->dev,
1902 "rx private buffer alloc cmd failed %d\n", ret);
/* Program per-TC private RX buffer high/low watermarks. Two chained
 * descriptors carry HCLGE_TC_NUM_ONE_DESC TCs each; the first sets the
 * NEXT flag so hardware treats them as one command.
 */
1907 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1908 struct hclge_pkt_buf_alloc *buf_alloc)
1910 struct hclge_rx_priv_wl_buf *req;
1911 struct hclge_priv_buf *priv;
1912 struct hclge_desc desc[2];
1916 for (i = 0; i < 2; i++) {
1917 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1919 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1921 /* The first descriptor set the NEXT bit to 1 */
1923 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1925 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1927 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1928 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1930 priv = &buf_alloc->priv_buf[idx];
1931 req->tc_wl[j].high =
1932 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1933 req->tc_wl[j].high |=
1934 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1936 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1937 req->tc_wl[j].low |=
1938 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1942 /* Send 2 descriptor at one time */
1943 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1945 dev_err(&hdev->pdev->dev,
1946 "rx private waterline config cmd failed %d\n",
/* Program the shared buffer's per-TC high/low thresholds, again as two
 * chained descriptors of HCLGE_TC_NUM_ONE_DESC TCs each.
 */
1951 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1952 struct hclge_pkt_buf_alloc *buf_alloc)
1954 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1955 struct hclge_rx_com_thrd *req;
1956 struct hclge_desc desc[2];
1957 struct hclge_tc_thrd *tc;
1961 for (i = 0; i < 2; i++) {
1962 hclge_cmd_setup_basic_desc(&desc[i],
1963 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1964 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1966 /* The first descriptor set the NEXT bit to 1 */
1968 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1970 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1972 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1973 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1975 req->com_thrd[j].high =
1976 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1977 req->com_thrd[j].high |=
1978 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1979 req->com_thrd[j].low =
1980 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1981 req->com_thrd[j].low |=
1982 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1986 /* Send 2 descriptors at one time */
1987 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1989 dev_err(&hdev->pdev->dev,
1990 "common threshold config cmd failed %d\n", ret);
/* Program the shared buffer's own high/low waterline into hardware */
1994 static int hclge_common_wl_config(struct hclge_dev *hdev,
1995 struct hclge_pkt_buf_alloc *buf_alloc)
1997 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1998 struct hclge_rx_com_wl *req;
1999 struct hclge_desc desc;
2002 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2004 req = (struct hclge_rx_com_wl *)desc.data;
2005 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2006 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2008 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2009 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2011 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2013 dev_err(&hdev->pdev->dev,
2014 "common waterline config cmd failed %d\n", ret);
/* Top-level packet buffer setup: calculate and program TX buffers, then
 * RX private buffers, and (on DCB-capable devices) private waterlines and
 * common thresholds, finishing with the common waterline. The scratch
 * hclge_pkt_buf_alloc is heap-allocated and freed before returning.
 */
2019 int hclge_buffer_alloc(struct hclge_dev *hdev)
2021 struct hclge_pkt_buf_alloc *pkt_buf;
2024 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2028 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2030 dev_err(&hdev->pdev->dev,
2031 "could not calc tx buffer size for all TCs %d\n", ret);
2035 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2037 dev_err(&hdev->pdev->dev,
2038 "could not alloc tx buffers %d\n", ret);
2042 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2044 dev_err(&hdev->pdev->dev,
2045 "could not calc rx priv buffer size for all TCs %d\n",
2050 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2052 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2057 if (hnae3_dev_dcb_supported(hdev)) {
2058 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2060 dev_err(&hdev->pdev->dev,
2061 "could not configure rx private waterline %d\n",
2066 ret = hclge_common_thrd_config(hdev, pkt_buf);
2068 dev_err(&hdev->pdev->dev,
2069 "could not configure common threshold %d\n",
2075 ret = hclge_common_wl_config(hdev, pkt_buf);
2077 dev_err(&hdev->pdev->dev,
2078 "could not configure common waterline %d\n", ret);
/* Fill in the RoCE handle's vector/base/netdev/io_base info from the PF,
 * mirroring the NIC handle's pdev/ae_algo/numa mask. Fails when not
 * enough MSI vectors remain for RoCE.
 */
2085 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2087 struct hnae3_handle *roce = &vport->roce;
2088 struct hnae3_handle *nic = &vport->nic;
2090 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2092 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2093 vport->back->num_msi_left == 0)
2096 roce->rinfo.base_vector = vport->back->roce_base_vector;
2098 roce->rinfo.netdev = nic->kinfo.netdev;
2099 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2101 roce->pdev = nic->pdev;
2102 roce->ae_algo = nic->ae_algo;
2103 roce->numa_node_mask = nic->numa_node_mask;
/* Allocate MSI/MSI-X interrupt vectors (accepting fewer than requested
 * with a warning) and set up the vector_status / vector_irq bookkeeping
 * arrays. Frees the IRQ vectors again if bookkeeping allocation fails.
 */
2108 static int hclge_init_msi(struct hclge_dev *hdev)
2110 struct pci_dev *pdev = hdev->pdev;
2114 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2115 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2118 "failed(%d) to allocate MSI/MSI-X vectors\n",
2122 if (vectors < hdev->num_msi)
2123 dev_warn(&hdev->pdev->dev,
2124 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2125 hdev->num_msi, vectors);
2127 hdev->num_msi = vectors;
2128 hdev->num_msi_left = vectors;
2129 hdev->base_msi_vector = pdev->irq;
/* RoCE vectors start at a fixed offset past the NIC vectors */
2130 hdev->roce_base_vector = hdev->base_msi_vector +
2131 hdev->roce_base_msix_offset;
2133 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2134 sizeof(u16), GFP_KERNEL);
2135 if (!hdev->vector_status) {
2136 pci_free_irq_vectors(pdev);
2140 for (i = 0; i < hdev->num_msi; i++)
2141 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2143 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2144 sizeof(int), GFP_KERNEL);
2145 if (!hdev->vector_irq) {
2146 pci_free_irq_vectors(pdev);
/* Only 10M/100M links may be half duplex; force full duplex otherwise */
2153 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2155 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2156 duplex = HCLGE_MAC_FULL;
/* Program MAC speed and duplex into hardware, mapping each supported
 * speed to its firmware encoding (10M=6, 100M=7, 1G=0, 10G=1, 25G=2,
 * 40G=3, 50G=4, 100G=5); rejects any other speed.
 */
2161 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2164 struct hclge_config_mac_speed_dup_cmd *req;
2165 struct hclge_desc desc;
2168 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2170 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2173 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2176 case HCLGE_MAC_SPEED_10M:
2177 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2178 HCLGE_CFG_SPEED_S, 6);
2180 case HCLGE_MAC_SPEED_100M:
2181 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2182 HCLGE_CFG_SPEED_S, 7);
2184 case HCLGE_MAC_SPEED_1G:
2185 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2186 HCLGE_CFG_SPEED_S, 0);
2188 case HCLGE_MAC_SPEED_10G:
2189 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2190 HCLGE_CFG_SPEED_S, 1);
2192 case HCLGE_MAC_SPEED_25G:
2193 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2194 HCLGE_CFG_SPEED_S, 2);
2196 case HCLGE_MAC_SPEED_40G:
2197 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2198 HCLGE_CFG_SPEED_S, 3);
2200 case HCLGE_MAC_SPEED_50G:
2201 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2202 HCLGE_CFG_SPEED_S, 4);
2204 case HCLGE_MAC_SPEED_100G:
2205 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2206 HCLGE_CFG_SPEED_S, 5);
2209 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2213 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2216 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2218 dev_err(&hdev->pdev->dev,
2219 "mac speed/duplex config cmd failed %d.\n", ret);
/* Validate the duplex for the requested speed, skip the hardware write
 * when nothing changes, and cache the new speed/duplex on success.
 */
2226 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2230 duplex = hclge_check_speed_dup(duplex, speed);
2231 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2234 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2238 hdev->hw.mac.speed = speed;
2239 hdev->hw.mac.duplex = duplex;
/* hnae3 ops wrapper: resolve the vport's hclge_dev and forward */
2244 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2247 struct hclge_vport *vport = hclge_get_vport(handle);
2248 struct hclge_dev *hdev = vport->back;
2250 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
/* Enable/disable MAC autonegotiation via the AN-mode config command */
2253 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2255 struct hclge_config_auto_neg_cmd *req;
2256 struct hclge_desc desc;
2260 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2262 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2263 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2264 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2266 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2268 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
/* hnae3 op: set autoneg, rejecting the request when the port does not
 * support autonegotiation.
 */
2274 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2276 struct hclge_vport *vport = hclge_get_vport(handle);
2277 struct hclge_dev *hdev = vport->back;
2279 if (!hdev->hw.mac.support_autoneg) {
2281 dev_err(&hdev->pdev->dev,
2282 "autoneg is not supported by current port\n");
2289 return hclge_set_autoneg_en(hdev, enable);
/* hnae3 op: report autoneg state — from the PHY when one is attached,
 * otherwise from the cached MAC setting.
 */
2292 static int hclge_get_autoneg(struct hnae3_handle *handle)
2294 struct hclge_vport *vport = hclge_get_vport(handle);
2295 struct hclge_dev *hdev = vport->back;
2296 struct phy_device *phydev = hdev->hw.mac.phydev;
2299 return phydev->autoneg;
2301 return hdev->hw.mac.autoneg;
/* hnae3 op: restart autoneg by bouncing the client (DOWN then UP) */
2304 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2306 struct hclge_vport *vport = hclge_get_vport(handle);
2307 struct hclge_dev *hdev = vport->back;
2310 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2312 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2315 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
/* Program the requested FEC mode bits (auto / RS / BaseR) into hardware */
2318 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2320 struct hclge_config_fec_cmd *req;
2321 struct hclge_desc desc;
2324 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2326 req = (struct hclge_config_fec_cmd *)desc.data;
2327 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2328 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2329 if (fec_mode & BIT(HNAE3_FEC_RS))
2330 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2331 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2332 if (fec_mode & BIT(HNAE3_FEC_BASER))
2333 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2334 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2336 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2338 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
/* hnae3 op: validate the requested FEC mode against the MAC's ability,
 * apply it, then record it as a user-defined setting.
 */
2343 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2345 struct hclge_vport *vport = hclge_get_vport(handle);
2346 struct hclge_dev *hdev = vport->back;
2347 struct hclge_mac *mac = &hdev->hw.mac;
2350 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2351 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2355 ret = hclge_set_fec_hw(hdev, fec_mode);
2359 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
/* hnae3 op: report the cached FEC ability and current FEC mode */
2363 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2366 struct hclge_vport *vport = hclge_get_vport(handle);
2367 struct hclge_dev *hdev = vport->back;
2368 struct hclge_mac *mac = &hdev->hw.mac;
2371 *fec_ability = mac->fec_ability;
2373 *fec_mode = mac->fec_mode;
/* MAC bring-up: program speed/duplex, re-apply any user-defined FEC
 * mode, set the MTU/MPS, and allocate the packet buffers.
 */
2376 static int hclge_mac_init(struct hclge_dev *hdev)
2378 struct hclge_mac *mac = &hdev->hw.mac;
2381 hdev->support_sfp_query = true;
2382 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2383 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2384 hdev->hw.mac.duplex);
2386 dev_err(&hdev->pdev->dev,
2387 "Config mac speed dup fail ret=%d\n", ret);
/* Restore FEC only if the user explicitly configured one */
2393 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2394 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2396 dev_err(&hdev->pdev->dev,
2397 "Fec mode init fail, ret = %d\n", ret);
2402 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2404 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2408 ret = hclge_buffer_alloc(hdev);
2410 dev_err(&hdev->pdev->dev,
2411 "allocate buffer fail, ret=%d\n", ret);
/* Queue the mailbox service work unless commands are disabled; the
 * test_and_set_bit guarantees at most one pending schedule.
 */
2416 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2418 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2419 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2420 schedule_work(&hdev->mbx_service_task);
/* Queue the reset service work unless the device is being removed;
 * single-pending semantics via test_and_set_bit.
 */
2423 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2425 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2426 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2427 schedule_work(&hdev->rst_service_task);
/* Queue the periodic service work when the device is up and not being
 * removed; single-pending semantics via test_and_set_bit.
 */
2430 static void hclge_task_schedule(struct hclge_dev *hdev)
2432 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2433 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2434 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2435 (void)schedule_work(&hdev->service_task);
/* Query firmware for MAC link state; returns 1 for link up, 0 for down */
2438 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2440 struct hclge_link_status_cmd *req;
2441 struct hclge_desc desc;
2445 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2446 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2448 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2453 req = (struct hclge_link_status_cmd *)desc.data;
2454 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2456 return !!link_status;
/* Combined link state: down while the device is DOWN; otherwise the MAC
 * link, ANDed with the PHY link when a PHY is attached and running.
 */
2459 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2461 unsigned int mac_state;
2464 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2467 mac_state = hclge_get_mac_link_status(hdev);
2469 if (hdev->hw.mac.phydev) {
2470 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2471 link_stat = mac_state &
2472 hdev->hw.mac.phydev->link;
2477 link_stat = mac_state;
/* On a link state change, notify the NIC client (and RoCE client if it
 * implements link_status_change) for every vport, reconfigure the MAC
 * tunnel interrupt, and cache the new state.
 */
2483 static void hclge_update_link_status(struct hclge_dev *hdev)
2485 struct hnae3_client *rclient = hdev->roce_client;
2486 struct hnae3_client *client = hdev->nic_client;
2487 struct hnae3_handle *rhandle;
2488 struct hnae3_handle *handle;
2494 state = hclge_get_mac_phy_link(hdev);
2495 if (state != hdev->hw.mac.link) {
2496 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2497 handle = &hdev->vport[i].nic;
2498 client->ops->link_status_change(handle, state);
2499 hclge_config_mac_tnl_int(hdev, state);
2500 rhandle = &hdev->vport[i].roce;
2501 if (rclient && rclient->ops->link_status_change)
2502 rclient->ops->link_status_change(rhandle,
2505 hdev->hw.mac.link = state;
2509 static void hclge_update_port_capability(struct hclge_mac *mac)
2511 /* update fec ability by speed */
2512 hclge_convert_setting_fec(mac);
2514 /* firmware can not identify back plane type, the media type
2515 * read from configuration can help deal it
2517 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2518 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2519 mac->module_type = HNAE3_MODULE_TYPE_KR;
2520 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2521 mac->module_type = HNAE3_MODULE_TYPE_TP;
2523 if (mac->support_autoneg == true) {
2524 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2525 linkmode_copy(mac->advertising, mac->supported);
2527 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2529 linkmode_zero(mac->advertising);
/* Query the current SFP speed from firmware (legacy path for rev < 0x21
 * hardware).  On success *speed is filled in; -EOPNOTSUPP means the IMP
 * firmware does not implement the query at all.
 */
static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
        struct hclge_sfp_info_cmd *resp;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
        resp = (struct hclge_sfp_info_cmd *)desc.data;
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret == -EOPNOTSUPP) {
                /* old firmware: caller will stop querying */
                dev_warn(&hdev->pdev->dev,
                         "IMP do not support get SFP speed %d\n", ret);
                return ret;
        } else if (ret) {
                dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
                return ret;
        }

        *speed = le32_to_cpu(resp->speed);

        return 0;
}
/* Query full SFP/port info (speed, module type, autoneg, FEC) from
 * firmware and cache it in @mac.  -EOPNOTSUPP means the IMP firmware
 * does not support the query.
 */
static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
{
        struct hclge_sfp_info_cmd *resp;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
        resp = (struct hclge_sfp_info_cmd *)desc.data;

        resp->query_type = QUERY_ACTIVE_SPEED;

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret == -EOPNOTSUPP) {
                dev_warn(&hdev->pdev->dev,
                         "IMP does not support get SFP info %d\n", ret);
                return ret;
        } else if (ret) {
                dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
                return ret;
        }

        mac->speed = le32_to_cpu(resp->speed);
        /* if resp->speed_ability is 0, it means it's an old version
         * firmware, do not update these params
         */
        if (resp->speed_ability) {
                mac->module_type = le32_to_cpu(resp->module_type);
                mac->speed_ability = le32_to_cpu(resp->speed_ability);
                mac->autoneg = resp->autoneg;
                mac->support_autoneg = resp->autoneg_ability;
                if (!resp->active_fec)
                        mac->fec_mode = 0;
                else
                        mac->fec_mode = BIT(resp->active_fec);
        } else {
                /* old firmware: fall back to the speed-only query path */
                mac->speed_type = QUERY_SFP_SPEED;
        }

        return 0;
}
/* Periodically refresh port info from the SFP module (non-copper ports
 * only) and reprogram MAC speed/duplex when needed.  Rev >= 0x21
 * hardware uses the full info query; older hardware only the speed.
 */
static int hclge_update_port_info(struct hclge_dev *hdev)
{
        struct hclge_mac *mac = &hdev->hw.mac;
        int speed = HCLGE_MAC_SPEED_UNKNOWN;
        int ret;

        /* get the port info from SFP cmd if not copper port */
        if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
                return 0;

        /* if IMP does not support get SFP/qSFP info, return directly */
        if (!hdev->support_sfp_query)
                return 0;

        if (hdev->pdev->revision >= 0x21)
                ret = hclge_get_sfp_info(hdev, mac);
        else
                ret = hclge_get_sfp_speed(hdev, &speed);

        if (ret == -EOPNOTSUPP) {
                /* remember the firmware limitation; stop asking */
                hdev->support_sfp_query = false;
                return ret;
        } else if (ret) {
                return ret;
        }

        if (hdev->pdev->revision >= 0x21) {
                if (mac->speed_type == QUERY_ACTIVE_SPEED) {
                        hclge_update_port_capability(mac);
                        return 0;
                }
                return hclge_cfg_mac_speed_dup(hdev, mac->speed,
                                               HCLGE_MAC_FULL);
        } else {
                if (speed == HCLGE_MAC_SPEED_UNKNOWN)
                        return 0; /* do nothing if no SFP */

                /* must config full duplex for SFP */
                return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
        }
}
2639 static int hclge_get_status(struct hnae3_handle *handle)
2641 struct hclge_vport *vport = hclge_get_vport(handle);
2642 struct hclge_dev *hdev = vport->back;
2644 hclge_update_link_status(hdev);
2646 return hdev->hw.mac.link;
2649 static void hclge_service_timer(struct timer_list *t)
2651 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2653 mod_timer(&hdev->service_timer, jiffies + HZ);
2654 hdev->hw_stats.stats_timer++;
2655 hdev->fd_arfs_expire_timer++;
2656 hclge_task_schedule(hdev);
/* Mark the periodic service work as finished so the watchdog may
 * schedule it again.  The barrier orders all prior stores before the
 * SCHED bit is cleared.
 */
static void hclge_service_complete(struct hclge_dev *hdev)
{
        WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

        /* Flush memory before next watchdog */
        smp_mb__before_atomic();
        clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
/* Decode what raised the misc (vector 0) interrupt.  Sources are
 * checked in strict priority order: IMP reset > global reset > MSI-X
 * error > mailbox.  For clearable events *clearval receives the value
 * to write back to the ack register.
 */
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
        u32 rst_src_reg, cmdq_src_reg, msix_src_reg;

        /* fetch the events from their corresponding regs */
        rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
        cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
        msix_src_reg = hclge_read_dev(&hdev->hw,
                                      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);

        /* Assumption: If by any chance reset and mailbox events are reported
         * together then we will only process reset event in this go and will
         * defer the processing of the mailbox events. Since, we would have not
         * cleared RX CMDQ event this time we would receive again another
         * interrupt from H/W just for the mailbox.
         */

        /* check for vector0 reset event sources */
        if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
                dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
                set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
                /* block further commands until the reset completes */
                set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
                *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
                hdev->rst_stats.imp_rst_cnt++;
                return HCLGE_VECTOR0_EVENT_RST;
        }

        if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
                dev_info(&hdev->pdev->dev, "global reset interrupt\n");
                set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
                set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
                *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
                hdev->rst_stats.global_rst_cnt++;
                return HCLGE_VECTOR0_EVENT_RST;
        }

        /* check for vector0 msix event source */
        if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
                dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
                        msix_src_reg);
                return HCLGE_VECTOR0_EVENT_ERR;
        }

        /* check for vector0 mailbox(=CMDQ RX) event source */
        if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
                /* ack only the mailbox bit; leave other CMDQ bits set */
                cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
                *clearval = cmdq_src_reg;
                return HCLGE_VECTOR0_EVENT_MBX;
        }

        /* print other vector0 event source */
        dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
                cmdq_src_reg, msix_src_reg);

        return HCLGE_VECTOR0_EVENT_OTHER;
}
/* Acknowledge a vector-0 event by writing @regclr to the register that
 * matches @event_type.  Unknown event types are ignored.
 */
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
                                    u32 regclr)
{
        switch (event_type) {
        case HCLGE_VECTOR0_EVENT_RST:
                hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
                break;
        case HCLGE_VECTOR0_EVENT_MBX:
                hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
                break;
        default:
                break;
        }
}
2739 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2741 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2742 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2743 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2744 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2745 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2748 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2750 writel(enable ? 1 : 0, vector->addr);
/* Misc (vector 0) interrupt handler.  Masks the vector, decodes the
 * cause, and defers the real work to the reset/mailbox workqueues.
 * The vector is re-enabled here only for mailbox events; reset paths
 * re-enable it once the cause has been cleared safely.
 */
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
        struct hclge_dev *hdev = data;
        u32 clearval = 0;
        u32 event_cause;

        hclge_enable_vector(&hdev->misc_vector, false);
        event_cause = hclge_check_event_cause(hdev, &clearval);

        /* vector 0 interrupt is shared with reset and mailbox source events.*/
        switch (event_cause) {
        case HCLGE_VECTOR0_EVENT_ERR:
                /* we do not know what type of reset is required now. This could
                 * only be decided after we fetch the type of errors which
                 * caused this event. Therefore, we will do below for now:
                 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
                 *    have defered type of reset to be used.
                 * 2. Schedule the reset serivce task.
                 * 3. When service task receives  HNAE3_UNKNOWN_RESET type it
                 *    will fetch the correct type of reset.  This would be done
                 *    by first decoding the types of errors.
                 */
                set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
                /* fall through */
        case HCLGE_VECTOR0_EVENT_RST:
                hclge_reset_task_schedule(hdev);
                break;
        case HCLGE_VECTOR0_EVENT_MBX:
                /* If we are here then,
                 * 1. Either we are not handling any mbx task and we are not
                 *    scheduled as well
                 *                        OR
                 * 2. We could be handling a mbx task but nothing more is
                 *    scheduled.
                 * In both cases, we should schedule mbx task as there are more
                 * mbx messages reported by this interrupt.
                 */
                hclge_mbx_task_schedule(hdev);
                break;
        default:
                dev_warn(&hdev->pdev->dev,
                         "received unknown or unhandled event of vector0\n");
                break;
        }

        /* clear the source of interrupt if it is not cause by reset */
        if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
                hclge_clear_event_cause(hdev, event_cause, clearval);
                hclge_enable_vector(&hdev->misc_vector, true);
        }

        return IRQ_HANDLED;
}
2807 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2809 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2810 dev_warn(&hdev->pdev->dev,
2811 "vector(vector_id %d) has been freed.\n", vector_id);
2815 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2816 hdev->num_msi_left += 1;
2817 hdev->num_msi_used -= 1;
2820 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2822 struct hclge_misc_vector *vector = &hdev->misc_vector;
2824 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2826 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2827 hdev->vector_status[0] = 0;
2829 hdev->num_msi_left -= 1;
2830 hdev->num_msi_used += 1;
/* Claim vector 0 and install the misc interrupt handler.  On failure
 * the vector is returned to the pool before the error is propagated.
 */
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
        int ret;

        hclge_get_misc_vector(hdev);

        /* this would be explicitly freed in the end */
        ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
                          0, "hclge_misc", hdev);
        if (ret) {
                hclge_free_vector(hdev, 0);
                dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
                        hdev->misc_vector.vector_irq);
        }

        return ret;
}
/* Tear down the misc interrupt: release the IRQ first, then return
 * vector 0 to the pool.
 */
static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
        free_irq(hdev->misc_vector.vector_irq, hdev);
        hclge_free_vector(hdev, 0);
}
/* Deliver a reset-stage notification of @type to the NIC client for
 * every vport.  Returns 0 when no client is registered, -EOPNOTSUPP if
 * the client has no reset_notify op, or the first per-vport error.
 */
int hclge_notify_client(struct hclge_dev *hdev,
                        enum hnae3_reset_notify_type type)
{
        struct hnae3_client *client = hdev->nic_client;
        u16 i;

        if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
                return 0;

        if (!client->ops->reset_notify)
                return -EOPNOTSUPP;

        for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
                struct hnae3_handle *handle = &hdev->vport[i].nic;
                int ret;

                ret = client->ops->reset_notify(handle, type);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "notify nic client failed %d(%d)\n", type, ret);
                        return ret;
                }
        }

        return 0;
}
/* Same as hclge_notify_client() but for the RoCE client and the roce
 * handle of each vport.
 */
static int hclge_notify_roce_client(struct hclge_dev *hdev,
                                    enum hnae3_reset_notify_type type)
{
        struct hnae3_client *client = hdev->roce_client;
        int ret = 0;
        u16 i;

        if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
                return 0;

        if (!client->ops->reset_notify)
                return -EOPNOTSUPP;

        for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
                struct hnae3_handle *handle = &hdev->vport[i].roce;

                ret = client->ops->reset_notify(handle, type);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "notify roce client failed %d(%d)",
                                type, ret);
                        return ret;
                }
        }

        return ret;
}
/* Poll hardware until the current reset type reports completion.
 * FLR is tracked through the software flr_state bit; other reset types
 * poll a hardware status bit.  Polls up to 200 x 100 ms before giving
 * up with -EBUSY.
 */
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS	100
#define HCLGE_RESET_WAIT_CNT	200
        u32 val, reg, reg_bit;
        u32 cnt = 0;

        switch (hdev->reset_type) {
        case HNAE3_IMP_RESET:
                reg = HCLGE_GLOBAL_RESET_REG;
                reg_bit = HCLGE_IMP_RESET_BIT;
                break;
        case HNAE3_GLOBAL_RESET:
                reg = HCLGE_GLOBAL_RESET_REG;
                reg_bit = HCLGE_GLOBAL_RESET_BIT;
                break;
        case HNAE3_FUNC_RESET:
                reg = HCLGE_FUN_RST_ING;
                reg_bit = HCLGE_FUN_RST_ING_B;
                break;
        case HNAE3_FLR_RESET:
                /* handled below via flr_state, no register to poll */
                break;
        default:
                dev_err(&hdev->pdev->dev,
                        "Wait for unsupported reset type: %d\n",
                        hdev->reset_type);
                return -EINVAL;
        }

        if (hdev->reset_type == HNAE3_FLR_RESET) {
                /* wait for the PCI core to signal FLR completion */
                while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
                       cnt++ < HCLGE_RESET_WAIT_CNT)
                        msleep(HCLGE_RESET_WATI_MS);

                if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
                        dev_err(&hdev->pdev->dev,
                                "flr wait timeout: %d\n", cnt);
                        return -EBUSY;
                }

                return 0;
        }

        /* poll the hardware "reset in progress" bit */
        val = hclge_read_dev(&hdev->hw, reg);
        while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
                msleep(HCLGE_RESET_WATI_MS);
                val = hclge_read_dev(&hdev->hw, reg);
                cnt++;
        }

        if (cnt >= HCLGE_RESET_WAIT_CNT) {
                dev_warn(&hdev->pdev->dev,
                         "Wait for reset timeout: %d\n", hdev->reset_type);
                return -EBUSY;
        }

        return 0;
}
/* Tell firmware to set (reset=true) or clear a VF's FUNC_RST_ING flag
 * so the VF can observe that a function reset is in progress.
 */
static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
        struct hclge_vf_rst_cmd *req;
        struct hclge_desc desc;

        req = (struct hclge_vf_rst_cmd *)desc.data;
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
        req->dest_vfid = func_id;

        if (reset)
                req->vf_rst = 0x1;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
/* Set or clear the reset-in-progress flag for every VF vport (the VF
 * vports follow the PF/VMDq vports in hdev->vport).  When asserting,
 * alive VFs are additionally informed through the mailbox; that
 * notification is best-effort.
 */
static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
        int i;

        for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
                struct hclge_vport *vport = &hdev->vport[i];
                int ret;

                /* Send cmd to set/clear VF's FUNC_RST_ING */
                ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "set vf(%d) rst failed %d!\n",
                                vport->vport_id, ret);
                        return ret;
                }

                if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
                        continue;

                /* Inform VF to process the reset.
                 * hclge_inform_reset_assert_to_vf may fail if VF
                 * driver is not loaded.
                 */
                ret = hclge_inform_reset_assert_to_vf(vport);
                if (ret)
                        dev_warn(&hdev->pdev->dev,
                                 "inform reset to vf(%d) failed %d!\n",
                                 vport->vport_id, ret);
        }

        return 0;
}
/* Ask firmware to trigger a function-level reset for @func_id
 * (0 = this PF).
 */
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
        struct hclge_desc desc;
        struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
        hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
        req->fun_reset_vfid = func_id;

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "send function reset cmd fail, status =%d\n", ret);

        return ret;
}
/* Kick off the reset selected in hdev->reset_type.  Global reset is
 * triggered directly via a register write; function reset and FLR are
 * marked pending and handed back to the reset service task.  Does
 * nothing if hardware still reports a reset in flight.
 */
static void hclge_do_reset(struct hclge_dev *hdev)
{
        struct hnae3_handle *handle = &hdev->vport[0].nic;
        struct pci_dev *pdev = hdev->pdev;
        u32 val;

        if (hclge_get_hw_reset_stat(handle)) {
                /* a previous reset has not finished yet; do not stack one */
                dev_info(&pdev->dev, "Hardware reset not finish\n");
                dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
                         hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
                         hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
                return;
        }

        switch (hdev->reset_type) {
        case HNAE3_GLOBAL_RESET:
                val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
                hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
                hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
                dev_info(&pdev->dev, "Global Reset requested\n");
                break;
        case HNAE3_FUNC_RESET:
                dev_info(&pdev->dev, "PF Reset requested\n");
                /* schedule again to check later */
                set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
                hclge_reset_task_schedule(hdev);
                break;
        case HNAE3_FLR_RESET:
                dev_info(&pdev->dev, "FLR requested\n");
                /* schedule again to check later */
                set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
                hclge_reset_task_schedule(hdev);
                break;
        default:
                dev_warn(&pdev->dev,
                         "Unsupported reset type: %d\n", hdev->reset_type);
                break;
        }
}
/* Pick the highest-priority reset pending in @addr (IMP > global >
 * func > FLR), clearing that request and any lower-priority ones it
 * subsumes.  UNKNOWN requests are first resolved by decoding the MSI-X
 * error cause.  Returns HNAE3_NONE_RESET if a higher-priority reset is
 * already in progress.
 */
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
                                                   unsigned long *addr)
{
        enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
        struct hclge_dev *hdev = ae_dev->priv;

        /* first, resolve any unknown reset type to the known type(s) */
        if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
                /* we will intentionally ignore any errors from this function
                 *  as we will end up in *some* reset request in any case
                 */
                hclge_handle_hw_msix_error(hdev, addr);
                clear_bit(HNAE3_UNKNOWN_RESET, addr);
                /* We defered the clearing of the error event which caused
                 * interrupt since it was not posssible to do that in
                 * interrupt context (and this is the reason we introduced
                 * new UNKNOWN reset type). Now, the errors have been
                 * handled and cleared in hardware we can safely enable
                 * interrupts. This is an exception to the norm.
                 */
                hclge_enable_vector(&hdev->misc_vector, true);
        }

        /* return the highest priority reset level amongst all */
        if (test_bit(HNAE3_IMP_RESET, addr)) {
                rst_level = HNAE3_IMP_RESET;
                clear_bit(HNAE3_IMP_RESET, addr);
                clear_bit(HNAE3_GLOBAL_RESET, addr);
                clear_bit(HNAE3_FUNC_RESET, addr);
        } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
                rst_level = HNAE3_GLOBAL_RESET;
                clear_bit(HNAE3_GLOBAL_RESET, addr);
                clear_bit(HNAE3_FUNC_RESET, addr);
        } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
                rst_level = HNAE3_FUNC_RESET;
                clear_bit(HNAE3_FUNC_RESET, addr);
        } else if (test_bit(HNAE3_FLR_RESET, addr)) {
                rst_level = HNAE3_FLR_RESET;
                clear_bit(HNAE3_FLR_RESET, addr);
        }

        /* don't start a lower-level reset while a higher one is running */
        if (hdev->reset_type != HNAE3_NONE_RESET &&
            rst_level < hdev->reset_type)
                return HNAE3_NONE_RESET;

        return rst_level;
}
/* Acknowledge the interrupt status bit of the reset that just ran and
 * re-enable the misc vector.  Only IMP and global resets latch a status
 * bit; other types return without touching hardware.
 */
static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
        u32 clearval = 0;

        switch (hdev->reset_type) {
        case HNAE3_IMP_RESET:
                clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
                break;
        case HNAE3_GLOBAL_RESET:
                clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
                break;
        default:
                break;
        }

        if (!clearval)
                return;

        hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
        hclge_enable_vector(&hdev->misc_vector, true);
}
/* Pre-reset step run before the stack is brought down: for function
 * reset and FLR, flag every VF as "reset in progress".
 */
static int hclge_reset_prepare_down(struct hclge_dev *hdev)
{
        int ret = 0;

        switch (hdev->reset_type) {
        case HNAE3_FUNC_RESET:
                /* fall through */
        case HNAE3_FLR_RESET:
                ret = hclge_set_all_vf_rst(hdev, true);
                break;
        default:
                break;
        }

        return ret;
}
/* Final preparation before waiting for hardware reset completion:
 * assert the reset (per type), disable the command queue where needed,
 * then signal firmware that the driver's preparatory work is done.
 */
static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_SYNC_TIME 100

        u32 reg_val;
        int ret = 0;

        switch (hdev->reset_type) {
        case HNAE3_FUNC_RESET:
                /* There is no mechanism for PF to know if VF has stopped IO
                 * for now, just wait 100 ms for VF to stop IO
                 */
                msleep(HCLGE_RESET_SYNC_TIME);
                ret = hclge_func_reset_cmd(hdev, 0);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "asserting function reset fail %d!\n", ret);
                        return ret;
                }

                /* After performaning pf reset, it is not necessary to do the
                 * mailbox handling or send any command to firmware, because
                 * any mailbox handling or command to firmware is only valid
                 * after hclge_cmd_init is called.
                 */
                set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
                hdev->rst_stats.pf_rst_cnt++;
                break;
        case HNAE3_FLR_RESET:
                /* There is no mechanism for PF to know if VF has stopped IO
                 * for now, just wait 100 ms for VF to stop IO
                 */
                msleep(HCLGE_RESET_SYNC_TIME);
                set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
                set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
                hdev->rst_stats.flr_rst_cnt++;
                break;
        case HNAE3_IMP_RESET:
                /* hand-shake with IMP firmware via the other-interrupt reg */
                reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
                hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
                                BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
                break;
        default:
                break;
        }

        /* inform hardware that preparatory work is done */
        msleep(HCLGE_RESET_SYNC_TIME);
        hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
                        HCLGE_NIC_CMQ_ENABLE);
        dev_info(&hdev->pdev->dev, "prepare wait ok\n");

        return ret;
}
/* Decide how to proceed after a failed reset attempt.  Returns true if
 * the caller should reschedule the reset task (another reset is still
 * pending, or the failed attempt should be retried), false when the
 * failure is final or will be escalated via the reset timer instead.
 */
static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
{
#define MAX_RESET_FAIL_CNT 5

        if (hdev->reset_pending) {
                /* another reset is already queued; let it run */
                dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
                         hdev->reset_pending);
                return true;
        } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
                   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
                    BIT(HCLGE_IMP_RESET_BIT))) {
                /* an IMP reset has overtaken us; it will redo everything */
                dev_info(&hdev->pdev->dev,
                         "reset failed because IMP Reset is pending\n");
                hclge_clear_reset_cause(hdev);
                return false;
        } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
                hdev->reset_fail_cnt++;
                if (is_timeout) {
                        /* hardware never signalled done; wait again */
                        set_bit(hdev->reset_type, &hdev->reset_pending);
                        dev_info(&hdev->pdev->dev,
                                 "re-schedule to wait for hw reset done\n");
                        return true;
                }

                /* escalate to a global reset via the reset timer */
                dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
                hclge_clear_reset_cause(hdev);
                set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
                mod_timer(&hdev->reset_timer,
                          jiffies + HCLGE_RESET_INTERVAL);

                return false;
        }

        /* retry budget exhausted: give up */
        hclge_clear_reset_cause(hdev);
        dev_err(&hdev->pdev->dev, "Reset fail!\n");
        return false;
}
/* Post-reset step run before the stack is brought back up: for
 * function reset and FLR, clear the per-VF "reset in progress" flags.
 */
static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
        int ret = 0;

        switch (hdev->reset_type) {
        case HNAE3_FUNC_RESET:
                /* fall through */
        case HNAE3_FLR_RESET:
                ret = hclge_set_all_vf_rst(hdev, false);
                break;
        default:
                break;
        }

        return ret;
}
/* Rebuild the NIC stack after hardware reset: tear down the client,
 * re-initialize the ae device, then re-init and restore the client.
 * Stops at the first failing stage and returns its error.
 */
static int hclge_reset_stack(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
        if (ret)
                return ret;

        ret = hclge_reset_ae_dev(hdev->ae_dev);
        if (ret)
                return ret;

        ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
        if (ret)
                return ret;

        return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}
/* Execute the full reset sequence for hdev->reset_type: notify
 * clients down, assert the reset, wait for hardware, rebuild the
 * stack, and notify clients up again.  Client up/down transitions are
 * performed under rtnl_lock.  On any failure the error handler decides
 * whether to reschedule the reset task.
 */
static void hclge_reset(struct hclge_dev *hdev)
{
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
        bool is_timeout = false;
        int ret;

        /* Initialize ae_dev reset status as well, in case enet layer wants to
         * know if device is undergoing reset
         */
        ae_dev->reset_type = hdev->reset_type;
        hdev->rst_stats.reset_cnt++;
        /* perform reset of the stack & ae device for a client */
        ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
        if (ret)
                goto err_reset;

        ret = hclge_reset_prepare_down(hdev);
        if (ret)
                goto err_reset;

        rtnl_lock();
        ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
        if (ret)
                goto err_reset_lock;

        rtnl_unlock();

        ret = hclge_reset_prepare_wait(hdev);
        if (ret)
                goto err_reset;

        if (hclge_reset_wait(hdev)) {
                is_timeout = true;
                goto err_reset;
        }

        hdev->rst_stats.hw_reset_done_cnt++;

        ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
        if (ret)
                goto err_reset;

        rtnl_lock();

        ret = hclge_reset_stack(hdev);
        if (ret)
                goto err_reset_lock;

        hclge_clear_reset_cause(hdev);

        ret = hclge_reset_prepare_up(hdev);
        if (ret)
                goto err_reset_lock;

        rtnl_unlock();

        ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
        /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
         * times
         */
        if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
                goto err_reset;

        rtnl_lock();

        ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
        if (ret)
                goto err_reset_lock;

        rtnl_unlock();

        ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
        if (ret)
                goto err_reset;

        /* success: reset bookkeeping and cancel the escalation timer */
        hdev->last_reset_time = jiffies;
        hdev->reset_fail_cnt = 0;
        hdev->rst_stats.reset_done_cnt++;
        ae_dev->reset_type = HNAE3_NONE_RESET;
        del_timer(&hdev->reset_timer);

        return;

err_reset_lock:
        rtnl_unlock();
err_reset:
        if (hclge_reset_err_handle(hdev, is_timeout))
                hclge_reset_task_schedule(hdev);
}
/* Entry point for reset requests from the stack or error recovery.
 * Throttles repeated requests, picks the reset level (a pending default
 * request wins; otherwise a sufficiently old last reset re-starts at
 * FUNC level), records the request and schedules the reset task.  The
 * level is bumped after each request so a repeat escalates.
 */
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
        struct hclge_dev *hdev = ae_dev->priv;

        /* We might end up getting called broadly because of 2 below cases:
         * 1. Recoverable error was conveyed through APEI and only way to bring
         *    normalcy is to reset.
         * 2. A new reset request from the stack due to timeout
         *
         * For the first case,error event might not have ae handle available.
         * check if this is a new reset request and we are not here just because
         * last reset attempt did not succeed and watchdog hit us again. We will
         * know this if last reset request did not occur very recently (watchdog
         * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
         * In case of new request we reset the "reset level" to PF reset.
         * And if it is a repeat reset request of the most recent one then we
         * want to make sure we throttle the reset request. Therefore, we will
         * not allow it again before 3*HZ times.
         */
        if (!handle)
                handle = &hdev->vport[0].nic;

        if (time_before(jiffies, (hdev->last_reset_time +
                                  HCLGE_RESET_INTERVAL)))
                return;
        else if (hdev->default_reset_request)
                hdev->reset_level =
                        hclge_get_reset_level(ae_dev,
                                              &hdev->default_reset_request);
        else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
                hdev->reset_level = HNAE3_FUNC_RESET;

        dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
                 hdev->reset_level);

        /* request reset & schedule reset task */
        set_bit(hdev->reset_level, &hdev->reset_request);
        hclge_reset_task_schedule(hdev);

        /* escalate on the next request, up to GLOBAL */
        if (hdev->reset_level < HNAE3_GLOBAL_RESET)
                hdev->reset_level++;
}
3428 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3429 enum hnae3_reset_type rst_type)
3431 struct hclge_dev *hdev = ae_dev->priv;
3433 set_bit(rst_type, &hdev->default_reset_request);
3436 static void hclge_reset_timer(struct timer_list *t)
3438 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3440 dev_info(&hdev->pdev->dev,
3441 "triggering reset in reset timer\n");
3442 hclge_reset_event(hdev->pdev, NULL);
/* Body of the reset service task: first service any reset the hardware
 * already has pending, then honor any new reset request from software.
 */
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

        /* check if there is any ongoing reset in the hardware. This status can
         * be checked from reset_pending. If there is then, we need to wait for
         * hardware to complete reset.
         *    a. If we are able to figure out in reasonable time that hardware
         *       has fully resetted then, we can proceed with driver, client
         *       reset.
         *    b. else, we can come back later to check this status so re-sched
         *       now.
         */
        hdev->last_reset_time = jiffies;
        hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
        if (hdev->reset_type != HNAE3_NONE_RESET)
                hclge_reset(hdev);

        /* check if we got any *new* reset requests to be honored */
        hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
        if (hdev->reset_type != HNAE3_NONE_RESET)
                hclge_do_reset(hdev);

        hdev->reset_type = HNAE3_NONE_RESET;
}
/* Workqueue entry for reset handling.  HANDLING guards against
 * re-entrancy; SERVICE_SCHED is cleared up front so a new request
 * arriving during the subtask gets its own run.
 */
static void hclge_reset_service_task(struct work_struct *work)
{
        struct hclge_dev *hdev =
                container_of(work, struct hclge_dev, rst_service_task);

        if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
                return;

        clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

        hclge_reset_subtask(hdev);

        clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}
/* Workqueue entry for mailbox handling; same re-entrancy scheme as the
 * reset service task.
 */
static void hclge_mailbox_service_task(struct work_struct *work)
{
        struct hclge_dev *hdev =
                container_of(work, struct hclge_dev, mbx_service_task);

        if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
                return;

        clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

        hclge_mbx_handler(hdev);

        clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
/* Age out VFs that have not pinged the PF for 8 seconds; a dead VF's
 * MPS is restored to the default frame size.
 */
static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
        int i;

        /* start from vport 1 for PF is always alive */
        for (i = 1; i < hdev->num_alloc_vport; i++) {
                struct hclge_vport *vport = &hdev->vport[i];

                if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
                        clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);

                /* If vf is not alive, set to default value */
                if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
                        vport->mps = HCLGE_MAC_DEFAULT_FRAME;
        }
}
/* Periodic service work (driven by hclge_service_timer): refresh
 * statistics, port/link info and VF liveness, and expire aged aRFS
 * filter rules.
 */
static void hclge_service_task(struct work_struct *work)
{
        struct hclge_dev *hdev =
                container_of(work, struct hclge_dev, service_task);

        /* stats are refreshed only every HCLGE_STATS_TIMER_INTERVAL ticks */
        if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
                hclge_update_stats_for_all(hdev);
                hdev->hw_stats.stats_timer = 0;
        }

        hclge_update_port_info(hdev);
        hclge_update_link_status(hdev);
        hclge_update_vport_alive(hdev);
        if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
                hclge_rfs_filter_expire(hdev);
                hdev->fd_arfs_expire_timer = 0;
        }
        hclge_service_complete(hdev);
}
3538 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3540 /* VF handle has no client */
3541 if (!handle->client)
3542 return container_of(handle, struct hclge_vport, nic);
3543 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3544 return container_of(handle, struct hclge_vport, roce);
3546 return container_of(handle, struct hclge_vport, nic);
/* ae_ops callback: allocate up to @vector_num free MSI vectors for the
 * caller's vport, filling @vector_info with the IRQ number and doorbell
 * address of each.  Vector 0 is reserved for misc and never handed out.
 * Returns the number actually allocated.
 */
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
                            struct hnae3_vector_info *vector_info)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hnae3_vector_info *vector = vector_info;
        struct hclge_dev *hdev = vport->back;
        int alloc = 0;
        int i, j;

        vector_num = min(hdev->num_msi_left, vector_num);

        for (j = 0; j < vector_num; j++) {
                /* start at 1: vector 0 belongs to the misc interrupt */
                for (i = 1; i < hdev->num_msi; i++) {
                        if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
                                vector->vector = pci_irq_vector(hdev->pdev, i);
                                vector->io_addr = hdev->hw.io_base +
                                        HCLGE_VECTOR_REG_BASE +
                                        (i - 1) * HCLGE_VECTOR_REG_OFFSET +
                                        vport->vport_id *
                                        HCLGE_VECTOR_VF_OFFSET;
                                hdev->vector_status[i] = vport->vport_id;
                                hdev->vector_irq[i] = vector->vector;

                                vector++;
                                alloc++;

                                break;
                        }
                }
        }
        hdev->num_msi_left -= alloc;
        hdev->num_msi_used += alloc;

        return alloc;
}
3585 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3589 for (i = 0; i < hdev->num_msi; i++)
3590 if (vector == hdev->vector_irq[i])
/* ae_ops callback: return the MSI vector with IRQ number @vector to the
 * free pool.  Fails if the IRQ does not belong to this device.
 */
static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int vector_id;

        vector_id = hclge_get_vector_index(hdev, vector);
        if (vector_id < 0) {
                dev_err(&hdev->pdev->dev,
                        "Get vector index fail. vector_id =%d\n", vector_id);
                return vector_id;
        }

        hclge_free_vector(hdev, vector_id);

        return 0;
}
/* ae_ops callback: size of the RSS hash key in bytes. */
static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
        return HCLGE_RSS_KEY_SIZE;
}
/* ae_ops callback: number of entries in the RSS indirection table. */
static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
        return HCLGE_RSS_IND_TBL_SIZE;
}
/* Program the RSS hash algorithm and hash key into hardware.  The key
 * is longer than one command descriptor holds, so it is written in
 * HCLGE_RSS_HASH_KEY_NUM-byte chunks, each tagged with its offset.
 */
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
                                  const u8 hfunc, const u8 *key)
{
        struct hclge_rss_config_cmd *req;
        unsigned int key_offset = 0;
        struct hclge_desc desc;
        int key_counts;
        int key_size;
        int ret;

        key_counts = HCLGE_RSS_KEY_SIZE;
        req = (struct hclge_rss_config_cmd *)desc.data;

        while (key_counts) {
                hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
                                           false);

                req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
                req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

                key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
                memcpy(req->hash_key,
                       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

                key_counts -= key_size;
                key_offset++;
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Configure RSS config fail, status = %d\n",
                                ret);
                        return ret;
                }
        }
        return 0;
}
/* Program the RSS indirection table into hardware, one
 * HCLGE_RSS_CFG_TBL_SIZE-entry command descriptor at a time.
 */
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
        struct hclge_rss_indirection_table_cmd *req;
        struct hclge_desc desc;
        int i, j;
        int ret;

        req = (struct hclge_rss_indirection_table_cmd *)desc.data;

        for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
                hclge_cmd_setup_basic_desc
                        (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

                req->start_table_index =
                        cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
                req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

                for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
                        req->rss_result[j] =
                                indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Configure rss indir table fail,status = %d\n",
                                ret);
                        return ret;
                }
        }
        return 0;
}
/* Program the per-TC RSS mode: for each traffic class, its validity,
 * queue-region size and queue offset, packed into one 16-bit field.
 */
static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
                                 u16 *tc_size, u16 *tc_offset)
{
        struct hclge_rss_tc_mode_cmd *req;
        struct hclge_desc desc;
        int ret;
        int i;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
        req = (struct hclge_rss_tc_mode_cmd *)desc.data;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                u16 mode = 0;

                hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
                hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
                                HCLGE_RSS_TC_SIZE_S, tc_size[i]);
                hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
                                HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

                req->rss_tc_mode[i] = cpu_to_le16(mode);
        }

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Configure rss tc mode fail, status = %d\n", ret);

        return ret;
}
3724 static void hclge_get_rss_type(struct hclge_vport *vport)
3726 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3727 vport->rss_tuple_sets.ipv4_udp_en ||
3728 vport->rss_tuple_sets.ipv4_sctp_en ||
3729 vport->rss_tuple_sets.ipv6_tcp_en ||
3730 vport->rss_tuple_sets.ipv6_udp_en ||
3731 vport->rss_tuple_sets.ipv6_sctp_en)
3732 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3733 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3734 vport->rss_tuple_sets.ipv6_fragment_en)
3735 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3737 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
/* Push the PF's (vport 0) RSS tuple configuration to hardware and
 * refresh the derived skb hash type.
 */
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
        struct hclge_rss_input_tuple_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

        req = (struct hclge_rss_input_tuple_cmd *)desc.data;

        /* Get the tuple cfg from pf */
        req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
        req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
        req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
        req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
        req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
        req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
        req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
        req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
        hclge_get_rss_type(&hdev->vport[0]);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Configure rss input fail, status = %d\n", ret);
        return ret;
}
/* hclge_get_rss() - ethtool .get_rxfh hook: report hash algorithm, hash
 * key and indirection table from the vport's shadow copies (no firmware
 * round-trip needed).
 * NOTE(review): lossy extract - the 'key'/'hfunc' parameters on the
 * signature's second line, the 'break;'/'default:' statements of the
 * switch and the 'return 0;' are not visible here.
 */
3767 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3770 struct hclge_vport *vport = hclge_get_vport(handle);
3773 /* Get hash algorithm */
3775 switch (vport->rss_algo) {
3776 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3777 *hfunc = ETH_RSS_HASH_TOP;
3779 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3780 *hfunc = ETH_RSS_HASH_XOR;
3783 *hfunc = ETH_RSS_HASH_UNKNOWN;
3788 /* Get the RSS Key required by the user */
3790 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3792 /* Get indirect table */
3794 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3795 indir[i] = vport->rss_indirection_tbl[i];
/* hclge_set_rss() - ethtool .set_rxfh hook: map the requested ethtool
 * hash function to a hardware algorithm, program key + algorithm, update
 * the shadow copies, then write the new indirection table to hardware.
 * NOTE(review): lossy extract - the 'switch (hfunc)' line, 'break;'s,
 * the '-EOPNOTSUPP' default and the 'if (ret) return ret;' after
 * hclge_set_rss_algo_key() are not visible here.
 */
3800 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3801 const u8 *key, const u8 hfunc)
3803 struct hclge_vport *vport = hclge_get_vport(handle);
3804 struct hclge_dev *hdev = vport->back;
3808 /* Set the RSS hash key if specified by the user */
3811 case ETH_RSS_HASH_TOP:
3812 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3814 case ETH_RSS_HASH_XOR:
3815 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3817 case ETH_RSS_HASH_NO_CHANGE:
3818 hash_algo = vport->rss_algo;
3824 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3828 /* Update the shadow RSS key with user specified qids */
3829 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3830 vport->rss_algo = hash_algo;
3833 /* Update the shadow RSS table with user specified qids */
3834 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3835 vport->rss_indirection_tbl[i] = indir[i];
3837 /* Update the hardware */
3838 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3841 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3843 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3845 if (nfc->data & RXH_L4_B_2_3)
3846 hash_sets |= HCLGE_D_PORT_BIT;
3848 hash_sets &= ~HCLGE_D_PORT_BIT;
3850 if (nfc->data & RXH_IP_SRC)
3851 hash_sets |= HCLGE_S_IP_BIT;
3853 hash_sets &= ~HCLGE_S_IP_BIT;
3855 if (nfc->data & RXH_IP_DST)
3856 hash_sets |= HCLGE_D_IP_BIT;
3858 hash_sets &= ~HCLGE_D_IP_BIT;
3860 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3861 hash_sets |= HCLGE_V_TAG_BIT;
/* hclge_set_rss_tuple() - ethtool .set_rxnfc hook for RSS hash fields.
 * Rejects unsupported fields, seeds the descriptor with the current
 * shadow config, overwrites the entry for nfc->flow_type with the new
 * tuple bits, sends the command, and on success writes the descriptor
 * values back into the vport shadow state.
 * NOTE(review): lossy extract - 'u8 tuple_sets;'/'int ret;', the
 * '-EINVAL' returns, the 'case TCP_V4_FLOW:' etc. labels, 'break;'s and
 * the trailing 'return 0;' are not visible here.
 */
3866 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3867 struct ethtool_rxnfc *nfc)
3869 struct hclge_vport *vport = hclge_get_vport(handle);
3870 struct hclge_dev *hdev = vport->back;
3871 struct hclge_rss_input_tuple_cmd *req;
3872 struct hclge_desc desc;
3876 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3877 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3880 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3881 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
/* start from the currently-configured tuples so untouched flow types
 * keep their settings
 */
3883 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3884 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3885 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3886 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3887 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3888 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3889 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3890 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3892 tuple_sets = hclge_get_rss_hash_bits(nfc);
3893 switch (nfc->flow_type) {
3895 req->ipv4_tcp_en = tuple_sets;
3898 req->ipv6_tcp_en = tuple_sets;
3901 req->ipv4_udp_en = tuple_sets;
3904 req->ipv6_udp_en = tuple_sets;
3907 req->ipv4_sctp_en = tuple_sets;
/* presumably IPv6 SCTP cannot hash on L4 ports on this hw - confirm */
3910 if ((nfc->data & RXH_L4_B_0_1) ||
3911 (nfc->data & RXH_L4_B_2_3))
3914 req->ipv6_sctp_en = tuple_sets;
3917 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3920 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3926 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3928 dev_err(&hdev->pdev->dev,
3929 "Set rss tuple fail, status = %d\n", ret);
/* command accepted: sync the shadow state with what hardware now has */
3933 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3934 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3935 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3936 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3937 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3938 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3939 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3940 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3941 hclge_get_rss_type(vport);
/* hclge_get_rss_tuple() - ethtool .get_rxnfc hook: report which header
 * fields are hashed for nfc->flow_type, translated from the vport's
 * shadow tuple bitmap back into RXH_* flags.
 * NOTE(review): lossy extract - 'u8 tuple_sets;', the 'case TCP_V4_FLOW:'
 * etc. labels, 'break;'s, the default '-EOPNOTSUPP', the 'nfc->data = 0;'
 * reset and the trailing 'return 0;' are not visible here.
 */
3945 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3946 struct ethtool_rxnfc *nfc)
3948 struct hclge_vport *vport = hclge_get_vport(handle);
3953 switch (nfc->flow_type) {
3955 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3958 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3961 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3964 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3967 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3970 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3974 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
/* translate hardware tuple bits back into ethtool RXH_* flags */
3983 if (tuple_sets & HCLGE_D_PORT_BIT)
3984 nfc->data |= RXH_L4_B_2_3;
3985 if (tuple_sets & HCLGE_S_PORT_BIT)
3986 nfc->data |= RXH_L4_B_0_1;
3987 if (tuple_sets & HCLGE_D_IP_BIT)
3988 nfc->data |= RXH_IP_DST;
3989 if (tuple_sets & HCLGE_S_IP_BIT)
3990 nfc->data |= RXH_IP_SRC;
3995 static int hclge_get_tc_size(struct hnae3_handle *handle)
3997 struct hclge_vport *vport = hclge_get_vport(handle);
3998 struct hclge_dev *hdev = vport->back;
4000 return hdev->rss_size_max;
/* hclge_rss_init_hw() - program the PF's complete RSS configuration into
 * hardware: indirection table, hash key + algorithm, input tuples, and
 * finally the per-TC mode (size/offset/valid).
 * NOTE(review): lossy extract - 'u16 roundup_size;'/'int i, ret;', the
 * 'if (ret) return ret;' guards, the '-EINVAL' return, the 'tc_valid[i]'
 * assignments and several braces are not visible here.
 */
4003 int hclge_rss_init_hw(struct hclge_dev *hdev)
4005 struct hclge_vport *vport = hdev->vport;
4006 u8 *rss_indir = vport[0].rss_indirection_tbl;
4007 u16 rss_size = vport[0].alloc_rss_size;
4008 u8 *key = vport[0].rss_hash_key;
4009 u8 hfunc = vport[0].rss_algo;
4010 u16 tc_offset[HCLGE_MAX_TC_NUM];
4011 u16 tc_valid[HCLGE_MAX_TC_NUM];
4012 u16 tc_size[HCLGE_MAX_TC_NUM];
4017 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4021 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4025 ret = hclge_set_rss_input_tuple(hdev);
4029 /* Each TC have the same queue size, and tc_size set to hardware is
4030 * the log2 of roundup power of two of rss_size, the actual queue
4031 * size is limited by indirection table.
 */
4033 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4034 dev_err(&hdev->pdev->dev,
4035 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4040 roundup_size = roundup_pow_of_two(rss_size);
4041 roundup_size = ilog2(roundup_size);
4043 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
/* skip TCs that are not present in the hardware TC map */
4046 if (!(hdev->hw_tc_map & BIT(i)))
4050 tc_size[i] = roundup_size;
4051 tc_offset[i] = rss_size * i;
4054 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4057 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4059 struct hclge_vport *vport = hdev->vport;
4062 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4063 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4064 vport[j].rss_indirection_tbl[i] =
4065 i % vport[j].alloc_rss_size;
/* hclge_rss_init_cfg() - set default shadow RSS config for all vports:
 * default tuple sets (L4 tuples for TCP/UDP, SCTP-specific set for SCTP),
 * hash algorithm (simple XOR from HW revision 0x21, Toeplitz before), and
 * the default hash key; then seed the indirection tables.
 * NOTE(review): lossy extract - the loop closing brace is not visible.
 */
4069 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4071 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4072 struct hclge_vport *vport = hdev->vport;
4074 if (hdev->pdev->revision >= 0x21)
4075 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4077 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4078 vport[i].rss_tuple_sets.ipv4_tcp_en =
4079 HCLGE_RSS_INPUT_TUPLE_OTHER;
4080 vport[i].rss_tuple_sets.ipv4_udp_en =
4081 HCLGE_RSS_INPUT_TUPLE_OTHER;
4082 vport[i].rss_tuple_sets.ipv4_sctp_en =
4083 HCLGE_RSS_INPUT_TUPLE_SCTP;
4084 vport[i].rss_tuple_sets.ipv4_fragment_en =
4085 HCLGE_RSS_INPUT_TUPLE_OTHER;
4086 vport[i].rss_tuple_sets.ipv6_tcp_en =
4087 HCLGE_RSS_INPUT_TUPLE_OTHER;
4088 vport[i].rss_tuple_sets.ipv6_udp_en =
4089 HCLGE_RSS_INPUT_TUPLE_OTHER;
4090 vport[i].rss_tuple_sets.ipv6_sctp_en =
4091 HCLGE_RSS_INPUT_TUPLE_SCTP;
4092 vport[i].rss_tuple_sets.ipv6_fragment_en =
4093 HCLGE_RSS_INPUT_TUPLE_OTHER;
4095 vport[i].rss_algo = rss_algo;
4097 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4098 HCLGE_RSS_KEY_SIZE);
4101 hclge_rss_indir_init_cfg(hdev);
/* hclge_bind_ring_with_vector() - map (en=true) or unmap (en=false) a
 * chain of TQP rings to/from an interrupt vector. Rings are batched into
 * descriptors of HCLGE_VECTOR_ELEMENTS_PER_CMD entries; a full descriptor
 * is flushed to firmware and a fresh one started, and any partial batch
 * is flushed at the end.
 * NOTE(review): lossy extract - 'int i = 0;', several 'if (status)'
 * guards/returns, the 'i = 0;' reset after a flush, the final partial
 * flush condition and the closing braces are not visible here.
 */
4104 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4105 int vector_id, bool en,
4106 struct hnae3_ring_chain_node *ring_chain)
4108 struct hclge_dev *hdev = vport->back;
4109 struct hnae3_ring_chain_node *node;
4110 struct hclge_desc desc;
4111 struct hclge_ctrl_vector_chain_cmd *req
4112 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4113 enum hclge_cmd_status status;
4114 enum hclge_opcode_type op;
4115 u16 tqp_type_and_id;
4118 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4119 hclge_cmd_setup_basic_desc(&desc, op, false);
4120 req->int_vector_id = vector_id;
4123 for (node = ring_chain; node; node = node->next) {
4124 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
/* each entry encodes ring type (TX/RX), TQP index and GL index */
4125 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4127 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4128 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4129 HCLGE_TQP_ID_S, node->tqp_index);
4130 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4132 hnae3_get_field(node->int_gl_idx,
4133 HNAE3_RING_GL_IDX_M,
4134 HNAE3_RING_GL_IDX_S));
4135 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4136 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
/* descriptor full: flush it and start a new one */
4137 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4138 req->vfid = vport->vport_id;
4140 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4142 dev_err(&hdev->pdev->dev,
4143 "Map TQP fail, status is %d.\n",
4149 hclge_cmd_setup_basic_desc(&desc,
4152 req->int_vector_id = vector_id;
/* flush the final partial batch, if any */
4157 req->int_cause_num = i;
4158 req->vfid = vport->vport_id;
4159 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4161 dev_err(&hdev->pdev->dev,
4162 "Map TQP fail, status is %d.\n", status);
4170 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4171 struct hnae3_ring_chain_node *ring_chain)
4173 struct hclge_vport *vport = hclge_get_vport(handle);
4174 struct hclge_dev *hdev = vport->back;
4177 vector_id = hclge_get_vector_index(hdev, vector);
4178 if (vector_id < 0) {
4179 dev_err(&hdev->pdev->dev,
4180 "Get vector index fail. vector_id =%d\n", vector_id);
4184 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
/* hclge_unmap_ring_frm_vector() - unmap a chain of rings from an MSI-X
 * vector. Skipped while a reset is being handled (hardware state will be
 * rebuilt anyway).
 * NOTE(review): lossy extract - 'int vector_id, ret;', the early
 * 'return 0;' on reset, the error returns and closing braces are not
 * visible here.
 */
4187 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4188 struct hnae3_ring_chain_node *ring_chain)
4190 struct hclge_vport *vport = hclge_get_vport(handle);
4191 struct hclge_dev *hdev = vport->back;
4194 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4197 vector_id = hclge_get_vector_index(hdev, vector);
4198 if (vector_id < 0) {
4199 dev_err(&handle->pdev->dev,
4200 "Get vector index fail. ret =%d\n", vector_id);
4204 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4206 dev_err(&handle->pdev->dev,
4207 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
/* hclge_cmd_set_promisc_mode() - send the CFG_PROMISC_MODE command for one
 * VF/vport, enabling the UC/MC/BC promiscuous bits selected in @param.
 * NOTE(review): lossy extract - 'int ret;', the 'if (ret)' guard and the
 * trailing 'return ret;' are not visible here.
 */
4213 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4214 struct hclge_promisc_param *param)
4216 struct hclge_promisc_cfg_cmd *req;
4217 struct hclge_desc desc;
4220 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4222 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4223 req->vf_id = param->vf_id;
4225 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4226 * pdev revision 0x20; newer revisions support them. Setting these two
4227 * fields does not cause an error when the driver sends the command to
4228 * firmware on revision 0x20.
 */
4230 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4231 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4233 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4235 dev_err(&hdev->pdev->dev,
4236 "Set promisc mode fail, status is %d.\n", ret);
4241 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4242 bool en_mc, bool en_bc, int vport_id)
4247 memset(param, 0, sizeof(struct hclge_promisc_param));
4249 param->enable = HCLGE_PROMISC_EN_UC;
4251 param->enable |= HCLGE_PROMISC_EN_MC;
4253 param->enable |= HCLGE_PROMISC_EN_BC;
4254 param->vf_id = vport_id;
4257 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4260 struct hclge_vport *vport = hclge_get_vport(handle);
4261 struct hclge_dev *hdev = vport->back;
4262 struct hclge_promisc_param param;
4263 bool en_bc_pmc = true;
4265 /* For revision 0x20, if broadcast promisc enabled, vlan filter is
4266 * always bypassed. So broadcast promisc should be disabled until
4267 * user enable promisc mode
4269 if (handle->pdev->revision == 0x20)
4270 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4272 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4274 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4277 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4279 struct hclge_get_fd_mode_cmd *req;
4280 struct hclge_desc desc;
4283 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4285 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4287 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4289 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4293 *fd_mode = req->mode;
4298 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4299 u32 *stage1_entry_num,
4300 u32 *stage2_entry_num,
4301 u16 *stage1_counter_num,
4302 u16 *stage2_counter_num)
4304 struct hclge_get_fd_allocation_cmd *req;
4305 struct hclge_desc desc;
4308 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4310 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4312 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4314 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4319 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4320 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4321 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4322 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4327 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4329 struct hclge_set_fd_key_config_cmd *req;
4330 struct hclge_fd_key_cfg *stage;
4331 struct hclge_desc desc;
4334 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4336 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4337 stage = &hdev->fd_cfg.key_cfg[stage_num];
4338 req->stage = stage_num;
4339 req->key_select = stage->key_sel;
4340 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4341 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4342 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4343 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4344 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4345 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4347 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4349 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
/* hclge_init_fd_config() - initialize flow director: query the FD mode,
 * derive the max key length, select the supported flow types and stage-1
 * active tuples/meta-data, query rule/counter allocation, and push the
 * stage-1 key config to hardware.
 * NOTE(review): lossy extract - 'int ret;', 'break;'s, '-EOPNOTSUPP'
 * returns and several braces are not visible here.
 */
4354 static int hclge_init_fd_config(struct hclge_dev *hdev)
4356 #define LOW_2_WORDS 0x03
4357 struct hclge_fd_key_cfg *key_cfg;
4360 if (!hnae3_dev_fd_supported(hdev))
4363 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4367 switch (hdev->fd_cfg.fd_mode) {
4368 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4369 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4371 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4372 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4375 dev_err(&hdev->pdev->dev,
4376 "Unsupported flow director mode %d\n",
4377 hdev->fd_cfg.fd_mode);
4381 hdev->fd_cfg.proto_support =
4382 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4383 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4384 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
/* NOTE(review): trailing comma (comma operator) - presumably meant to be
 * a semicolon; harmless but worth fixing upstream.
 */
4385 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
4386 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4387 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4388 key_cfg->outer_sipv6_word_en = 0;
4389 key_cfg->outer_dipv6_word_en = 0;
4391 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4392 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4393 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4394 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4396 /* If use max 400bit key, we can support tuples for ether type */
4397 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4398 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4399 key_cfg->tuple_active |=
4400 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4403 /* roce_type is used to filter roce frames
4404 * dst_vport is used to specify the rule
 */
4406 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4408 ret = hclge_get_fd_allocation(hdev,
4409 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4410 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4411 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4412 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4416 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
/* hclge_fd_tcam_config() - write one TCAM entry (the X or Y half of a
 * rule key, selected by @sel_x) at index @loc using a chained 3-descriptor
 * FD_TCAM_OP command; the key bytes are split across the three
 * descriptors' tcam_data areas.
 * NOTE(review): lossy extract - 'int ret;', the 'if (key)' guard around
 * the memcpy's, and the trailing 'return ret;' are not visible here.
 */
4419 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4420 int loc, u8 *key, bool is_add)
4422 struct hclge_fd_tcam_config_1_cmd *req1;
4423 struct hclge_fd_tcam_config_2_cmd *req2;
4424 struct hclge_fd_tcam_config_3_cmd *req3;
4425 struct hclge_desc desc[3];
/* chain three descriptors into a single multi-BD command */
4428 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4429 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4430 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4431 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4432 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4434 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4435 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4436 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4438 req1->stage = stage;
4439 req1->xy_sel = sel_x ? 1 : 0;
4440 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4441 req1->index = cpu_to_le32(loc);
/* only the X write marks the entry valid (or clears it on delete) */
4442 req1->entry_vld = sel_x ? is_add : 0;
4445 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4446 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4447 sizeof(req2->tcam_data));
4448 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4449 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4452 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4454 dev_err(&hdev->pdev->dev,
4455 "config tcam key fail, ret=%d\n",
/* hclge_fd_ad_config() - program the action data (AD) for one flow
 * director rule: drop vs. forward-to-queue, counter usage, next-stage
 * chaining and the rule id to write back to the BD.
 * NOTE(review): lossy extract - 'u64 ad_data = 0;'/'int ret;' and the
 * trailing 'return ret;' are not visible here.
 */
4461 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4462 struct hclge_fd_ad_data *action)
4464 struct hclge_fd_ad_config_cmd *req;
4465 struct hclge_desc desc;
4469 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4471 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4472 req->index = cpu_to_le32(loc);
4475 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4476 action->write_rule_id_to_bd);
4477 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4480 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4481 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4482 action->forward_to_direct_queue);
4483 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4485 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4486 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4487 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4488 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
/* NOTE(review): NXT_KEY is filled from counter_id, not next_input_key -
 * looks like a copy-paste slip; confirm against the hardware spec.
 */
4489 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4490 action->counter_id);
4492 req->ad_data = cpu_to_le64(ad_data);
4493 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4495 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
/* hclge_fd_convert_tuple() - convert one rule tuple (value + mask) into
 * the TCAM's X/Y key encoding via calc_x()/calc_y(), writing the result
 * into the key buffers at the caller's current position. Returns false
 * for tuples marked unused so the caller still advances past them.
 * NOTE(review): lossy extract - 'int i;', the early 'return false;',
 * 'break;'s, the 'default: return false;' branch, the final
 * 'return true;' and several closing braces are not visible here.
 */
4500 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4501 struct hclge_fd_rule *rule)
4503 u16 tmp_x_s, tmp_y_s;
4504 u32 tmp_x_l, tmp_y_l;
4507 if (rule->unused_tuple & tuple_bit)
4510 switch (tuple_bit) {
/* MAC bytes are written in reverse order for the hardware key layout */
4513 case BIT(INNER_DST_MAC):
4514 for (i = 0; i < ETH_ALEN; i++) {
4515 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4516 rule->tuples_mask.dst_mac[i]);
4517 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4518 rule->tuples_mask.dst_mac[i]);
4522 case BIT(INNER_SRC_MAC):
4523 for (i = 0; i < ETH_ALEN; i++) {
/* NOTE(review): the mask argument here is rule->tuples.src_mac (the
 * value itself) instead of rule->tuples_mask.src_mac, unlike the
 * DST_MAC case above - looks like a bug; confirm and fix upstream.
 */
4524 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4525 rule->tuples.src_mac[i]);
4526 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4527 rule->tuples.src_mac[i]);
4531 case BIT(INNER_VLAN_TAG_FST):
4532 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4533 rule->tuples_mask.vlan_tag1);
4534 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4535 rule->tuples_mask.vlan_tag1);
4536 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4537 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4540 case BIT(INNER_ETH_TYPE):
4541 calc_x(tmp_x_s, rule->tuples.ether_proto,
4542 rule->tuples_mask.ether_proto);
4543 calc_y(tmp_y_s, rule->tuples.ether_proto,
4544 rule->tuples_mask.ether_proto);
4545 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4546 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4549 case BIT(INNER_IP_TOS):
4550 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4551 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4554 case BIT(INNER_IP_PROTO):
4555 calc_x(*key_x, rule->tuples.ip_proto,
4556 rule->tuples_mask.ip_proto);
4557 calc_y(*key_y, rule->tuples.ip_proto,
4558 rule->tuples_mask.ip_proto);
4561 case BIT(INNER_SRC_IP):
4562 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4563 rule->tuples_mask.src_ip[IPV4_INDEX]);
4564 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4565 rule->tuples_mask.src_ip[IPV4_INDEX]);
4566 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4567 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4570 case BIT(INNER_DST_IP):
4571 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4572 rule->tuples_mask.dst_ip[IPV4_INDEX]);
4573 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4574 rule->tuples_mask.dst_ip[IPV4_INDEX]);
4575 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4576 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4579 case BIT(INNER_SRC_PORT):
4580 calc_x(tmp_x_s, rule->tuples.src_port,
4581 rule->tuples_mask.src_port);
4582 calc_y(tmp_y_s, rule->tuples.src_port,
4583 rule->tuples_mask.src_port);
4584 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4585 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4588 case BIT(INNER_DST_PORT):
4589 calc_x(tmp_x_s, rule->tuples.dst_port,
4590 rule->tuples_mask.dst_port);
4591 calc_y(tmp_y_s, rule->tuples.dst_port,
4592 rule->tuples_mask.dst_port);
4593 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4594 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4602 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4603 u8 vf_id, u8 network_port_id)
4605 u32 port_number = 0;
4607 if (port_type == HOST_PORT) {
4608 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4610 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4612 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4614 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4615 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4616 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
/* hclge_fd_convert_meta_data() - build the meta-data portion of a rule
 * key (packet type, destination vport number), then convert it to the
 * TCAM X/Y encoding and left-align it into the key's MSB region.
 * NOTE(review): lossy extract - 'int i;', 'break;'s, the vport-id
 * argument of hclge_get_port_number() and closing braces are not
 * visible here.
 */
4622 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4623 __le32 *key_x, __le32 *key_y,
4624 struct hclge_fd_rule *rule)
4626 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4627 u8 cur_pos = 0, tuple_size, shift_bits;
4630 for (i = 0; i < MAX_META_DATA; i++) {
4631 tuple_size = meta_data_key_info[i].key_length;
4632 tuple_bit = key_cfg->meta_data_active & BIT(i);
4634 switch (tuple_bit) {
4635 case BIT(ROCE_TYPE):
4636 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4637 cur_pos += tuple_size;
4639 case BIT(DST_VPORT):
4640 port_number = hclge_get_port_number(HOST_PORT, 0,
/* NOTE(review): GENMASK(cur_pos + tuple_size, cur_pos) spans
 * tuple_size + 1 bits - presumably should be cur_pos + tuple_size - 1;
 * confirm against the key layout.
 */
4642 hnae3_set_field(meta_data,
4643 GENMASK(cur_pos + tuple_size, cur_pos),
4644 cur_pos, port_number);
4645 cur_pos += tuple_size;
/* meta-data occupies the MSBs: encode fully, then shift into place */
4652 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4653 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4654 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4656 *key_x = cpu_to_le32(tmp_x << shift_bits);
4657 *key_y = cpu_to_le32(tmp_y << shift_bits);
4660 /* A complete key is combined with meta data key and tuple key.
4661 * Meta data key is stored at the MSB region, and tuple key is stored at
4662 * the LSB region, unused bits will be filled 0.
 */
/* hclge_config_key() - assemble a rule's full TCAM key (tuples in the
 * LSB region, meta-data in the MSB region) and write both the Y and X
 * halves to the TCAM at rule->location.
 * NOTE(review): lossy extract - 'int i;'/'bool check_tuple, tuple_valid;',
 * the cur_key_x/cur_key_y initializers, the 'if (ret) return ret;'
 * guards and the final 'return 0;' are not visible here.
 */
4664 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4665 struct hclge_fd_rule *rule)
4667 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4668 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4669 u8 *cur_key_x, *cur_key_y;
4671 int ret, tuple_size;
4672 u8 meta_data_region;
4674 memset(key_x, 0, sizeof(key_x));
4675 memset(key_y, 0, sizeof(key_y));
4679 for (i = 0 ; i < MAX_TUPLE; i++) {
4683 tuple_size = tuple_key_info[i].key_length / 8;
4684 check_tuple = key_cfg->tuple_active & BIT(i);
4686 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4689 cur_key_x += tuple_size;
4690 cur_key_y += tuple_size;
/* meta-data lives at the top of the key, after all tuple bytes */
4694 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4695 MAX_META_DATA_LENGTH / 8;
4697 hclge_fd_convert_meta_data(key_cfg,
4698 (__le32 *)(key_x + meta_data_region),
4699 (__le32 *)(key_y + meta_data_region),
4702 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
/* NOTE(review): the "loc=%d" messages below print rule->queue_id, not
 * rule->location - presumably a copy-paste slip; confirm upstream.
 */
4705 dev_err(&hdev->pdev->dev,
4706 "fd key_y config fail, loc=%d, ret=%d\n",
4707 rule->queue_id, ret);
4711 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4714 dev_err(&hdev->pdev->dev,
4715 "fd key_x config fail, loc=%d, ret=%d\n",
4716 rule->queue_id, ret);
4720 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4721 struct hclge_fd_rule *rule)
4723 struct hclge_fd_ad_data ad_data;
4725 ad_data.ad_id = rule->location;
4727 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4728 ad_data.drop_packet = true;
4729 ad_data.forward_to_direct_queue = false;
4730 ad_data.queue_id = 0;
4732 ad_data.drop_packet = false;
4733 ad_data.forward_to_direct_queue = true;
4734 ad_data.queue_id = rule->queue_id;
4737 ad_data.use_counter = false;
4738 ad_data.counter_id = 0;
4740 ad_data.use_next_stage = false;
4741 ad_data.next_input_key = 0;
4743 ad_data.write_rule_id_to_bd = true;
4744 ad_data.rule_id = rule->location;
4746 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
/* hclge_fd_check_spec() - validate an ethtool flow-spec for flow director
 * and compute the set of tuples the rule does NOT use (@unused, as a
 * bitmap of INNER_* bits). Rejects out-of-range locations, unsupported
 * flow types, user-defined bytes, non-zero tclass on IPv6, stray
 * l4_4_bytes, bad ip_ver, unsupported vlan_etype and invalid VLAN ids.
 * NOTE(review): lossy extract - '-EINVAL'/'-EOPNOTSUPP' returns, several
 * 'case' labels (TCP/UDP/SCTP V4/V6, ETHER_FLOW), 'break;'s, the default
 * branch, the 'else' arms in the FLOW_EXT/FLOW_MAC_EXT handling and the
 * final 'return 0;' are not visible here.
 */
4749 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4750 struct ethtool_rx_flow_spec *fs, u32 *unused)
4752 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4753 struct ethtool_usrip4_spec *usr_ip4_spec;
4754 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4755 struct ethtool_usrip6_spec *usr_ip6_spec;
4756 struct ethhdr *ether_spec;
4758 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4761 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4764 if ((fs->flow_type & FLOW_EXT) &&
4765 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4766 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4770 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4774 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4775 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
/* a zero field in the spec means "don't match on it" */
4777 if (!tcp_ip4_spec->ip4src)
4778 *unused |= BIT(INNER_SRC_IP);
4780 if (!tcp_ip4_spec->ip4dst)
4781 *unused |= BIT(INNER_DST_IP);
4783 if (!tcp_ip4_spec->psrc)
4784 *unused |= BIT(INNER_SRC_PORT);
4786 if (!tcp_ip4_spec->pdst)
4787 *unused |= BIT(INNER_DST_PORT);
4789 if (!tcp_ip4_spec->tos)
4790 *unused |= BIT(INNER_IP_TOS);
4794 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4795 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4796 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4798 if (!usr_ip4_spec->ip4src)
4799 *unused |= BIT(INNER_SRC_IP);
4801 if (!usr_ip4_spec->ip4dst)
4802 *unused |= BIT(INNER_DST_IP);
4804 if (!usr_ip4_spec->tos)
4805 *unused |= BIT(INNER_IP_TOS);
4807 if (!usr_ip4_spec->proto)
4808 *unused |= BIT(INNER_IP_PROTO);
4810 if (usr_ip4_spec->l4_4_bytes)
4813 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4820 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4821 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4824 /* check whether src/dst ip address used */
4825 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4826 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4827 *unused |= BIT(INNER_SRC_IP);
4829 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4830 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4831 *unused |= BIT(INNER_DST_IP);
4833 if (!tcp_ip6_spec->psrc)
4834 *unused |= BIT(INNER_SRC_PORT);
4836 if (!tcp_ip6_spec->pdst)
4837 *unused |= BIT(INNER_DST_PORT);
4839 if (tcp_ip6_spec->tclass)
4843 case IPV6_USER_FLOW:
4844 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4845 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4846 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4847 BIT(INNER_DST_PORT);
4849 /* check whether src/dst ip address used */
4850 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4851 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4852 *unused |= BIT(INNER_SRC_IP);
4854 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4855 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4856 *unused |= BIT(INNER_DST_IP);
4858 if (!usr_ip6_spec->l4_proto)
4859 *unused |= BIT(INNER_IP_PROTO);
4861 if (usr_ip6_spec->tclass)
4864 if (usr_ip6_spec->l4_4_bytes)
4869 ether_spec = &fs->h_u.ether_spec;
4870 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4871 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4872 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4874 if (is_zero_ether_addr(ether_spec->h_source))
4875 *unused |= BIT(INNER_SRC_MAC);
4877 if (is_zero_ether_addr(ether_spec->h_dest))
4878 *unused |= BIT(INNER_DST_MAC);
4880 if (!ether_spec->h_proto)
4881 *unused |= BIT(INNER_ETH_TYPE);
4888 if ((fs->flow_type & FLOW_EXT)) {
4889 if (fs->h_ext.vlan_etype)
4891 if (!fs->h_ext.vlan_tci)
4892 *unused |= BIT(INNER_VLAN_TAG_FST);
4894 if (fs->m_ext.vlan_tci) {
4895 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4899 *unused |= BIT(INNER_VLAN_TAG_FST);
4902 if (fs->flow_type & FLOW_MAC_EXT) {
4903 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4906 if (is_zero_ether_addr(fs->h_ext.h_dest))
4907 *unused |= BIT(INNER_DST_MAC);
4909 *unused &= ~(BIT(INNER_DST_MAC));
4915 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4917 struct hclge_fd_rule *rule = NULL;
4918 struct hlist_node *node2;
4920 spin_lock_bh(&hdev->fd_rule_lock);
4921 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4922 if (rule->location >= location)
4926 spin_unlock_bh(&hdev->fd_rule_lock);
4928 return rule && rule->location == location;
4931 /* Caller must hold fd_rule_lock. */
/* hclge_fd_update_rule_list() - update the location-sorted FD rule list:
 * remove any existing rule at @location (freeing it and updating the
 * counters/bitmap), then, when adding, insert @new_rule after the last
 * rule with a smaller location (or at the head).
 * NOTE(review): lossy extract - the 'u16 location'/'bool is_add'
 * parameters, '-EINVAL'/'-ENOENT' returns, 'parent = rule;' tracking,
 * 'kfree(rule);', 'if (!is_add) return 0;', the 'if (parent)'/'else'
 * around the insert and the final 'return 0;' are not visible here.
 */
4932 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4933 struct hclge_fd_rule *new_rule,
4937 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4938 struct hlist_node *node2;
4940 if (is_add && !new_rule)
4943 hlist_for_each_entry_safe(rule, node2,
4944 &hdev->fd_rule_list, rule_node) {
4945 if (rule->location >= location)
4950 if (rule && rule->location == location) {
4951 hlist_del(&rule->rule_node);
4953 hdev->hclge_fd_rule_num--;
4956 if (!hdev->hclge_fd_rule_num)
4957 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4958 clear_bit(location, hdev->fd_bmap);
4962 } else if (!is_add) {
4963 dev_err(&hdev->pdev->dev,
4964 "delete fail, rule %d is inexistent\n",
4969 INIT_HLIST_NODE(&new_rule->rule_node);
4972 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4974 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4976 set_bit(location, hdev->fd_bmap);
4977 hdev->hclge_fd_rule_num++;
4978 hdev->fd_active_type = new_rule->rule_type;
/* Translate an ethtool rx flow spec into the driver's internal tuple
 * representation (@rule->tuples value, @rule->tuples_mask mask),
 * converting all big-endian spec fields to host order.
 */
4983 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4984 struct ethtool_rx_flow_spec *fs,
4985 struct hclge_fd_rule *rule)
4987 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4989 switch (flow_type) {
/* IPv4 L4 flows (SCTP/TCP/UDP): addresses, ports and TOS */
4993 rule->tuples.src_ip[IPV4_INDEX] =
4994 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4995 rule->tuples_mask.src_ip[IPV4_INDEX] =
4996 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4998 rule->tuples.dst_ip[IPV4_INDEX] =
4999 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5000 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5001 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5003 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5004 rule->tuples_mask.src_port =
5005 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5007 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5008 rule->tuples_mask.dst_port =
5009 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5011 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5012 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5014 rule->tuples.ether_proto = ETH_P_IP;
/* ethertype is implied by the flow type, so match it exactly */
5015 rule->tuples_mask.ether_proto = 0xFFFF;
/* IPv4 user flow: addresses, TOS and raw L4 protocol number */
5019 rule->tuples.src_ip[IPV4_INDEX] =
5020 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5021 rule->tuples_mask.src_ip[IPV4_INDEX] =
5022 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5024 rule->tuples.dst_ip[IPV4_INDEX] =
5025 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5026 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5027 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5029 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5030 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5032 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5033 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5035 rule->tuples.ether_proto = ETH_P_IP;
5036 rule->tuples_mask.ether_proto = 0xFFFF;
/* IPv6 L4 flows: 128-bit addresses converted word-by-word */
5042 be32_to_cpu_array(rule->tuples.src_ip,
5043 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5044 be32_to_cpu_array(rule->tuples_mask.src_ip,
5045 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5047 be32_to_cpu_array(rule->tuples.dst_ip,
5048 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5049 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5050 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5052 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5053 rule->tuples_mask.src_port =
5054 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5056 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5057 rule->tuples_mask.dst_port =
5058 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5060 rule->tuples.ether_proto = ETH_P_IPV6;
5061 rule->tuples_mask.ether_proto = 0xFFFF;
5064 case IPV6_USER_FLOW:
5065 be32_to_cpu_array(rule->tuples.src_ip,
5066 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5067 be32_to_cpu_array(rule->tuples_mask.src_ip,
5068 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5070 be32_to_cpu_array(rule->tuples.dst_ip,
5071 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5072 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5073 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5075 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5076 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5078 rule->tuples.ether_proto = ETH_P_IPV6;
5079 rule->tuples_mask.ether_proto = 0xFFFF;
/* raw ethernet flow: MACs and ethertype from the spec itself */
5083 ether_addr_copy(rule->tuples.src_mac,
5084 fs->h_u.ether_spec.h_source);
5085 ether_addr_copy(rule->tuples_mask.src_mac,
5086 fs->m_u.ether_spec.h_source);
5088 ether_addr_copy(rule->tuples.dst_mac,
5089 fs->h_u.ether_spec.h_dest);
5090 ether_addr_copy(rule->tuples_mask.dst_mac,
5091 fs->m_u.ether_spec.h_dest);
5093 rule->tuples.ether_proto =
5094 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5095 rule->tuples_mask.ether_proto =
5096 be16_to_cpu(fs->m_u.ether_spec.h_proto);
/* for SCTP/TCP/UDP flow types the L4 protocol is fixed exactly */
5103 switch (flow_type) {
5106 rule->tuples.ip_proto = IPPROTO_SCTP;
5107 rule->tuples_mask.ip_proto = 0xFF;
5111 rule->tuples.ip_proto = IPPROTO_TCP;
5112 rule->tuples_mask.ip_proto = 0xFF;
5116 rule->tuples.ip_proto = IPPROTO_UDP;
5117 rule->tuples_mask.ip_proto = 0xFF;
/* optional extensions: VLAN tag and a destination MAC override */
5123 if ((fs->flow_type & FLOW_EXT)) {
5124 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5125 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5128 if (fs->flow_type & FLOW_MAC_EXT) {
5129 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5130 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5136 /* make sure being called after lock up with fd_rule_lock */
/* Add @rule to the software list, then program its action and TCAM key
 * into stage 1 hardware; on a hardware failure the list insertion is
 * rolled back so software and hardware state stay in sync.
 */
5137 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5138 struct hclge_fd_rule *rule)
5143 dev_err(&hdev->pdev->dev,
5144 "The flow director rule is NULL\n")
5148 /* it will never fail here, so needn't to check return value */
5149 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5151 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5155 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
/* undo the list insertion when hardware programming failed */
5162 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
/* ethtool ETHTOOL_SRXCLSRLINS handler: validate the user's flow spec,
 * resolve the target (drop, or a vport/queue pair possibly belonging to
 * a VF), build an hclge_fd_rule and program it into stage-1 TCAM.
 */
5166 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5167 struct ethtool_rxnfc *cmd)
5169 struct hclge_vport *vport = hclge_get_vport(handle);
5170 struct hclge_dev *hdev = vport->back;
5171 u16 dst_vport_id = 0, q_index = 0;
5172 struct ethtool_rx_flow_spec *fs;
5173 struct hclge_fd_rule *rule;
5178 if (!hnae3_dev_fd_supported(hdev))
5182 dev_warn(&hdev->pdev->dev,
5183 "Please enable flow director first\n");
5187 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5189 ret = hclge_fd_check_spec(hdev, fs, &unused);
5191 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
/* RX_CLS_FLOW_DISC means the matched packets are to be dropped */
5195 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5196 action = HCLGE_FD_ACTION_DROP_PACKET;
5198 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5199 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
/* vf == 0 targets the PF itself, 1..num_req_vfs target VFs */
5202 if (vf > hdev->num_req_vfs) {
5203 dev_err(&hdev->pdev->dev,
5204 "Error: vf id (%d) > max vf num (%d)\n",
5205 vf, hdev->num_req_vfs);
5209 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5210 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
/* the ring index must fit within the target vport's tqp allocation */
5213 dev_err(&hdev->pdev->dev,
5214 "Error: queue id (%d) > max tqp num (%d)\n",
5219 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5223 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5227 ret = hclge_fd_get_tuple(hdev, fs, rule);
5233 rule->flow_type = fs->flow_type;
5235 rule->location = fs->location;
5236 rule->unused_tuple = unused;
5237 rule->vf_id = dst_vport_id;
5238 rule->queue_id = q_index;
5239 rule->action = action;
5240 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5242 /* to avoid rule conflict, when user configure rule by ethtool,
5243 * we need to clear all arfs rules
5245 hclge_clear_arfs_rules(handle);
5247 spin_lock_bh(&hdev->fd_rule_lock);
5248 ret = hclge_fd_config_rule(hdev, rule);
5250 spin_unlock_bh(&hdev->fd_rule_lock);
/* ethtool ETHTOOL_SRXCLSRLDEL handler: invalidate the TCAM entry at
 * fs->location and remove the matching rule from the software list.
 */
5255 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5256 struct ethtool_rxnfc *cmd)
5258 struct hclge_vport *vport = hclge_get_vport(handle);
5259 struct hclge_dev *hdev = vport->back;
5260 struct ethtool_rx_flow_spec *fs;
5263 if (!hnae3_dev_fd_supported(hdev))
5266 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
/* reject locations beyond the stage-1 rule capacity */
5268 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5271 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5272 dev_err(&hdev->pdev->dev,
5273 "Delete fail, rule %d is inexistent\n", fs->location);
/* 'true' selects the delete operation on the TCAM entry */
5277 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5282 spin_lock_bh(&hdev->fd_rule_lock);
5283 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5285 spin_unlock_bh(&hdev->fd_rule_lock);
/* Invalidate every installed stage-1 TCAM entry; when the (not visible
 * here) clear-list argument is set, also empty the software rule list
 * and reset the bookkeeping (counter, bitmap, active type).
 */
5290 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5293 struct hclge_vport *vport = hclge_get_vport(handle);
5294 struct hclge_dev *hdev = vport->back;
5295 struct hclge_fd_rule *rule;
5296 struct hlist_node *node;
5299 if (!hnae3_dev_fd_supported(hdev))
5302 spin_lock_bh(&hdev->fd_rule_lock);
/* only locations marked in fd_bmap actually hold hardware entries */
5303 for_each_set_bit(location, hdev->fd_bmap,
5304 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5305 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5309 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5311 hlist_del(&rule->rule_node);
5314 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5315 hdev->hclge_fd_rule_num = 0;
5316 bitmap_zero(hdev->fd_bmap,
5317 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5320 spin_unlock_bh(&hdev->fd_rule_lock);
/* Re-program all software-cached fd rules into hardware after a reset.
 * Rules that fail to restore are dropped from the list with a warning
 * rather than failing the whole reset.
 */
5323 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5325 struct hclge_vport *vport = hclge_get_vport(handle);
5326 struct hclge_dev *hdev = vport->back;
5327 struct hclge_fd_rule *rule;
5328 struct hlist_node *node;
5331 /* Return ok here, because reset error handling will check this
5332 * return value. If error is returned here, the reset process will
5335 if (!hnae3_dev_fd_supported(hdev))
5338 /* if fd is disabled, should not restore it when reset */
5342 spin_lock_bh(&hdev->fd_rule_lock);
5343 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5344 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
/* key is only written when the action programmed successfully */
5346 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5349 dev_warn(&hdev->pdev->dev,
5350 "Restore rule %d failed, remove it\n",
5352 clear_bit(rule->location, hdev->fd_bmap);
5353 hlist_del(&rule->rule_node);
5355 hdev->hclge_fd_rule_num--;
5359 if (hdev->hclge_fd_rule_num)
5360 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5362 spin_unlock_bh(&hdev->fd_rule_lock);
5367 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5368 struct ethtool_rxnfc *cmd)
5370 struct hclge_vport *vport = hclge_get_vport(handle);
5371 struct hclge_dev *hdev = vport->back;
5373 if (!hnae3_dev_fd_supported(hdev))
5376 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5377 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
/* ethtool ETHTOOL_GRXCLSRULE handler: find the rule at fs->location and
 * translate the driver's internal tuple/mask representation back into
 * an ethtool flow spec.  Tuples flagged in rule->unused_tuple report a
 * zero (wildcard) mask.
 */
5382 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5383 struct ethtool_rxnfc *cmd)
5385 struct hclge_vport *vport = hclge_get_vport(handle);
5386 struct hclge_fd_rule *rule = NULL;
5387 struct hclge_dev *hdev = vport->back;
5388 struct ethtool_rx_flow_spec *fs;
5389 struct hlist_node *node2;
5391 if (!hnae3_dev_fd_supported(hdev))
5394 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5396 spin_lock_bh(&hdev->fd_rule_lock);
/* sorted list: first entry at/after fs->location is the candidate */
5398 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5399 if (rule->location >= fs->location)
5403 if (!rule || fs->location != rule->location) {
5404 spin_unlock_bh(&hdev->fd_rule_lock);
5409 fs->flow_type = rule->flow_type;
5410 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
/* IPv4 L4 flows: addresses, ports, TOS (mask 0 when tuple unused) */
5414 fs->h_u.tcp_ip4_spec.ip4src =
5415 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5416 fs->m_u.tcp_ip4_spec.ip4src =
5417 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5418 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5420 fs->h_u.tcp_ip4_spec.ip4dst =
5421 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5422 fs->m_u.tcp_ip4_spec.ip4dst =
5423 rule->unused_tuple & BIT(INNER_DST_IP) ?
5424 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5426 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5427 fs->m_u.tcp_ip4_spec.psrc =
5428 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5429 0 : cpu_to_be16(rule->tuples_mask.src_port);
5431 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5432 fs->m_u.tcp_ip4_spec.pdst =
5433 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5434 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5436 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5437 fs->m_u.tcp_ip4_spec.tos =
5438 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5439 0 : rule->tuples_mask.ip_tos;
/* IPv4 user flow: addresses, TOS and raw protocol number */
5443 fs->h_u.usr_ip4_spec.ip4src =
5444 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
/* NOTE(review): value goes to usr_ip4_spec but the mask below is
 * written to tcp_ip4_spec; same union offset in practice, but
 * usr_ip4_spec.ip4src would be consistent - confirm upstream.
 */
5445 fs->m_u.tcp_ip4_spec.ip4src =
5446 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5447 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5449 fs->h_u.usr_ip4_spec.ip4dst =
5450 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5451 fs->m_u.usr_ip4_spec.ip4dst =
5452 rule->unused_tuple & BIT(INNER_DST_IP) ?
5453 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5455 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5456 fs->m_u.usr_ip4_spec.tos =
5457 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5458 0 : rule->tuples_mask.ip_tos;
5460 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5461 fs->m_u.usr_ip4_spec.proto =
5462 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5463 0 : rule->tuples_mask.ip_proto;
5465 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
/* IPv6 L4 flows: 128-bit addresses converted word-by-word */
5471 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5472 rule->tuples.src_ip, IPV6_SIZE);
5473 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5474 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5475 sizeof(int) * IPV6_SIZE);
5477 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5478 rule->tuples_mask.src_ip, IPV6_SIZE);
5480 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5481 rule->tuples.dst_ip, IPV6_SIZE);
5482 if (rule->unused_tuple & BIT(INNER_DST_IP))
5483 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5484 sizeof(int) * IPV6_SIZE);
5486 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5487 rule->tuples_mask.dst_ip, IPV6_SIZE);
5489 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5490 fs->m_u.tcp_ip6_spec.psrc =
5491 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5492 0 : cpu_to_be16(rule->tuples_mask.src_port);
5494 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5495 fs->m_u.tcp_ip6_spec.pdst =
5496 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5497 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5500 case IPV6_USER_FLOW:
5501 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5502 rule->tuples.src_ip, IPV6_SIZE);
5503 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5504 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5505 sizeof(int) * IPV6_SIZE);
5507 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5508 rule->tuples_mask.src_ip, IPV6_SIZE);
5510 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5511 rule->tuples.dst_ip, IPV6_SIZE);
5512 if (rule->unused_tuple & BIT(INNER_DST_IP))
5513 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5514 sizeof(int) * IPV6_SIZE);
5516 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5517 rule->tuples_mask.dst_ip, IPV6_SIZE);
5519 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5520 fs->m_u.usr_ip6_spec.l4_proto =
5521 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5522 0 : rule->tuples_mask.ip_proto;
/* raw ethernet flow: MACs and ethertype */
5526 ether_addr_copy(fs->h_u.ether_spec.h_source,
5527 rule->tuples.src_mac);
5528 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5529 eth_zero_addr(fs->m_u.ether_spec.h_source);
5531 ether_addr_copy(fs->m_u.ether_spec.h_source,
5532 rule->tuples_mask.src_mac);
5534 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5535 rule->tuples.dst_mac);
5536 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5537 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5539 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5540 rule->tuples_mask.dst_mac);
5542 fs->h_u.ether_spec.h_proto =
5543 cpu_to_be16(rule->tuples.ether_proto);
5544 fs->m_u.ether_spec.h_proto =
5545 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5546 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5550 spin_unlock_bh(&hdev->fd_rule_lock);
5554 if (fs->flow_type & FLOW_EXT) {
5555 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5556 fs->m_ext.vlan_tci =
5557 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5558 cpu_to_be16(VLAN_VID_MASK) :
5559 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5562 if (fs->flow_type & FLOW_MAC_EXT) {
5563 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
/* NOTE(review): the value above is written to fs->h_ext.h_dest but
 * the mask below targets fs->m_u.ether_spec.h_dest; it looks like it
 * should be fs->m_ext.h_dest to pair with h_ext - confirm upstream.
 */
5564 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5565 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5567 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5568 rule->tuples_mask.dst_mac);
5571 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5572 fs->ring_cookie = RX_CLS_FLOW_DISC;
/* encode the owning VF id into the upper bits of the ring cookie */
5576 fs->ring_cookie = rule->queue_id;
5577 vf_id = rule->vf_id;
5578 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5579 fs->ring_cookie |= vf_id;
5582 spin_unlock_bh(&hdev->fd_rule_lock);
/* ethtool ETHTOOL_GRXCLSRLALL handler: fill @rule_locs with the
 * locations of all installed rules, up to cmd->rule_cnt entries, and
 * report the number copied plus the stage-1 capacity in cmd->data.
 */
5587 static int hclge_get_all_rules(struct hnae3_handle *handle,
5588 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5590 struct hclge_vport *vport = hclge_get_vport(handle);
5591 struct hclge_dev *hdev = vport->back;
5592 struct hclge_fd_rule *rule;
5593 struct hlist_node *node2;
5596 if (!hnae3_dev_fd_supported(hdev))
5599 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5601 spin_lock_bh(&hdev->fd_rule_lock);
5602 hlist_for_each_entry_safe(rule, node2,
5603 &hdev->fd_rule_list, rule_node) {
/* caller's buffer is full: bail out rather than overflow it */
5604 if (cnt == cmd->rule_cnt) {
5605 spin_unlock_bh(&hdev->fd_rule_lock);
5609 rule_locs[cnt] = rule->location;
5613 spin_unlock_bh(&hdev->fd_rule_lock);
5615 cmd->rule_cnt = cnt;
5620 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5621 struct hclge_fd_rule_tuples *tuples)
5623 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5624 tuples->ip_proto = fkeys->basic.ip_proto;
5625 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5627 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5628 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5629 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5631 memcpy(tuples->src_ip,
5632 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5633 sizeof(tuples->src_ip));
5634 memcpy(tuples->dst_ip,
5635 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5636 sizeof(tuples->dst_ip));
5640 /* traverse all rules, check whether an existed rule has the same tuples */
5641 static struct hclge_fd_rule *
5642 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5643 const struct hclge_fd_rule_tuples *tuples)
5645 struct hclge_fd_rule *rule = NULL;
5646 struct hlist_node *node;
5648 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5649 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5656 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5657 struct hclge_fd_rule *rule)
5659 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5660 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5661 BIT(INNER_SRC_PORT);
5664 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5665 if (tuples->ether_proto == ETH_P_IP) {
5666 if (tuples->ip_proto == IPPROTO_TCP)
5667 rule->flow_type = TCP_V4_FLOW;
5669 rule->flow_type = UDP_V4_FLOW;
5671 if (tuples->ip_proto == IPPROTO_TCP)
5672 rule->flow_type = TCP_V6_FLOW;
5674 rule->flow_type = UDP_V6_FLOW;
5676 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5677 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
/* aRFS (ndo_rx_flow_steer) callback: steer the flow described by
 * @fkeys to @queue_id.  Returns the rule location on success so the
 * stack can later query expiry, or a negative errno.
 */
5680 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5681 u16 flow_id, struct flow_keys *fkeys)
5683 struct hclge_vport *vport = hclge_get_vport(handle);
5684 struct hclge_fd_rule_tuples new_tuples;
5685 struct hclge_dev *hdev = vport->back;
5686 struct hclge_fd_rule *rule;
5691 if (!hnae3_dev_fd_supported(hdev))
5694 memset(&new_tuples, 0, sizeof(new_tuples));
5695 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5697 spin_lock_bh(&hdev->fd_rule_lock);
5699 /* when there is already fd rule existed add by user,
5700 * arfs should not work
5702 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5703 spin_unlock_bh(&hdev->fd_rule_lock);
5708 /* check is there flow director filter existed for this flow,
5709 * if not, create a new filter for it;
5710 * if filter exist with different queue id, modify the filter;
5711 * if filter exist with same queue id, do nothing
5713 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
/* no match: allocate a free location from the bitmap and a new rule */
5715 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5716 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5717 spin_unlock_bh(&hdev->fd_rule_lock);
5722 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5724 spin_unlock_bh(&hdev->fd_rule_lock);
5729 set_bit(bit_id, hdev->fd_bmap);
5730 rule->location = bit_id;
5731 rule->flow_id = flow_id;
5732 rule->queue_id = queue_id;
5733 hclge_fd_build_arfs_rule(&new_tuples, rule);
5734 ret = hclge_fd_config_rule(hdev, rule);
5736 spin_unlock_bh(&hdev->fd_rule_lock);
5741 return rule->location;
5744 spin_unlock_bh(&hdev->fd_rule_lock);
/* flow already steered to this queue: nothing to reprogram */
5746 if (rule->queue_id == queue_id)
5747 return rule->location;
/* queue changed: update only the action, restoring it on failure */
5749 tmp_queue_id = rule->queue_id;
5750 rule->queue_id = queue_id;
5751 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5753 rule->queue_id = tmp_queue_id;
5757 return rule->location;
/* Periodically expire aRFS-installed rules the stack no longer needs:
 * collect expired entries on a private list under the lock, then tear
 * down their TCAM entries outside the lock.
 */
5760 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5762 #ifdef CONFIG_RFS_ACCEL
5763 struct hnae3_handle *handle = &hdev->vport[0].nic;
5764 struct hclge_fd_rule *rule;
5765 struct hlist_node *node;
5766 HLIST_HEAD(del_list);
5768 spin_lock_bh(&hdev->fd_rule_lock);
/* only aRFS-owned rules may be expired; user rules are left alone */
5769 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5770 spin_unlock_bh(&hdev->fd_rule_lock);
5773 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5774 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5775 rule->flow_id, rule->location)) {
5776 hlist_del_init(&rule->rule_node);
5777 hlist_add_head(&rule->rule_node, &del_list);
5778 hdev->hclge_fd_rule_num--;
5779 clear_bit(rule->location, hdev->fd_bmap);
5782 spin_unlock_bh(&hdev->fd_rule_lock);
/* hardware teardown happens without holding fd_rule_lock */
5784 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5785 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5786 rule->location, NULL, false);
/* Drop every aRFS-installed rule (hardware entry and software list) so
 * user-configured ethtool rules cannot conflict with them.  No-op when
 * the active rules were not added by aRFS.
 */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_dev *hdev = hclge_get_vport(handle)->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}
5803 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5805 struct hclge_vport *vport = hclge_get_vport(handle);
5806 struct hclge_dev *hdev = vport->back;
5808 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5809 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5812 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5814 struct hclge_vport *vport = hclge_get_vport(handle);
5815 struct hclge_dev *hdev = vport->back;
5817 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5820 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5822 struct hclge_vport *vport = hclge_get_vport(handle);
5823 struct hclge_dev *hdev = vport->back;
5825 return hdev->rst_stats.hw_reset_done_cnt;
5828 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5830 struct hclge_vport *vport = hclge_get_vport(handle);
5831 struct hclge_dev *hdev = vport->back;
5834 hdev->fd_en = enable;
5835 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE ? true : false;
5837 hclge_del_all_fd_entries(handle, clear);
5839 hclge_restore_fd_entries(handle);
/* Enable or disable the MAC: TX/RX paths, padding, FCS generation and
 * checking, and oversize/undersize handling all follow @enable; the
 * 1588 timestamp and loopback bits are always cleared here.
 */
5842 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5844 struct hclge_desc desc;
5845 struct hclge_config_mac_mode_cmd *req =
5846 (struct hclge_config_mac_mode_cmd *)desc.data;
5850 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5851 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5852 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5853 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5854 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5855 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5856 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5857 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5858 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5859 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5860 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5861 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5862 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5863 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5864 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5865 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5867 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5869 dev_err(&hdev->pdev->dev,
5870 "mac enable fail, ret =%d.\n", ret);
/* Enable/disable MAC application loopback via a read-modify-write of
 * the MAC mode command: read current config, flip the loopback and
 * TX/RX enable bits, and write the descriptor back.
 */
5873 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5875 struct hclge_config_mac_mode_cmd *req;
5876 struct hclge_desc desc;
5880 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5881 /* 1 Read out the MAC mode config at first */
5882 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5883 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5885 dev_err(&hdev->pdev->dev,
5886 "mac loopback get fail, ret =%d.\n", ret);
5890 /* 2 Then setup the loopback flag */
5891 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5892 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5893 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5894 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5896 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5898 /* 3 Config mac work mode with loopback flag
5899 * and its original configure parameters
5901 hclge_cmd_reuse_desc(&desc, false);
5902 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5904 dev_err(&hdev->pdev->dev,
5905 "mac loopback set fail, ret =%d.\n", ret);
/* Configure serdes serial/parallel inner loopback: issue the command,
 * poll until the firmware reports completion, then reconfigure the MAC
 * and wait for the link state to reach the expected value.
 */
5909 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5910 enum hnae3_loop loop_mode)
5912 #define HCLGE_SERDES_RETRY_MS 10
5913 #define HCLGE_SERDES_RETRY_NUM 100
5915 #define HCLGE_MAC_LINK_STATUS_MS 10
5916 #define HCLGE_MAC_LINK_STATUS_NUM 100
5917 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5918 #define HCLGE_MAC_LINK_STATUS_UP 1
5920 struct hclge_serdes_lb_cmd *req;
5921 struct hclge_desc desc;
5922 int mac_link_ret = 0;
5926 req = (struct hclge_serdes_lb_cmd *)desc.data;
5927 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5929 switch (loop_mode) {
5930 case HNAE3_LOOP_SERIAL_SERDES:
5931 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5933 case HNAE3_LOOP_PARALLEL_SERDES:
5934 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5937 dev_err(&hdev->pdev->dev,
5938 "unsupported serdes loopback mode %d\n", loop_mode);
/* enabling loopback sets the mode bit and expects link up;
 * disabling writes only the mask and expects link down
 */
5943 req->enable = loop_mode_b;
5944 req->mask = loop_mode_b;
5945 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5947 req->mask = loop_mode_b;
5948 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5951 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5953 dev_err(&hdev->pdev->dev,
5954 "serdes loopback set fail, ret = %d\n", ret);
/* poll the result bit until firmware marks the operation done */
5959 msleep(HCLGE_SERDES_RETRY_MS);
5960 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5962 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5964 dev_err(&hdev->pdev->dev,
5965 "serdes loopback get, ret = %d\n", ret);
5968 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5969 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5971 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5972 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5974 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5975 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5979 hclge_cfg_mac_mode(hdev, en);
5983 /* serdes Internal loopback, independent of the network cable.*/
5984 msleep(HCLGE_MAC_LINK_STATUS_MS);
5985 ret = hclge_get_mac_link_status(hdev);
5986 if (ret == mac_link_ret)
5988 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5990 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5995 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
5996 int stream_id, bool enable)
5998 struct hclge_desc desc;
5999 struct hclge_cfg_com_tqp_queue_cmd *req =
6000 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6003 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6004 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6005 req->stream_id = cpu_to_le16(stream_id);
6007 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6009 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6011 dev_err(&hdev->pdev->dev,
6012 "Tqp enable fail, status =%d.\n", ret);
/* ethtool self-test entry: select and configure the requested loopback
 * mode (app or serdes), then toggle stream 0 of every TQP so traffic
 * can flow during the test.
 */
6016 static int hclge_set_loopback(struct hnae3_handle *handle,
6017 enum hnae3_loop loop_mode, bool en)
6019 struct hclge_vport *vport = hclge_get_vport(handle);
6020 struct hnae3_knic_private_info *kinfo;
6021 struct hclge_dev *hdev = vport->back;
6024 switch (loop_mode) {
6025 case HNAE3_LOOP_APP:
6026 ret = hclge_set_app_loopback(hdev, en);
6028 case HNAE3_LOOP_SERIAL_SERDES:
6029 case HNAE3_LOOP_PARALLEL_SERDES:
6030 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6034 dev_err(&hdev->pdev->dev,
6035 "loop_mode %d is not supported\n", loop_mode);
6042 kinfo = &vport->nic.kinfo;
6043 for (i = 0; i < kinfo->num_tqps; i++) {
6044 ret = hclge_tqp_enable(hdev, i, 0, en);
6052 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6054 struct hclge_vport *vport = hclge_get_vport(handle);
6055 struct hnae3_knic_private_info *kinfo;
6056 struct hnae3_queue *queue;
6057 struct hclge_tqp *tqp;
6060 kinfo = &vport->nic.kinfo;
6061 for (i = 0; i < kinfo->num_tqps; i++) {
6062 queue = handle->kinfo.tqp[i];
6063 tqp = container_of(queue, struct hclge_tqp, q);
6064 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6068 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6070 struct hclge_vport *vport = hclge_get_vport(handle);
6071 struct hclge_dev *hdev = vport->back;
6074 mod_timer(&hdev->service_timer, jiffies + HZ);
6076 del_timer_sync(&hdev->service_timer);
6077 cancel_work_sync(&hdev->service_task);
6078 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6082 static int hclge_ae_start(struct hnae3_handle *handle)
6084 struct hclge_vport *vport = hclge_get_vport(handle);
6085 struct hclge_dev *hdev = vport->back;
6088 hclge_cfg_mac_mode(hdev, true);
6089 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6090 hdev->hw.mac.link = 0;
6092 /* reset tqp stats */
6093 hclge_reset_tqp_stats(handle);
6095 hclge_mac_start_phy(hdev);
/* Bring the port down: mark the device DOWN, drop aRFS rules, reset
 * the TQPs, disable the MAC and PHY, and refresh link status.  During
 * a non-FUNC reset only the PHY is stopped (firmware handles the MAC).
 */
6100 static void hclge_ae_stop(struct hnae3_handle *handle)
6102 struct hclge_vport *vport = hclge_get_vport(handle);
6103 struct hclge_dev *hdev = vport->back;
6106 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6108 hclge_clear_arfs_rules(handle);
6110 /* If it is not PF reset, the firmware will disable the MAC,
6111 * so it only need to stop phy here.
6113 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6114 hdev->reset_type != HNAE3_FUNC_RESET) {
6115 hclge_mac_stop_phy(hdev);
6119 for (i = 0; i < handle->kinfo.num_tqps; i++)
6120 hclge_reset_tqp(handle, i);
/* Mac disable */
6123 hclge_cfg_mac_mode(hdev, false);
6125 hclge_mac_stop_phy(hdev);
6127 /* reset tqp stats */
6128 hclge_reset_tqp_stats(handle);
6129 hclge_update_link_status(hdev);
6132 int hclge_vport_start(struct hclge_vport *vport)
6134 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6135 vport->last_active_jiffies = jiffies;
6139 void hclge_vport_stop(struct hclge_vport *vport)
6141 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
/* hnae3 client-start hook: delegate to the vport start path. */
static int hclge_client_start(struct hnae3_handle *handle)
{
	return hclge_vport_start(hclge_get_vport(handle));
}
/* hnae3 client-stop hook: delegate to the vport stop path. */
static void hclge_client_stop(struct hnae3_handle *handle)
{
	hclge_vport_stop(hclge_get_vport(handle));
}
/* Decode the outcome of a MAC-VLAN table command: @cmdq_resp is the
 * command queue status, @resp_code the table-operation result for the
 * given @op.  Returns 0 on success or a negative errno describing the
 * failure (logged here).
 */
6158 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6159 u16 cmdq_resp, u8 resp_code,
6160 enum hclge_mac_vlan_tbl_opcode op)
6162 struct hclge_dev *hdev = vport->back;
6163 int return_status = -EIO;
/* non-zero cmdq status means the command itself never executed */
6166 dev_err(&hdev->pdev->dev,
6167 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6172 if (op == HCLGE_MAC_VLAN_ADD) {
6173 if ((!resp_code) || (resp_code == 1)) {
6175 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6176 return_status = -ENOSPC;
6177 dev_err(&hdev->pdev->dev,
6178 "add mac addr failed for uc_overflow.\n");
6179 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6180 return_status = -ENOSPC;
6181 dev_err(&hdev->pdev->dev,
6182 "add mac addr failed for mc_overflow.\n");
6184 dev_err(&hdev->pdev->dev,
6185 "add mac addr failed for undefined, code=%d.\n",
6188 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
/* resp_code 1 for remove/lookup means the entry was not found */
6191 } else if (resp_code == 1) {
6192 return_status = -ENOENT;
6193 dev_dbg(&hdev->pdev->dev,
6194 "remove mac addr failed for miss.\n");
6196 dev_err(&hdev->pdev->dev,
6197 "remove mac addr failed for undefined, code=%d.\n",
6200 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6203 } else if (resp_code == 1) {
6204 return_status = -ENOENT;
6205 dev_dbg(&hdev->pdev->dev,
6206 "lookup mac addr failed for miss.\n");
6208 dev_err(&hdev->pdev->dev,
6209 "lookup mac addr failed for undefined, code=%d.\n",
6213 return_status = -EINVAL;
6214 dev_err(&hdev->pdev->dev,
6215 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6219 return return_status;
6222 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6224 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6229 if (vfid > 255 || vfid < 0)
6232 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6233 word_num = vfid / 32;
6234 bit_num = vfid % 32;
6236 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6238 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6240 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6241 bit_num = vfid % 32;
6243 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6245 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
/* Return true when no function-id bit remains set in the VF bitmap words
 * of desc[1] and desc[2] - i.e. the multicast entry is no longer used by
 * any function and may be deleted.
 * NOTE(review): the listing dropped the `return false;` / `return true;`
 * lines and variable declarations; the visible loops only show the scan.
 */
6251 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6253 #define HCLGE_DESC_NUMBER 3
6254 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6257 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6258 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6259 if (desc[i].data[j])
/* Pack a 6-byte MAC address into a MAC-VLAN table entry command:
 * bytes 0..3 (little-endian mix) go to mac_addr_hi32, bytes 4..5 to
 * mac_addr_lo16, and the entry-valid bit is set.  For multicast entries
 * the entry_type and mc_mac_en bits are additionally set.
 * NOTE(review): the `if (is_mc) {` guard around the two mc-specific
 * hnae3_set_bit calls was dropped by this listing.
 */
6265 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6266 const u8 *addr, bool is_mc)
6268 const unsigned char *mac_addr = addr;
6269 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6270 (mac_addr[0]) | (mac_addr[1] << 8);
6271 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6273 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6275 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6276 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6279 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6280 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
/* Send a MAC_VLAN_REMOVE command carrying @req and translate the firmware
 * response through hclge_get_mac_vlan_cmd_status().
 * NOTE(review): the `if (ret)` guard and `return ret;` after cmd_send were
 * dropped by this listing.
 */
6283 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6284 struct hclge_mac_vlan_tbl_entry_cmd *req)
6286 struct hclge_dev *hdev = vport->back;
6287 struct hclge_desc desc;
6292 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6294 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6296 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6298 dev_err(&hdev->pdev->dev,
6299 "del mac addr failed for cmd_send, ret =%d.\n",
6303 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6304 retval = le16_to_cpu(desc.retval);
6306 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6307 HCLGE_MAC_VLAN_REMOVE);
/* Look up a MAC-VLAN table entry.  The lookup reuses the MAC_VLAN_ADD
 * opcode in read mode; multicast lookups chain three descriptors (to read
 * back the full VF bitmap), unicast lookups use a single descriptor.
 * Returns the decoded status from hclge_get_mac_vlan_cmd_status()
 * (-ENOENT when the entry does not exist).
 * NOTE(review): the `if (is_mc)` / `else` branch markers and the error
 * return after cmd_send were dropped by this listing.
 */
6310 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6311 struct hclge_mac_vlan_tbl_entry_cmd *req,
6312 struct hclge_desc *desc,
6315 struct hclge_dev *hdev = vport->back;
6320 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6322 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6323 memcpy(desc[0].data,
6325 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6326 hclge_cmd_setup_basic_desc(&desc[1],
6327 HCLGE_OPC_MAC_VLAN_ADD,
6329 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6330 hclge_cmd_setup_basic_desc(&desc[2],
6331 HCLGE_OPC_MAC_VLAN_ADD,
6333 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6335 memcpy(desc[0].data,
6337 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6338 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6341 dev_err(&hdev->pdev->dev,
6342 "lookup mac addr failed for cmd_send, ret =%d.\n",
6346 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6347 retval = le16_to_cpu(desc[0].retval);
6349 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6350 HCLGE_MAC_VLAN_LKUP);
/* Add (or update) a MAC-VLAN table entry.  A NULL @mc_desc means a
 * unicast add via a single fresh descriptor; otherwise the three
 * descriptors previously filled by lookup are reused so the updated VF
 * bitmap is written back together with @req.  The firmware response is
 * decoded via hclge_get_mac_vlan_cmd_status().
 * NOTE(review): the `if (!mc_desc)` / `else` branch markers and the
 * final `if (ret)` / `return cfg_status;` lines were dropped by this
 * listing.
 */
6353 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6354 struct hclge_mac_vlan_tbl_entry_cmd *req,
6355 struct hclge_desc *mc_desc)
6357 struct hclge_dev *hdev = vport->back;
6364 struct hclge_desc desc;
6366 hclge_cmd_setup_basic_desc(&desc,
6367 HCLGE_OPC_MAC_VLAN_ADD,
6369 memcpy(desc.data, req,
6370 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6371 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6372 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6373 retval = le16_to_cpu(desc.retval);
6375 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6377 HCLGE_MAC_VLAN_ADD);
6379 hclge_cmd_reuse_desc(&mc_desc[0], false);
6380 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6381 hclge_cmd_reuse_desc(&mc_desc[1], false);
6382 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6383 hclge_cmd_reuse_desc(&mc_desc[2], false);
6384 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6385 memcpy(mc_desc[0].data, req,
6386 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6387 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6388 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6389 retval = le16_to_cpu(mc_desc[0].retval);
6391 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6393 HCLGE_MAC_VLAN_ADD);
6397 dev_err(&hdev->pdev->dev,
6398 "add mac addr failed for cmd_send, ret =%d.\n",
/* Allocate unicast MAC-VLAN (UMV) table space from firmware and split it
 * into a per-function private quota plus a shared pool: each of the
 * (num_req_vfs + 2) parties (VFs, the PF and one reserved) gets
 * max/(n+2) private entries, and the division remainder is added to the
 * shared pool.  A short allocation is reported but not treated as fatal.
 * NOTE(review): the `if (ret) return ret;` after hclge_set_umv_space and
 * the final `return 0;` were dropped by this listing.
 */
6406 static int hclge_init_umv_space(struct hclge_dev *hdev)
6408 u16 allocated_size = 0;
6411 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6416 if (allocated_size < hdev->wanted_umv_size)
6417 dev_warn(&hdev->pdev->dev,
6418 "Alloc umv space failed, want %d, get %d\n",
6419 hdev->wanted_umv_size, allocated_size);
6421 mutex_init(&hdev->umv_mutex);
6422 hdev->max_umv_size = allocated_size;
6423 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6424 * preserve some unicast mac vlan table entries shared by pf
6427 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6428 hdev->share_umv_size = hdev->priv_umv_size +
6429 hdev->max_umv_size % (hdev->num_req_vfs + 2);
/* Release the UMV space back to firmware (if any was allocated) and
 * destroy the protecting mutex.
 * NOTE(review): the `if (ret) return ret;` after the free call and the
 * final `return 0;` were dropped by this listing.
 */
6434 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6438 if (hdev->max_umv_size > 0) {
6439 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6443 hdev->max_umv_size = 0;
6445 mutex_destroy(&hdev->umv_mutex);
/* Firmware command to allocate (is_alloc) or free UMV table space.
 * On a successful allocation the actually-granted size is returned via
 * @allocated_size (taken from desc.data[1]).
 * NOTE(review): the `if (is_alloc)` guard around the allocate bit, the
 * error return after cmd_send and the final `return 0;` were dropped by
 * this listing.
 */
6450 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6451 u16 *allocated_size, bool is_alloc)
6453 struct hclge_umv_spc_alc_cmd *req;
6454 struct hclge_desc desc;
6457 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6458 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6460 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6462 req->space_size = cpu_to_le32(space_size);
6464 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6466 dev_err(&hdev->pdev->dev,
6467 "%s umv space failed for cmd_send, ret =%d\n",
6468 is_alloc ? "allocate" : "free", ret);
6472 if (is_alloc && allocated_size)
6473 *allocated_size = le32_to_cpu(desc.data[1]);
/* Reset UMV accounting after a reset: zero every vport's used counter and
 * recompute the shared pool back to its initial size (private quota plus
 * the division remainder), under umv_mutex.
 */
6478 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6480 struct hclge_vport *vport;
6483 for (i = 0; i < hdev->num_alloc_vport; i++) {
6484 vport = &hdev->vport[i];
6485 vport->used_umv_num = 0;
6488 mutex_lock(&hdev->umv_mutex);
6489 hdev->share_umv_size = hdev->priv_umv_size +
6490 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6491 mutex_unlock(&hdev->umv_mutex);
/* Under umv_mutex: the UMV space is "full" for this vport when its
 * private quota is exhausted AND the shared pool is empty.
 * NOTE(review): the `return is_full;` line was dropped by this listing.
 */
6494 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6496 struct hclge_dev *hdev = vport->back;
6499 mutex_lock(&hdev->umv_mutex);
6500 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6501 hdev->share_umv_size == 0);
6502 mutex_unlock(&hdev->umv_mutex);
/* Adjust UMV accounting when a unicast entry is freed (@is_free) or
 * added.  Entries beyond the private quota are charged to / refunded from
 * the shared pool.  All updates happen under umv_mutex.
 * NOTE(review): the `if (is_free) {` / `} else {` branch markers were
 * dropped by this listing - the first three statements are the free path,
 * the remainder the add path.
 */
6507 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6509 struct hclge_dev *hdev = vport->back;
6511 mutex_lock(&hdev->umv_mutex);
6513 if (vport->used_umv_num > hdev->priv_umv_size)
6514 hdev->share_umv_size++;
6516 if (vport->used_umv_num > 0)
6517 vport->used_umv_num--;
6519 if (vport->used_umv_num >= hdev->priv_umv_size &&
6520 hdev->share_umv_size > 0)
6521 hdev->share_umv_size--;
6522 vport->used_umv_num++;
6524 mutex_unlock(&hdev->umv_mutex);
/* hnae3 hook: add a unicast MAC address on behalf of this handle. */
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	return hclge_add_uc_addr_common(hclge_get_vport(handle), addr);
}
/* Add a unicast MAC address for @vport: validate the address (must not be
 * zero/broadcast/multicast), build the table entry tagged with the vport's
 * egress VF id, then look the address up and only add it when absent and
 * UMV space remains - duplicates are rejected by lookup, and a successful
 * add charges hclge_update_umv_space().
 * NOTE(review): several lines (return -EINVAL after the address check,
 * the duplicate/else branches near the end and the final return) were
 * dropped by this listing.
 */
6535 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6536 const unsigned char *addr)
6538 struct hclge_dev *hdev = vport->back;
6539 struct hclge_mac_vlan_tbl_entry_cmd req;
6540 struct hclge_desc desc;
6541 u16 egress_port = 0;
6544 /* mac addr check */
6545 if (is_zero_ether_addr(addr) ||
6546 is_broadcast_ether_addr(addr) ||
6547 is_multicast_ether_addr(addr)) {
6548 dev_err(&hdev->pdev->dev,
6549 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6550 addr, is_zero_ether_addr(addr),
6551 is_broadcast_ether_addr(addr),
6552 is_multicast_ether_addr(addr));
6556 memset(&req, 0, sizeof(req));
6558 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6559 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6561 req.egress_port = cpu_to_le16(egress_port);
6563 hclge_prepare_mac_addr(&req, addr, false);
6565 /* Lookup the mac address in the mac_vlan table, and add
6566 * it if the entry is inexistent. Repeated unicast entry
6567 * is not allowed in the mac vlan table.
6569 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6570 if (ret == -ENOENT) {
6571 if (!hclge_is_umv_space_full(vport)) {
6572 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6574 hclge_update_umv_space(vport, false);
6578 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6579 hdev->priv_umv_size);
6584 /* check if we just hit the duplicate */
6586 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6587 vport->vport_id, addr);
6591 dev_err(&hdev->pdev->dev,
6592 "PF failed to add unicast entry(%pM) in the MAC table\n",
/* hnae3 hook: remove a unicast MAC address on behalf of this handle. */
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	return hclge_rm_uc_addr_common(hclge_get_vport(handle), addr);
}
/* Remove a unicast MAC address for @vport: validate the address, build
 * the matching table entry and delete it from the hardware table.  On
 * success the consumed UMV space is given back (is_free = true).
 * NOTE(review): the -EINVAL return after the address check, the
 * `if (!ret)` guard before the space refund and the final return were
 * dropped by this listing.
 */
6606 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6607 const unsigned char *addr)
6609 struct hclge_dev *hdev = vport->back;
6610 struct hclge_mac_vlan_tbl_entry_cmd req;
6613 /* mac addr check */
6614 if (is_zero_ether_addr(addr) ||
6615 is_broadcast_ether_addr(addr) ||
6616 is_multicast_ether_addr(addr)) {
6617 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
6622 memset(&req, 0, sizeof(req));
6623 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6624 hclge_prepare_mac_addr(&req, addr, false);
6625 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6627 hclge_update_umv_space(vport, true);
/* hnae3 hook: add a multicast MAC address on behalf of this handle. */
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	return hclge_add_mc_addr_common(hclge_get_vport(handle), addr);
}
/* Add a multicast MAC address for @vport: look the address up; if absent,
 * start from zeroed VF bitmaps, set this vport's bit via
 * hclge_update_desc_vfid() and write the entry back.  -ENOSPC from the
 * add means the multicast table is full.
 * NOTE(review): the -EINVAL return after the address check, the
 * `if (status == -ENOENT)` branch marker, the error return after
 * update_desc_vfid and the final return were dropped by this listing.
 */
6640 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6641 const unsigned char *addr)
6643 struct hclge_dev *hdev = vport->back;
6644 struct hclge_mac_vlan_tbl_entry_cmd req;
6645 struct hclge_desc desc[3];
6648 /* mac addr check */
6649 if (!is_multicast_ether_addr(addr)) {
6650 dev_err(&hdev->pdev->dev,
6651 "Add mc mac err! invalid mac:%pM.\n",
6655 memset(&req, 0, sizeof(req));
6656 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6657 hclge_prepare_mac_addr(&req, addr, true);
6658 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6660 /* This mac addr do not exist, add new entry for it */
6661 memset(desc[0].data, 0, sizeof(desc[0].data));
6662 memset(desc[1].data, 0, sizeof(desc[0].data));
6663 memset(desc[2].data, 0, sizeof(desc[0].data));
6665 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6668 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6670 if (status == -ENOSPC)
6671 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
/* hnae3 hook: remove a multicast MAC address on behalf of this handle. */
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	return hclge_rm_mc_addr_common(hclge_get_vport(handle), addr);
}
/* Remove a multicast MAC address for @vport: look the entry up, clear
 * this vport's bit in its VF bitmap, then either delete the entry
 * entirely (no function references it any more) or write the updated
 * bitmap back.  A lookup miss is tolerated - the address may only live
 * in the MTA table, whose entries cover address ranges and are refreshed
 * elsewhere (update_mta_status via hns3_nic_set_rx_mode).
 * NOTE(review): the -EINVAL return after the address check, the
 * `if (!status)` / `else` branch markers and the final return were
 * dropped by this listing.
 */
6684 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6685 const unsigned char *addr)
6687 struct hclge_dev *hdev = vport->back;
6688 struct hclge_mac_vlan_tbl_entry_cmd req;
6689 enum hclge_cmd_status status;
6690 struct hclge_desc desc[3];
6692 /* mac addr check */
6693 if (!is_multicast_ether_addr(addr)) {
6694 dev_dbg(&hdev->pdev->dev,
6695 "Remove mc mac err! invalid mac:%pM.\n",
6700 memset(&req, 0, sizeof(req));
6701 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6702 hclge_prepare_mac_addr(&req, addr, true);
6703 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6705 /* This mac addr exist, remove this handle's VFID for it */
6706 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6710 if (hclge_is_all_function_id_zero(desc))
6711 /* All the vfid is zero, so need to delete this entry */
6712 status = hclge_remove_mac_vlan_tbl(vport, &req);
6714 /* Not all the vfid is zero, update the vfid */
6715 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6718 /* Maybe this mac address is in mta table, but it cannot be
6719 * deleted here because an entry of mta represents an address
6720 * range rather than a specific address. the delete action to
6721 * all entries will take effect in update_mta_status called by
6722 * hns3_nic_set_rx_mode.
/* Record a MAC address configured by a VF in the PF-side shadow list
 * (uc_mac_list or mc_mac_list, selected by @mac_type) so it can be
 * restored or cleaned up later.  The PF itself (vport_id 0) is not
 * tracked.
 * NOTE(review): the early return for vport 0 and the NULL check after
 * kzalloc were dropped by this listing.
 */
6730 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6731 enum HCLGE_MAC_ADDR_TYPE mac_type)
6733 struct hclge_vport_mac_addr_cfg *mac_cfg;
6734 struct list_head *list;
6736 if (!vport->vport_id)
6739 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6743 mac_cfg->hd_tbl_status = true;
6744 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6746 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6747 &vport->uc_mac_list : &vport->mc_mac_list;
6749 list_add_tail(&mac_cfg->node, list);
6752 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6754 enum HCLGE_MAC_ADDR_TYPE mac_type)
6756 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6757 struct list_head *list;
6758 bool uc_flag, mc_flag;
6760 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6761 &vport->uc_mac_list : &vport->mc_mac_list;
6763 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6764 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6766 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6767 if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
6768 if (uc_flag && mac_cfg->hd_tbl_status)
6769 hclge_rm_uc_addr_common(vport, mac_addr);
6771 if (mc_flag && mac_cfg->hd_tbl_status)
6772 hclge_rm_mc_addr_common(vport, mac_addr);
6774 list_del(&mac_cfg->node);
/* Walk a vport's uc or mc shadow list: remove every hardware entry that
 * was written to the table, then either delete the list nodes
 * (@is_del_list, e.g. on VF removal) or just mark them not-in-hardware so
 * they can be restored later.
 * NOTE(review): the `if (is_del_list) {` guard around list_del and the
 * kfree of each node were dropped by this listing.
 */
6781 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6782 enum HCLGE_MAC_ADDR_TYPE mac_type)
6784 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6785 struct list_head *list;
6787 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6788 &vport->uc_mac_list : &vport->mc_mac_list;
6790 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6791 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6792 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6794 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6795 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6797 mac_cfg->hd_tbl_status = false;
6799 list_del(&mac_cfg->node);
/* Driver uninit: free every node of every vport's uc and mc MAC shadow
 * lists under vport_cfg_mutex.  Hardware entries are not touched here.
 * NOTE(review): the kfree(mac) after each list_del was dropped by this
 * listing.
 */
6805 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6807 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6808 struct hclge_vport *vport;
6811 mutex_lock(&hdev->vport_cfg_mutex);
6812 for (i = 0; i < hdev->num_alloc_vport; i++) {
6813 vport = &hdev->vport[i];
6814 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6815 list_del(&mac->node);
6819 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6820 list_del(&mac->node);
6824 mutex_unlock(&hdev->vport_cfg_mutex);
/* Decode the firmware completion of a manager-table (MAC ethertype) add:
 * success and already-added are OK; table overflow, key conflict and any
 * undefined code map to -EIO.
 * NOTE(review): the `if (cmdq_resp)` guard before the first dev_err, the
 * success assignment, `break;` statements and the `default:` label were
 * dropped by this listing.
 */
6827 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6828 u16 cmdq_resp, u8 resp_code)
6830 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6831 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6832 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6833 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6838 dev_err(&hdev->pdev->dev,
6839 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6844 switch (resp_code) {
6845 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6846 case HCLGE_ETHERTYPE_ALREADY_ADD:
6849 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6850 dev_err(&hdev->pdev->dev,
6851 "add mac ethertype failed for manager table overflow.\n");
6852 return_status = -EIO;
6854 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6855 dev_err(&hdev->pdev->dev,
6856 "add mac ethertype failed for key conflict.\n");
6857 return_status = -EIO;
6860 dev_err(&hdev->pdev->dev,
6861 "add mac ethertype failed for undefined, code=%d.\n",
6863 return_status = -EIO;
6866 return return_status;
/* Send one MAC_ETHTYPE_ADD command carrying @req and translate the
 * firmware response through hclge_get_mac_ethertype_cmd_status().
 * NOTE(review): the `if (ret)` guard and `return ret;` after cmd_send
 * were dropped by this listing.
 */
6869 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6870 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6872 struct hclge_desc desc;
6877 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6878 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6880 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6882 dev_err(&hdev->pdev->dev,
6883 "add mac ethertype failed for cmd_send, ret =%d.\n",
6888 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6889 retval = le16_to_cpu(desc.retval);
6891 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
/* Program every static entry of hclge_mgr_table into the hardware manager
 * table, failing fast on the first error.
 * NOTE(review): the `if (ret) { ... return ret; }` structure and the
 * final `return 0;` were dropped by this listing.
 */
6894 static int init_mgr_tbl(struct hclge_dev *hdev)
6899 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6900 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6902 dev_err(&hdev->pdev->dev,
6903 "add mac ethertype failed, ret =%d.\n",
6912 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6914 struct hclge_vport *vport = hclge_get_vport(handle);
6915 struct hclge_dev *hdev = vport->back;
6917 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6920 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6923 const unsigned char *new_addr = (const unsigned char *)p;
6924 struct hclge_vport *vport = hclge_get_vport(handle);
6925 struct hclge_dev *hdev = vport->back;
6928 /* mac addr check */
6929 if (is_zero_ether_addr(new_addr) ||
6930 is_broadcast_ether_addr(new_addr) ||
6931 is_multicast_ether_addr(new_addr)) {
6932 dev_err(&hdev->pdev->dev,
6933 "Change uc mac err! invalid mac:%p.\n",
6938 if ((!is_first || is_kdump_kernel()) &&
6939 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6940 dev_warn(&hdev->pdev->dev,
6941 "remove old uc mac address fail.\n");
6943 ret = hclge_add_uc_addr(handle, new_addr);
6945 dev_err(&hdev->pdev->dev,
6946 "add uc mac address fail, ret =%d.\n",
6950 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6951 dev_err(&hdev->pdev->dev,
6952 "restore uc mac address fail.\n");
6957 ret = hclge_pause_addr_cfg(hdev, new_addr);
6959 dev_err(&hdev->pdev->dev,
6960 "configure mac pause address fail, ret =%d.\n",
6965 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6970 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6973 struct hclge_vport *vport = hclge_get_vport(handle);
6974 struct hclge_dev *hdev = vport->back;
6976 if (!hdev->hw.mac.phydev)
6979 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
/* Firmware command to enable/disable a VLAN filter: @vlan_type selects
 * VF or port filtering, @fe_type the filter-enable bits, @filter_en the
 * on/off state (off writes 0), @vf_id the target function.
 * NOTE(review): the assignment of vf_id into the request and the final
 * `return ret;` were dropped by this listing.
 */
6982 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6983 u8 fe_type, bool filter_en, u8 vf_id)
6985 struct hclge_vlan_filter_ctrl_cmd *req;
6986 struct hclge_desc desc;
6989 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6991 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6992 req->vlan_type = vlan_type;
6993 req->vlan_fe = filter_en ? fe_type : 0;
6996 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6998 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7004 #define HCLGE_FILTER_TYPE_VF 0
7005 #define HCLGE_FILTER_TYPE_PORT 1
7006 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7007 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7008 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7009 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7010 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7011 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7012 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7013 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7014 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
/* hnae3 hook: toggle VLAN filtering.  HW revision >= 0x21 supports
 * separate VF-egress and port-ingress filter controls; older revisions
 * only have the V1 egress bit.  The handle's HNAE3_VLAN_FLTR flag is
 * updated to mirror the new state.
 * NOTE(review): the `} else {` between the two revision paths and the
 * `if (enable)` / `else` around the flag update were dropped by this
 * listing.
 */
7016 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7018 struct hclge_vport *vport = hclge_get_vport(handle);
7019 struct hclge_dev *hdev = vport->back;
7021 if (hdev->pdev->revision >= 0x21) {
7022 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7023 HCLGE_FILTER_FE_EGRESS, enable, 0);
7024 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7025 HCLGE_FILTER_FE_INGRESS, enable, 0);
7027 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7028 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7032 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7034 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
/* Program one VLAN id into a VF's VLAN filter via a two-descriptor
 * command (the VF bitmap spans both descriptors).  Adds are skipped when
 * firmware already reported the VF VLAN table full (vf_vlan_full bit).
 * Response decoding: for add, code 0/1 is success and NO_ENTRY (2) marks
 * the table full (filter disabled by firmware); for kill, code 0 is
 * success and DEL_NO_FOUND (1) only warns.
 * NOTE(review): the `if (!is_kill) {` / kill-path branch markers, several
 * returns and the proto/qos handling lines were dropped by this listing.
 */
7037 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7038 bool is_kill, u16 vlan, u8 qos,
7041 #define HCLGE_MAX_VF_BYTES 16
7042 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7043 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7044 struct hclge_desc desc[2];
7049 /* if vf vlan table is full, firmware will close vf vlan filter, it
7050 * is unable and unnecessary to add new vlan id to vf vlan filter
7052 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7055 hclge_cmd_setup_basic_desc(&desc[0],
7056 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7057 hclge_cmd_setup_basic_desc(&desc[1],
7058 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7060 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7062 vf_byte_off = vfid / 8;
7063 vf_byte_val = 1 << (vfid % 8);
7065 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7066 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7068 req0->vlan_id = cpu_to_le16(vlan);
7069 req0->vlan_cfg = is_kill;
7071 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7072 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7074 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7076 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7078 dev_err(&hdev->pdev->dev,
7079 "Send vf vlan command fail, ret =%d.\n",
7085 #define HCLGE_VF_VLAN_NO_ENTRY 2
7086 if (!req0->resp_code || req0->resp_code == 1)
7089 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7090 set_bit(vfid, hdev->vf_vlan_full);
7091 dev_warn(&hdev->pdev->dev,
7092 "vf vlan table is full, vf vlan filter is disabled\n");
7096 dev_err(&hdev->pdev->dev,
7097 "Add vf vlan filter fail, ret =%d.\n",
7100 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7101 if (!req0->resp_code)
7104 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7105 dev_warn(&hdev->pdev->dev,
7106 "vlan %d filter is not in vf vlan table\n",
7111 dev_err(&hdev->pdev->dev,
7112 "Kill vf vlan filter fail, ret =%d.\n",
/* Program one VLAN id into the port-level VLAN filter bitmap.  The 4K
 * VLAN space is addressed in groups of 160: vlan_offset selects the
 * group, then a byte/bit within the group's bitmap selects the id;
 * @is_kill clears instead of sets.
 * NOTE(review): the final `return ret;` was dropped by this listing.
 */
7119 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7120 u16 vlan_id, bool is_kill)
7122 struct hclge_vlan_filter_pf_cfg_cmd *req;
7123 struct hclge_desc desc;
7124 u8 vlan_offset_byte_val;
7125 u8 vlan_offset_byte;
7129 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7131 vlan_offset_160 = vlan_id / 160;
7132 vlan_offset_byte = (vlan_id % 160) / 8;
7133 vlan_offset_byte_val = 1 << (vlan_id % 8);
7135 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7136 req->vlan_offset = vlan_offset_160;
7137 req->vlan_cfg = is_kill;
7138 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7140 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7142 dev_err(&hdev->pdev->dev,
7143 "port vlan command, send fail, ret =%d.\n", ret);
/* Program a VLAN for one vport: first the VF-level filter, then - using
 * the per-vlan vport bitmap in hdev->vlan_table as a reference count -
 * the port-level filter is only added for the first vport on a VLAN and
 * only killed when the last vport leaves it.  Killing VLAN 0 is a no-op;
 * adding VLAN 0 twice (8021q module quirk) is tolerated.
 * NOTE(review): several `return` statements and the `if (is_kill &&`
 * opener of the delete-check were dropped by this listing.
 */
7147 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7148 u16 vport_id, u16 vlan_id, u8 qos,
7151 u16 vport_idx, vport_num = 0;
7154 if (is_kill && !vlan_id)
7157 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7160 dev_err(&hdev->pdev->dev,
7161 "Set %d vport vlan filter config fail, ret =%d.\n",
7166 /* vlan 0 may be added twice when 8021q module is enabled */
7167 if (!is_kill && !vlan_id &&
7168 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7171 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7172 dev_err(&hdev->pdev->dev,
7173 "Add port vlan failed, vport %d is already in vlan %d\n",
7179 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7180 dev_err(&hdev->pdev->dev,
7181 "Delete port vlan failed, vport %d is not in vlan %d\n",
7186 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7189 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7190 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
/* Push the vport's TX VLAN tag configuration (accept/insert bits and
 * default tags from vport->txvlan_cfg) to hardware via a
 * VLAN_PORT_TX_CFG command; the command addresses the vport through a
 * per-byte bitmap indexed by vf_offset.
 * NOTE(review): the final `return status;` was dropped by this listing.
 */
7196 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7198 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7199 struct hclge_vport_vtag_tx_cfg_cmd *req;
7200 struct hclge_dev *hdev = vport->back;
7201 struct hclge_desc desc;
7204 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7206 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7207 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7208 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7209 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7210 vcfg->accept_tag1 ? 1 : 0);
7211 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7212 vcfg->accept_untag1 ? 1 : 0);
7213 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7214 vcfg->accept_tag2 ? 1 : 0);
7215 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7216 vcfg->accept_untag2 ? 1 : 0);
7217 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7218 vcfg->insert_tag1_en ? 1 : 0);
7219 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7220 vcfg->insert_tag2_en ? 1 : 0);
7221 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7223 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7224 req->vf_bitmap[req->vf_offset] =
7225 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7227 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7229 dev_err(&hdev->pdev->dev,
7230 "Send port txvlan cfg command fail, ret =%d\n",
/* Push the vport's RX VLAN tag configuration (strip/show bits from
 * vport->rxvlan_cfg) to hardware via a VLAN_PORT_RX_CFG command,
 * addressing the vport through the same vf_offset/bitmap scheme as the
 * TX variant.
 * NOTE(review): the final `return status;` was dropped by this listing.
 */
7236 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7238 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7239 struct hclge_vport_vtag_rx_cfg_cmd *req;
7240 struct hclge_dev *hdev = vport->back;
7241 struct hclge_desc desc;
7244 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7246 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7247 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7248 vcfg->strip_tag1_en ? 1 : 0);
7249 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7250 vcfg->strip_tag2_en ? 1 : 0);
7251 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7252 vcfg->vlan1_vlan_prionly ? 1 : 0);
7253 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7254 vcfg->vlan2_vlan_prionly ? 1 : 0);
7256 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7257 req->vf_bitmap[req->vf_offset] =
7258 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7260 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7262 dev_err(&hdev->pdev->dev,
7263 "Send port rxvlan cfg command fail, ret =%d\n",
/* Derive and program the vport's TX and RX VLAN tag handling from the
 * port-base-VLAN state: when disabled, frames keep their own tag1 and RX
 * strip follows rx_vlan_offload_en; when enabled, hardware inserts the
 * port-base tag as tag1 on TX and always strips tag2 on RX.
 * NOTE(review): the `} else {` between the two state branches was
 * dropped by this listing.
 */
7269 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7270 u16 port_base_vlan_state,
7275 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7276 vport->txvlan_cfg.accept_tag1 = true;
7277 vport->txvlan_cfg.insert_tag1_en = false;
7278 vport->txvlan_cfg.default_tag1 = 0;
7280 vport->txvlan_cfg.accept_tag1 = false;
7281 vport->txvlan_cfg.insert_tag1_en = true;
7282 vport->txvlan_cfg.default_tag1 = vlan_tag;
7285 vport->txvlan_cfg.accept_untag1 = true;
7287 /* accept_tag2 and accept_untag2 are not supported on
7288 * pdev revision(0x20), new revision support them,
7289 * this two fields can not be configured by user.
7291 vport->txvlan_cfg.accept_tag2 = true;
7292 vport->txvlan_cfg.accept_untag2 = true;
7293 vport->txvlan_cfg.insert_tag2_en = false;
7294 vport->txvlan_cfg.default_tag2 = 0;
7296 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7297 vport->rxvlan_cfg.strip_tag1_en = false;
7298 vport->rxvlan_cfg.strip_tag2_en =
7299 vport->rxvlan_cfg.rx_vlan_offload_en;
7301 vport->rxvlan_cfg.strip_tag1_en =
7302 vport->rxvlan_cfg.rx_vlan_offload_en;
7303 vport->rxvlan_cfg.strip_tag2_en = true;
7305 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7306 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7308 ret = hclge_set_vlan_tx_offload_cfg(vport);
7312 return hclge_set_vlan_rx_offload_cfg(vport);
/* Program the RX (outer/inner first/second) and TX (outer/inner) VLAN
 * TPIDs from hdev->vlan_type_cfg via two firmware commands.
 * NOTE(review): the error returns after each cmd_send and the final
 * `return 0;` were dropped by this listing.
 */
7315 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7317 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7318 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7319 struct hclge_desc desc;
7322 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7323 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7324 rx_req->ot_fst_vlan_type =
7325 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7326 rx_req->ot_sec_vlan_type =
7327 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7328 rx_req->in_fst_vlan_type =
7329 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7330 rx_req->in_sec_vlan_type =
7331 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7333 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7335 dev_err(&hdev->pdev->dev,
7336 "Send rxvlan protocol type command fail, ret =%d\n",
7341 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7343 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7344 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7345 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7347 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7349 dev_err(&hdev->pdev->dev,
7350 "Send txvlan protocol type command fail, ret =%d\n",
/* Initialize all VLAN hardware state: per-function (rev >= 0x21) or V1
 * egress filters plus the port ingress filter, default 802.1Q TPIDs,
 * per-vport tag offload derived from each vport's port-base-VLAN config,
 * and finally VLAN 0 so untagged traffic passes.
 * NOTE(review): the `} else {` between the revision paths and several
 * `if (ret) return ret;` lines were dropped by this listing.
 */
7356 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7358 #define HCLGE_DEF_VLAN_TYPE 0x8100
7360 struct hnae3_handle *handle = &hdev->vport[0].nic;
7361 struct hclge_vport *vport;
7365 if (hdev->pdev->revision >= 0x21) {
7366 /* for revision 0x21, vf vlan filter is per function */
7367 for (i = 0; i < hdev->num_alloc_vport; i++) {
7368 vport = &hdev->vport[i];
7369 ret = hclge_set_vlan_filter_ctrl(hdev,
7370 HCLGE_FILTER_TYPE_VF,
7371 HCLGE_FILTER_FE_EGRESS,
7378 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7379 HCLGE_FILTER_FE_INGRESS, true,
7384 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7385 HCLGE_FILTER_FE_EGRESS_V1_B,
7391 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7393 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7394 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7395 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7396 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7397 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7398 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7400 ret = hclge_set_vlan_protocol_type(hdev);
7404 for (i = 0; i < hdev->num_alloc_vport; i++) {
7407 vport = &hdev->vport[i];
7408 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7410 ret = hclge_vlan_offload_cfg(vport,
7411 vport->port_base_vlan_cfg.state,
7417 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
/* Track one VLAN id in the vport's shadow vlan_list; @writen_to_tbl
 * records whether the id has already been programmed into hardware.
 * NOTE(review): the NULL check after kzalloc was dropped by this listing.
 */
7420 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7423 struct hclge_vport_vlan_cfg *vlan;
7425 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7429 vlan->hd_tbl_status = writen_to_tbl;
7430 vlan->vlan_id = vlan_id;
7432 list_add_tail(&vlan->node, &vport->vlan_list);
/* Write every VLAN from the vport's shadow list that is not yet in
 * hardware (hd_tbl_status == false) into the hardware filter, marking
 * each as written; used to restore the list (e.g. after reset).
 * NOTE(review): the error `return ret;` and the final `return 0;` were
 * dropped by this listing.
 */
7435 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7437 struct hclge_vport_vlan_cfg *vlan, *tmp;
7438 struct hclge_dev *hdev = vport->back;
7441 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7442 if (!vlan->hd_tbl_status) {
7443 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7445 vlan->vlan_id, 0, false);
7447 dev_err(&hdev->pdev->dev,
7448 "restore vport vlan list failed, ret=%d\n",
7453 vlan->hd_tbl_status = true;
/* Remove one VLAN id from the vport's shadow list; when @is_write_tbl is
 * set and the id was programmed, it is also killed in the hardware
 * filter.
 * NOTE(review): the arguments of the hclge_set_vlan_filter_hw call, the
 * kfree of the node and the terminating break were dropped by this
 * listing.
 */
7459 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7462 struct hclge_vport_vlan_cfg *vlan, *tmp;
7463 struct hclge_dev *hdev = vport->back;
7465 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7466 if (vlan->vlan_id == vlan_id) {
7467 if (is_write_tbl && vlan->hd_tbl_status)
7468 hclge_set_vlan_filter_hw(hdev,
7474 list_del(&vlan->node);
/* Kill every hardware-programmed VLAN of this vport, then either delete
 * the shadow-list nodes (@is_del_list) or just mark them not-in-hardware
 * for a later restore.
 * NOTE(review): the hclge_set_vlan_filter_hw arguments, the
 * `if (is_del_list)` guard and the kfree of each node were dropped by
 * this listing.
 */
7481 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7483 struct hclge_vport_vlan_cfg *vlan, *tmp;
7484 struct hclge_dev *hdev = vport->back;
7486 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7487 if (vlan->hd_tbl_status)
7488 hclge_set_vlan_filter_hw(hdev,
7494 vlan->hd_tbl_status = false;
7496 list_del(&vlan->node);
/* Driver uninit: free every node of every vport's shadow vlan_list under
 * vport_cfg_mutex.  Hardware filters are not touched here.
 * NOTE(review): the kfree(vlan) after list_del was dropped by this
 * listing.
 */
7502 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7504 struct hclge_vport_vlan_cfg *vlan, *tmp;
7505 struct hclge_vport *vport;
7508 mutex_lock(&hdev->vport_cfg_mutex);
7509 for (i = 0; i < hdev->num_alloc_vport; i++) {
7510 vport = &hdev->vport[i];
7511 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7512 list_del(&vlan->node);
7516 mutex_unlock(&hdev->vport_cfg_mutex);
/* hnae3 hook: after a reset, re-program VLAN state for every vport under
 * vport_cfg_mutex - first the port-base VLAN (when enabled) using its
 * saved proto/tag/qos, then every VLAN recorded in the shadow list that
 * was previously written to hardware.
 * NOTE(review): the arguments of the second hclge_set_vlan_filter_hw
 * call were dropped by this listing.
 */
7519 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7521 struct hclge_vport *vport = hclge_get_vport(handle);
7522 struct hclge_vport_vlan_cfg *vlan, *tmp;
7523 struct hclge_dev *hdev = vport->back;
7524 u16 vlan_proto, qos;
7528 mutex_lock(&hdev->vport_cfg_mutex);
7529 for (i = 0; i < hdev->num_alloc_vport; i++) {
7530 vport = &hdev->vport[i];
7531 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7532 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7533 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7534 state = vport->port_base_vlan_cfg.state;
7536 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7537 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7538 vport->vport_id, vlan_id, qos,
7543 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7544 if (vlan->hd_tbl_status)
7545 hclge_set_vlan_filter_hw(hdev,
7553 mutex_unlock(&hdev->vport_cfg_mutex);
/* Configure RX VLAN tag stripping. Which tag (inner/outer) follows the
 * 'enable' flag depends on whether a port-based VLAN is active.
 */
7556 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7558 struct hclge_vport *vport = hclge_get_vport(handle);
7560 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7561 vport->rxvlan_cfg.strip_tag1_en = false;
7562 vport->rxvlan_cfg.strip_tag2_en = enable;
7564 vport->rxvlan_cfg.strip_tag1_en = enable;
7565 vport->rxvlan_cfg.strip_tag2_en = true;
7567 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7568 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7569 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7571 return hclge_set_vlan_rx_offload_cfg(vport);
/* Swap filter ownership when the port-based VLAN state flips: on enable,
 * drop the per-VLAN HW entries and install the port VLAN; on disable,
 * remove the old port VLAN and restore the software list into HW.
 */
7574 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7575 u16 port_base_vlan_state,
7576 struct hclge_vlan_info *new_info,
7577 struct hclge_vlan_info *old_info)
7579 struct hclge_dev *hdev = vport->back;
7582 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7583 hclge_rm_vport_all_vlan_table(vport, false);
7584 return hclge_set_vlan_filter_hw(hdev,
7585 htons(new_info->vlan_proto),
7588 new_info->qos, false);
7591 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7592 vport->vport_id, old_info->vlan_tag,
7593 old_info->qos, true);
7597 return hclge_add_vport_all_vlan_table(vport);
/* Apply a new port-based VLAN configuration: update TX/RX offload, update
 * the HW filter entries (add-new-then-remove-old on MODIFY), and record the
 * new state/tag/qos/proto in the vport.
 */
7600 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7601 struct hclge_vlan_info *vlan_info)
7603 struct hnae3_handle *nic = &vport->nic;
7604 struct hclge_vlan_info *old_vlan_info;
7605 struct hclge_dev *hdev = vport->back;
7608 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7610 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7614 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7615 /* add new VLAN tag */
7616 ret = hclge_set_vlan_filter_hw(hdev,
7617 htons(vlan_info->vlan_proto),
7619 vlan_info->vlan_tag,
7620 vlan_info->qos, false);
7624 /* remove old VLAN tag */
7625 ret = hclge_set_vlan_filter_hw(hdev,
7626 htons(old_vlan_info->vlan_proto),
7628 old_vlan_info->vlan_tag,
7629 old_vlan_info->qos, true);
7636 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7641 /* update state only when disable/enable port based VLAN */
7642 vport->port_base_vlan_cfg.state = state;
7643 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7644 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7646 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7649 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7650 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7651 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
/* Decide the port-based VLAN state transition implied by a requested vlan:
 * NOCHANGE / ENABLE / DISABLE / MODIFY relative to the current state.
 * NOTE(review): the conditions on the elided lines (e.g. vlan == 0 tests)
 * are not visible here.
 */
7656 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7657 enum hnae3_port_base_vlan_state state,
7660 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7662 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7664 return HNAE3_PORT_BASE_VLAN_ENABLE;
7667 return HNAE3_PORT_BASE_VLAN_DISABLE;
7668 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7669 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7671 return HNAE3_PORT_BASE_VLAN_MODIFY;
/* ndo_set_vf_vlan path: validate vfid/vlan/qos/proto, then apply the
 * port-based VLAN either directly (PF itself, with client down/up around
 * the update) or by pushing the info to the target VF via mailbox.
 * Rejected on revision 0x20 HW (feature unsupported there).
 */
7675 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7676 u16 vlan, u8 qos, __be16 proto)
7678 struct hclge_vport *vport = hclge_get_vport(handle);
7679 struct hclge_dev *hdev = vport->back;
7680 struct hclge_vlan_info vlan_info;
7684 if (hdev->pdev->revision == 0x20)
7687 /* qos is a 3 bits value, so can not be bigger than 7 */
7688 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7690 if (proto != htons(ETH_P_8021Q))
7691 return -EPROTONOSUPPORT;
7693 vport = &hdev->vport[vfid];
7694 state = hclge_get_port_base_vlan_state(vport,
7695 vport->port_base_vlan_cfg.state,
7697 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7700 vlan_info.vlan_tag = vlan;
7701 vlan_info.qos = qos;
7702 vlan_info.vlan_proto = ntohs(proto);
7704 /* update port based VLAN for PF */
7706 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7707 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7708 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7713 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7714 return hclge_update_port_base_vlan_cfg(vport, state,
7717 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
/* Add/remove a VLAN for this vport. With port-based VLAN disabled the HW
 * table is updated directly; otherwise only the software list is updated
 * and HW programming is deferred until port-based VLAN is disabled.
 */
7725 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7726 u16 vlan_id, bool is_kill)
7728 struct hclge_vport *vport = hclge_get_vport(handle);
7729 struct hclge_dev *hdev = vport->back;
7730 bool writen_to_tbl = false;
7733 /* when port based VLAN enabled, we use port based VLAN as the VLAN
7734 * filter entry. In this case, we don't update VLAN filter table
7735 * when user add new VLAN or remove exist VLAN, just update the vport
7736 * VLAN list. The VLAN id in VLAN list won't be writen in VLAN filter
7737 * table until port based VLAN disabled
7739 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7740 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7741 vlan_id, 0, is_kill);
7742 writen_to_tbl = true;
7749 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7751 hclge_add_vport_vlan_table(vport, vlan_id,
/* Program the MAC max frame size via the CONFIG_MAX_FRM_SIZE firmware
 * command; min frame size is fixed at HCLGE_MAC_MIN_FRAME.
 */
7757 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7759 struct hclge_config_max_frm_size_cmd *req;
7760 struct hclge_desc desc;
7762 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7764 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7765 req->max_frm_size = cpu_to_le16(new_mps);
7766 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7768 return hclge_cmd_send(&hdev->hw, &desc, 1);
/* hnae3 MTU op: thin wrapper that applies the MTU to this handle's vport. */
7771 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7773 struct hclge_vport *vport = hclge_get_vport(handle);
7775 return hclge_set_vport_mtu(vport, new_mtu);
/* Set a vport's MTU. VFs only record their mps (must fit under the PF's);
 * for the PF the MAC frame size is reprogrammed and RX buffers reallocated,
 * with the client quiesced around the change. Serialized by vport_lock.
 */
7778 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7780 struct hclge_dev *hdev = vport->back;
7781 int i, max_frm_size, ret;
7783 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7784 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7785 max_frm_size > HCLGE_MAC_MAX_FRAME)
7788 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7789 mutex_lock(&hdev->vport_lock);
7790 /* VF's mps must fit within hdev->mps */
7791 if (vport->vport_id && max_frm_size > hdev->mps) {
7792 mutex_unlock(&hdev->vport_lock);
7794 } else if (vport->vport_id) {
7795 vport->mps = max_frm_size;
7796 mutex_unlock(&hdev->vport_lock);
7800 /* PF's mps must be greater then VF's mps */
7801 for (i = 1; i < hdev->num_alloc_vport; i++)
7802 if (max_frm_size < hdev->vport[i].mps) {
7803 mutex_unlock(&hdev->vport_lock);
7807 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7809 ret = hclge_set_mac_mtu(hdev, max_frm_size);
7811 dev_err(&hdev->pdev->dev,
7812 "Change mtu fail, ret =%d\n", ret);
7816 hdev->mps = max_frm_size;
7817 vport->mps = max_frm_size;
7819 ret = hclge_buffer_alloc(hdev);
7821 dev_err(&hdev->pdev->dev,
7822 "Allocate buffer fail, ret =%d\n", ret);
7825 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7826 mutex_unlock(&hdev->vport_lock);
/* Assert or deassert (per 'enable') the soft reset of one TQP queue via
 * the RESET_TQP_QUEUE firmware command.
 */
7830 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7833 struct hclge_reset_tqp_queue_cmd *req;
7834 struct hclge_desc desc;
7837 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7839 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7840 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7841 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7843 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7845 dev_err(&hdev->pdev->dev,
7846 "Send tqp reset cmd error, status =%d\n", ret);
/* Query whether a TQP queue's HW reset has completed; returns the
 * ready_to_reset bit from the RESET_TQP_QUEUE read response.
 */
7853 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7855 struct hclge_reset_tqp_queue_cmd *req;
7856 struct hclge_desc desc;
7859 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7861 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7862 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7864 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7866 dev_err(&hdev->pdev->dev,
7867 "Get reset status error, status =%d\n", ret);
7871 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
/* Translate a handle-local queue id into the device-global TQP index.
 * NOTE(review): the return statement (presumably tqp->index) is on an
 * elided line.
 */
7874 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7876 struct hnae3_queue *queue;
7877 struct hclge_tqp *tqp;
7879 queue = handle->kinfo.tqp[queue_id];
7880 tqp = container_of(queue, struct hclge_tqp, q);
/* Reset one TQP of the PF: disable it, assert the soft reset, poll the
 * reset status up to HCLGE_TQP_RESET_TRY_TIMES, then deassert the reset.
 */
7885 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7887 struct hclge_vport *vport = hclge_get_vport(handle);
7888 struct hclge_dev *hdev = vport->back;
7889 int reset_try_times = 0;
7894 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7896 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7898 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7902 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7904 dev_err(&hdev->pdev->dev,
7905 "Send reset tqp cmd fail, ret = %d\n", ret);
7909 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7910 /* Wait for tqp hw reset */
7912 reset_status = hclge_get_reset_status(hdev, queue_gid);
7917 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7918 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7922 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7924 dev_err(&hdev->pdev->dev,
7925 "Deassert the soft reset fail, ret = %d\n", ret);
/* VF-queue variant of the TQP reset: same assert/poll/deassert sequence,
 * but failures are only warned about (best-effort on behalf of a VF).
 */
7930 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7932 struct hclge_dev *hdev = vport->back;
7933 int reset_try_times = 0;
7938 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7940 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7942 dev_warn(&hdev->pdev->dev,
7943 "Send reset tqp cmd fail, ret = %d\n", ret);
7947 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7948 /* Wait for tqp hw reset */
7950 reset_status = hclge_get_reset_status(hdev, queue_gid);
7955 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7956 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7960 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7962 dev_warn(&hdev->pdev->dev,
7963 "Deassert the soft reset fail, ret = %d\n", ret);
/* hnae3 op: report the cached firmware version. */
7966 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7968 struct hclge_vport *vport = hclge_get_vport(handle);
7969 struct hclge_dev *hdev = vport->back;
7971 return hdev->fw_version;
/* Advertise pause capabilities on the attached PHY (no-op path for
 * non-copper ports is on an elided line).
 */
7974 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7976 struct phy_device *phydev = hdev->hw.mac.phydev;
7981 phy_set_asym_pause(phydev, rx_en, tx_en);
/* Translate rx/tx pause enables into an fc_mode, program MAC pause, and on
 * success (non-PFC) adopt the new mode as the current one.
 */
7984 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7989 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7990 else if (rx_en && !tx_en)
7991 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7992 else if (!rx_en && tx_en)
7993 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7995 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7997 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8000 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8002 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
8007 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
/* Resolve autonegotiated flow control from local PHY advertisement and
 * link-partner pause bits, then program the resulting rx/tx pause
 * (forced off on half duplex).
 */
8012 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8014 struct phy_device *phydev = hdev->hw.mac.phydev;
8015 u16 remote_advertising = 0;
8016 u16 local_advertising;
8017 u32 rx_pause, tx_pause;
8020 if (!phydev->link || !phydev->autoneg)
8023 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8026 remote_advertising = LPA_PAUSE_CAP;
8028 if (phydev->asym_pause)
8029 remote_advertising |= LPA_PAUSE_ASYM;
8031 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8032 remote_advertising);
8033 tx_pause = flowctl & FLOW_CTRL_TX;
8034 rx_pause = flowctl & FLOW_CTRL_RX;
8036 if (phydev->duplex == HCLGE_MAC_HALF) {
8041 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
/* ethtool get_pauseparam: report autoneg state and map the current
 * fc_mode onto *rx_en/*tx_en (assignments are on elided lines).
 */
8044 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8045 u32 *rx_en, u32 *tx_en)
8047 struct hclge_vport *vport = hclge_get_vport(handle);
8048 struct hclge_dev *hdev = vport->back;
8050 *auto_neg = hclge_get_autoneg(handle);
8052 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8058 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8061 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8064 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
/* ethtool set_pauseparam: reject autoneg changes here and refuse while PFC
 * is active; otherwise update PHY advertisement and either force the pause
 * config or restart autonegotiation (PHY or, on >0x20 HW, MAC autoneg).
 */
8073 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8074 u32 rx_en, u32 tx_en)
8076 struct hclge_vport *vport = hclge_get_vport(handle);
8077 struct hclge_dev *hdev = vport->back;
8078 struct phy_device *phydev = hdev->hw.mac.phydev;
8081 fc_autoneg = hclge_get_autoneg(handle);
8082 if (auto_neg != fc_autoneg) {
8083 dev_info(&hdev->pdev->dev,
8084 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8088 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8089 dev_info(&hdev->pdev->dev,
8090 "Priority flow control enabled. Cannot set link flow control.\n");
8094 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8097 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8100 return phy_start_aneg(phydev);
8102 if (hdev->pdev->revision == 0x20)
8105 return hclge_restart_autoneg(handle);
/* Report cached MAC speed/duplex/autoneg for ethtool ksettings. */
8108 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8109 u8 *auto_neg, u32 *speed, u8 *duplex)
8111 struct hclge_vport *vport = hclge_get_vport(handle);
8112 struct hclge_dev *hdev = vport->back;
8115 *speed = hdev->hw.mac.speed;
8117 *duplex = hdev->hw.mac.duplex;
8119 *auto_neg = hdev->hw.mac.autoneg;
/* Report cached MAC media type and (optionally) SFP module type. */
8122 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8125 struct hclge_vport *vport = hclge_get_vport(handle);
8126 struct hclge_dev *hdev = vport->back;
8129 *media_type = hdev->hw.mac.media_type;
8132 *module_type = hdev->hw.mac.module_type;
/* Read MDI/MDI-X control and status from the PHY: switch to the MDIX
 * register page, read control and status, restore the copper page, then
 * translate the raw fields to ETH_TP_MDI* values.
 */
8135 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8136 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8138 struct hclge_vport *vport = hclge_get_vport(handle);
8139 struct hclge_dev *hdev = vport->back;
8140 struct phy_device *phydev = hdev->hw.mac.phydev;
8141 int mdix_ctrl, mdix, is_resolved;
8142 unsigned int retval;
8145 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8146 *tp_mdix = ETH_TP_MDI_INVALID;
8150 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8152 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8153 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8154 HCLGE_PHY_MDIX_CTRL_S);
8156 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8157 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8158 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8160 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8162 switch (mdix_ctrl) {
8164 *tp_mdix_ctrl = ETH_TP_MDI;
8167 *tp_mdix_ctrl = ETH_TP_MDI_X;
8170 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8173 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8178 *tp_mdix = ETH_TP_MDI_INVALID;
8180 *tp_mdix = ETH_TP_MDI_X;
8182 *tp_mdix = ETH_TP_MDI;
/* Dump a human-readable summary of the PF configuration to the kernel log
 * (queue counts, vport/VF counts, buffer sizes, DCB/MQPRIO flags).
 */
8185 static void hclge_info_show(struct hclge_dev *hdev)
8187 struct device *dev = &hdev->pdev->dev;
8189 dev_info(dev, "PF info begin:\n");
8191 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8192 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8193 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8194 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8195 dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8196 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8197 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8198 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8199 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8200 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8201 dev_info(dev, "This is %s PF\n",
8202 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8203 dev_info(dev, "DCB %s\n",
8204 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8205 dev_info(dev, "MQPRIO %s\n",
8206 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8208 dev_info(dev, "PF info end.\n");
/* Bring up the NIC client instance on a vport, mark it registered, enable
 * NIC HW error interrupts, and print the PF info dump when driver messages
 * are enabled.
 */
8211 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8212 struct hclge_vport *vport)
8214 struct hnae3_client *client = vport->nic.client;
8215 struct hclge_dev *hdev = ae_dev->priv;
8218 ret = client->ops->init_instance(&vport->nic);
8222 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8223 hnae3_set_client_init_flag(client, ae_dev, 1);
8225 /* Enable nic hw error interrupts */
8226 ret = hclge_config_nic_hw_error(hdev, true);
8228 dev_err(&ae_dev->pdev->dev,
8229 "fail(%d) to enable hw error interrupts\n", ret);
8231 if (netif_msg_drv(&hdev->vport->nic))
8232 hclge_info_show(hdev);
/* Bring up the RoCE client instance on a vport when RoCE is supported and
 * a RoCE client has registered; marks the RoCE-registered state on success.
 */
8237 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8238 struct hclge_vport *vport)
8240 struct hnae3_client *client = vport->roce.client;
8241 struct hclge_dev *hdev = ae_dev->priv;
8244 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8248 client = hdev->roce_client;
8249 ret = hclge_init_roce_base_info(vport);
8253 ret = client->ops->init_instance(&vport->roce);
8257 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8258 hnae3_set_client_init_flag(client, ae_dev, 1);
/* hnae3 client registration entry: for each vport, attach the KNIC or RoCE
 * client and initialize its instance (a KNIC registration also tries the
 * RoCE instance). Error paths (elided labels) unwind the client pointers.
 */
8263 static int hclge_init_client_instance(struct hnae3_client *client,
8264 struct hnae3_ae_dev *ae_dev)
8266 struct hclge_dev *hdev = ae_dev->priv;
8267 struct hclge_vport *vport;
8270 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8271 vport = &hdev->vport[i];
8273 switch (client->type) {
8274 case HNAE3_CLIENT_KNIC:
8276 hdev->nic_client = client;
8277 vport->nic.client = client;
8278 ret = hclge_init_nic_client_instance(ae_dev, vport);
8282 ret = hclge_init_roce_client_instance(ae_dev, vport);
8287 case HNAE3_CLIENT_ROCE:
8288 if (hnae3_dev_roce_supported(hdev)) {
8289 hdev->roce_client = client;
8290 vport->roce.client = client;
8293 ret = hclge_init_roce_client_instance(ae_dev, vport);
8303 /* Enable roce ras interrupts */
8304 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8306 dev_err(&ae_dev->pdev->dev,
8307 "fail(%d) to enable roce ras interrupts\n", ret);
8312 hdev->nic_client = NULL;
8313 vport->nic.client = NULL;
8316 hdev->roce_client = NULL;
8317 vport->roce.client = NULL;
/* Tear down client instances per vport: always uninit a registered RoCE
 * client first; for a NIC (non-RoCE) caller also uninit the NIC instance
 * and clear the registered state bits.
 */
8321 static void hclge_uninit_client_instance(struct hnae3_client *client,
8322 struct hnae3_ae_dev *ae_dev)
8324 struct hclge_dev *hdev = ae_dev->priv;
8325 struct hclge_vport *vport;
8328 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8329 vport = &hdev->vport[i];
8330 if (hdev->roce_client) {
8331 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8332 hdev->roce_client->ops->uninit_instance(&vport->roce,
8334 hdev->roce_client = NULL;
8335 vport->roce.client = NULL;
8337 if (client->type == HNAE3_CLIENT_ROCE)
8339 if (hdev->nic_client && client->ops->uninit_instance) {
8340 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8341 client->ops->uninit_instance(&vport->nic, 0);
8342 hdev->nic_client = NULL;
8343 vport->nic.client = NULL;
/* PCI bring-up: enable the device, set a 64-bit (fallback 32-bit) DMA
 * mask, claim the BARs, map BAR2 for register access, and read the number
 * of SR-IOV VFs. Error labels unwind in reverse order.
 */
8348 static int hclge_pci_init(struct hclge_dev *hdev)
8350 struct pci_dev *pdev = hdev->pdev;
8351 struct hclge_hw *hw;
8354 ret = pci_enable_device(pdev);
8356 dev_err(&pdev->dev, "failed to enable PCI device\n");
8360 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8362 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8365 "can't set consistent PCI DMA");
8366 goto err_disable_device;
8368 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8371 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8373 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8374 goto err_disable_device;
8377 pci_set_master(pdev);
8379 hw->io_base = pcim_iomap(pdev, 2, 0);
8381 dev_err(&pdev->dev, "Can't map configuration register space\n");
8383 goto err_clr_master;
8386 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8390 pci_clear_master(pdev);
8391 pci_release_regions(pdev);
8393 pci_disable_device(pdev);
/* Reverse of hclge_pci_init: unmap registers, free IRQ vectors, drop bus
 * mastering, release regions and disable the device.
 */
8398 static void hclge_pci_uninit(struct hclge_dev *hdev)
8400 struct pci_dev *pdev = hdev->pdev;
8402 pcim_iounmap(pdev, hdev->hw.io_base);
8403 pci_free_irq_vectors(pdev);
8404 pci_clear_master(pdev);
8405 pci_release_mem_regions(pdev);
8406 pci_disable_device(pdev);
/* Initialize the hdev state bitmap: service initialized, link down, and
 * all reset/mailbox scheduling/handling flags cleared.
 */
8409 static void hclge_state_init(struct hclge_dev *hdev)
8411 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8412 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8413 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8414 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8415 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8416 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
/* Quiesce background work on teardown: mark DOWN/REMOVING, then stop the
 * timers and work items (guards check they were ever set up).
 */
8419 static void hclge_state_uninit(struct hclge_dev *hdev)
8421 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8422 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8424 if (hdev->service_timer.function)
8425 del_timer_sync(&hdev->service_timer);
8426 if (hdev->reset_timer.function)
8427 del_timer_sync(&hdev->reset_timer);
8428 if (hdev->service_task.func)
8429 cancel_work_sync(&hdev->service_task);
8430 if (hdev->rst_service_task.func)
8431 cancel_work_sync(&hdev->rst_service_task);
8432 if (hdev->mbx_service_task.func)
8433 cancel_work_sync(&hdev->mbx_service_task);
/* Pre-FLR hook: request an FLR-type reset and poll (up to 50 * 100ms) for
 * the reset path to report the device is down; warn on timeout.
 */
8436 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8438 #define HCLGE_FLR_WAIT_MS 100
8439 #define HCLGE_FLR_WAIT_CNT 50
8440 struct hclge_dev *hdev = ae_dev->priv;
8443 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8444 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8445 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8446 hclge_reset_event(hdev->pdev, NULL);
8448 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8449 cnt++ < HCLGE_FLR_WAIT_CNT)
8450 msleep(HCLGE_FLR_WAIT_MS);
8452 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8453 dev_err(&hdev->pdev->dev,
8454 "flr wait down timeout: %d\n", cnt);
/* Post-FLR hook: signal the reset task that FLR has completed. */
8457 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8459 struct hclge_dev *hdev = ae_dev->priv;
8461 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
/* Clear the FUNC_RST_ING flag for every VF vport; failures are only
 * warned (per-VF, best-effort).
 */
8464 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
8468 for (i = 0; i < hdev->num_alloc_vport; i++) {
8469 struct hclge_vport *vport = &hdev->vport[i];
8472 /* Send cmd to clear VF's FUNC_RST_ING */
8473 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
8475 dev_warn(&hdev->pdev->dev,
8476 "clear vf(%d) rst failed %d!\n",
8477 vport->vport_id, ret);
/* Main PF probe path: allocate the hclge_dev, bring up PCI and the command
 * queue/firmware interface, read capabilities, configure MSI and the misc
 * IRQ, allocate TQPs/vports, then initialize MAC/MDIO, UMV space, TSO/GRO,
 * VLAN, TM scheduling, RSS, manager and flow-director tables. Finishes by
 * arming timers/work items, clearing stale events and handling any HW
 * errors that occurred before the driver loaded. Error labels unwind each
 * stage in reverse order.
 */
8481 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8483 struct pci_dev *pdev = ae_dev->pdev;
8484 struct hclge_dev *hdev;
8487 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8494 hdev->ae_dev = ae_dev;
8495 hdev->reset_type = HNAE3_NONE_RESET;
8496 hdev->reset_level = HNAE3_FUNC_RESET;
8497 ae_dev->priv = hdev;
8498 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8500 mutex_init(&hdev->vport_lock);
8501 mutex_init(&hdev->vport_cfg_mutex);
8502 spin_lock_init(&hdev->fd_rule_lock);
8504 ret = hclge_pci_init(hdev);
8506 dev_err(&pdev->dev, "PCI init failed\n");
8510 /* Firmware command queue initialize */
8511 ret = hclge_cmd_queue_init(hdev);
8513 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8514 goto err_pci_uninit;
8517 /* Firmware command initialize */
8518 ret = hclge_cmd_init(hdev);
8520 goto err_cmd_uninit;
8522 ret = hclge_get_cap(hdev);
8524 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8526 goto err_cmd_uninit;
8529 ret = hclge_configure(hdev);
8531 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8532 goto err_cmd_uninit;
8535 ret = hclge_init_msi(hdev);
8537 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8538 goto err_cmd_uninit;
8541 ret = hclge_misc_irq_init(hdev);
8544 "Misc IRQ(vector0) init error, ret = %d.\n",
8546 goto err_msi_uninit;
8549 ret = hclge_alloc_tqps(hdev);
8551 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8552 goto err_msi_irq_uninit;
8555 ret = hclge_alloc_vport(hdev);
8557 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8558 goto err_msi_irq_uninit;
8561 ret = hclge_map_tqp(hdev);
8563 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8564 goto err_msi_irq_uninit;
8567 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8568 ret = hclge_mac_mdio_config(hdev);
8570 dev_err(&hdev->pdev->dev,
8571 "mdio config fail ret=%d\n", ret);
8572 goto err_msi_irq_uninit;
8576 ret = hclge_init_umv_space(hdev);
8578 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8579 goto err_mdiobus_unreg;
8582 ret = hclge_mac_init(hdev);
8584 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8585 goto err_mdiobus_unreg;
8588 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8590 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8591 goto err_mdiobus_unreg;
8594 ret = hclge_config_gro(hdev, true);
8596 goto err_mdiobus_unreg;
8598 ret = hclge_init_vlan_config(hdev);
8600 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8601 goto err_mdiobus_unreg;
8604 ret = hclge_tm_schd_init(hdev);
8606 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8607 goto err_mdiobus_unreg;
8610 hclge_rss_init_cfg(hdev);
8611 ret = hclge_rss_init_hw(hdev);
8613 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8614 goto err_mdiobus_unreg;
8617 ret = init_mgr_tbl(hdev);
8619 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8620 goto err_mdiobus_unreg;
8623 ret = hclge_init_fd_config(hdev);
8626 "fd table init fail, ret=%d\n", ret);
8627 goto err_mdiobus_unreg;
8630 INIT_KFIFO(hdev->mac_tnl_log);
8632 hclge_dcb_ops_set(hdev);
8634 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8635 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8636 INIT_WORK(&hdev->service_task, hclge_service_task);
8637 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8638 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8640 hclge_clear_all_event_cause(hdev);
8641 hclge_clear_resetting_state(hdev);
8643 /* Log and clear the hw errors those already occurred */
8644 hclge_handle_all_hns_hw_errors(ae_dev);
8646 /* request delayed reset for the error recovery because an immediate
8647 * global reset on a PF affecting pending initialization of other PFs
8649 if (ae_dev->hw_err_reset_req) {
8650 enum hnae3_reset_type reset_level;
8652 reset_level = hclge_get_reset_level(ae_dev,
8653 &ae_dev->hw_err_reset_req);
8654 hclge_set_def_reset_request(ae_dev, reset_level);
8655 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
8658 /* Enable MISC vector(vector0) */
8659 hclge_enable_vector(&hdev->misc_vector, true);
8661 hclge_state_init(hdev);
8662 hdev->last_reset_time = jiffies;
8664 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8668 if (hdev->hw.mac.phydev)
8669 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8671 hclge_misc_irq_uninit(hdev);
8673 pci_free_irq_vectors(pdev);
8675 hclge_cmd_uninit(hdev);
8677 pcim_iounmap(pdev, hdev->hw.io_base);
8678 pci_clear_master(pdev);
8679 pci_release_regions(pdev);
8680 pci_disable_device(pdev);
/* Zero the cached HW statistics (used across resets). */
8685 static void hclge_stats_clear(struct hclge_dev *hdev)
8687 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
/* Stop every vport after a device reset.
 * NOTE(review): the per-iteration vport advance is on an elided line.
 */
8690 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8692 struct hclge_vport *vport = hdev->vport;
8695 for (i = 0; i < hdev->num_alloc_vport; i++) {
8696 hclge_vport_stop(vport);
/* Re-initialize the device after a reset: clear stats and VLAN shadow
 * tables, re-init the command interface, then re-program TQP mapping, UMV,
 * MAC, TSO/GRO, VLAN, TM, RSS and flow-director config, re-enable HW error
 * (and, with a RoCE client, RoCE RAS) interrupts, and stop all vports.
 */
8701 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8703 struct hclge_dev *hdev = ae_dev->priv;
8704 struct pci_dev *pdev = ae_dev->pdev;
8707 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8709 hclge_stats_clear(hdev);
8710 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8711 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8713 ret = hclge_cmd_init(hdev);
8715 dev_err(&pdev->dev, "Cmd queue init failed\n");
8719 ret = hclge_map_tqp(hdev);
8721 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8725 hclge_reset_umv_space(hdev);
8727 ret = hclge_mac_init(hdev);
8729 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8733 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8735 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8739 ret = hclge_config_gro(hdev, true);
8743 ret = hclge_init_vlan_config(hdev);
8745 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8749 ret = hclge_tm_init_hw(hdev, true);
8751 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8755 ret = hclge_rss_init_hw(hdev);
8757 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8761 ret = hclge_init_fd_config(hdev);
8763 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
8767 /* Re-enable the hw error interrupts because
8768 * the interrupts get disabled on global reset.
8770 ret = hclge_config_nic_hw_error(hdev, true);
8773 "fail(%d) to re-enable NIC hw error interrupts\n",
8778 if (hdev->roce_client) {
8779 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8782 "fail(%d) to re-enable roce ras interrupts\n",
8788 hclge_reset_vport_state(hdev);
8790 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
/* Device teardown: stop background work, unregister MDIO, free UMV space,
 * mask the misc vector and all HW error interrupts, then uninit the
 * command interface, IRQs and PCI, and free the per-vport tables.
 */
8796 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8798 struct hclge_dev *hdev = ae_dev->priv;
8799 struct hclge_mac *mac = &hdev->hw.mac;
8801 hclge_state_uninit(hdev);
8804 mdiobus_unregister(mac->mdio_bus);
8806 hclge_uninit_umv_space(hdev);
8808 /* Disable MISC vector(vector0) */
8809 hclge_enable_vector(&hdev->misc_vector, false);
8810 synchronize_irq(hdev->misc_vector.vector_irq);
8812 /* Disable all hw interrupts */
8813 hclge_config_mac_tnl_int(hdev, false);
8814 hclge_config_nic_hw_error(hdev, false);
8815 hclge_config_rocee_ras_interrupt(hdev, false);
8817 hclge_cmd_uninit(hdev);
8818 hclge_misc_irq_uninit(hdev);
8819 hclge_pci_uninit(hdev);
8820 mutex_destroy(&hdev->vport_lock);
8821 hclge_uninit_vport_mac_table(hdev);
8822 hclge_uninit_vport_vlan_table(hdev);
8823 mutex_destroy(&hdev->vport_cfg_mutex);
8824 ae_dev->priv = NULL;
/* Max combined channels: limited by both the RSS size cap and the TQPs
 * available per traffic class on this vport.
 */
8827 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8829 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8830 struct hclge_vport *vport = hclge_get_vport(handle);
8831 struct hclge_dev *hdev = vport->back;
8833 return min_t(u32, hdev->rss_size_max,
8834 vport->alloc_tqps / kinfo->num_tc);
/* ethtool get_channels: max combined, one 'other' (misc) vector, and the
 * current combined count (= RSS size).
 */
8837 static void hclge_get_channels(struct hnae3_handle *handle,
8838 struct ethtool_channels *ch)
8840 ch->max_combined = hclge_get_max_channels(handle);
8841 ch->other_count = 1;
8843 ch->combined_count = handle->kinfo.rss_size;
/* Report the vport's allocated TQP count and the device RSS size cap. */
8846 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8847 u16 *alloc_tqps, u16 *max_rss_size)
8849 struct hclge_vport *vport = hclge_get_vport(handle);
8850 struct hclge_dev *hdev = vport->back;
8852 *alloc_tqps = vport->alloc_tqps;
8853 *max_rss_size = hdev->rss_size_max;
/* ethtool set_channels: request a new TQP count, remap the vport in TM,
 * reprogram the per-TC RSS mode for the new rss_size, and (unless the user
 * configured the RSS table) rebuild the indirection table round-robin.
 */
8856 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8857 bool rxfh_configured)
8859 struct hclge_vport *vport = hclge_get_vport(handle);
8860 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8861 struct hclge_dev *hdev = vport->back;
8862 int cur_rss_size = kinfo->rss_size;
8863 int cur_tqps = kinfo->num_tqps;
8864 u16 tc_offset[HCLGE_MAX_TC_NUM];
8865 u16 tc_valid[HCLGE_MAX_TC_NUM];
8866 u16 tc_size[HCLGE_MAX_TC_NUM];
8872 kinfo->req_rss_size = new_tqps_num;
8874 ret = hclge_tm_vport_map_update(hdev);
8876 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8880 roundup_size = roundup_pow_of_two(kinfo->rss_size);
8881 roundup_size = ilog2(roundup_size);
8882 /* Set the RSS TC mode according to the new RSS size */
8883 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8886 if (!(hdev->hw_tc_map & BIT(i)))
8890 tc_size[i] = roundup_size;
8891 tc_offset[i] = kinfo->rss_size * i;
8893 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8897 /* RSS indirection table has been configuared by user */
8898 if (rxfh_configured)
8901 /* Reinitializes the rss indirect table according to the new RSS size */
8902 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8906 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8907 rss_indir[i] = i % kinfo->rss_size;
8909 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8911 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8918 dev_info(&hdev->pdev->dev,
8919 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
8920 cur_rss_size, kinfo->rss_size,
8921 cur_tqps, kinfo->rss_size * kinfo->num_tc);
8926 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8927 u32 *regs_num_64_bit)
8929 struct hclge_desc desc;
8933 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8934 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8936 dev_err(&hdev->pdev->dev,
8937 "Query register number cmd failed, ret = %d.\n", ret);
8941 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8942 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8944 total_num = *regs_num_32_bit + *regs_num_64_bit;
8951 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8954 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8955 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
8957 struct hclge_desc *desc;
8958 u32 *reg_val = data;
8968 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
8969 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
8970 HCLGE_32_BIT_REG_RTN_DATANUM);
8971 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8975 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8976 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8978 dev_err(&hdev->pdev->dev,
8979 "Query 32 bit register cmd failed, ret = %d.\n", ret);
8984 for (i = 0; i < cmd_num; i++) {
8986 desc_data = (__le32 *)(&desc[i].data[0]);
8987 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
8989 desc_data = (__le32 *)(&desc[i]);
8990 n = HCLGE_32_BIT_REG_RTN_DATANUM;
8992 for (k = 0; k < n; k++) {
8993 *reg_val++ = le32_to_cpu(*desc_data++);
9005 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9008 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9009 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9011 struct hclge_desc *desc;
9012 u64 *reg_val = data;
9022 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9023 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9024 HCLGE_64_BIT_REG_RTN_DATANUM);
9025 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9029 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9030 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9032 dev_err(&hdev->pdev->dev,
9033 "Query 64 bit register cmd failed, ret = %d.\n", ret);
9038 for (i = 0; i < cmd_num; i++) {
9040 desc_data = (__le64 *)(&desc[i].data[0]);
9041 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9043 desc_data = (__le64 *)(&desc[i]);
9044 n = HCLGE_64_BIT_REG_RTN_DATANUM;
9046 for (k = 0; k < n; k++) {
9047 *reg_val++ = le64_to_cpu(*desc_data++);
9059 #define MAX_SEPARATE_NUM 4
9060 #define SEPARATOR_VALUE 0xFFFFFFFF
9061 #define REG_NUM_PER_LINE 4
9062 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
9064 static int hclge_get_regs_len(struct hnae3_handle *handle)
9066 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9067 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9068 struct hclge_vport *vport = hclge_get_vport(handle);
9069 struct hclge_dev *hdev = vport->back;
9070 u32 regs_num_32_bit, regs_num_64_bit;
9073 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
9075 dev_err(&hdev->pdev->dev,
9076 "Get register number failed, ret = %d.\n", ret);
9080 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9081 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9082 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9083 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9085 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9086 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9087 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9090 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9093 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9094 struct hclge_vport *vport = hclge_get_vport(handle);
9095 struct hclge_dev *hdev = vport->back;
9096 u32 regs_num_32_bit, regs_num_64_bit;
9097 int i, j, reg_um, separator_num;
9101 *version = hdev->fw_version;
9103 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
9105 dev_err(&hdev->pdev->dev,
9106 "Get register number failed, ret = %d.\n", ret);
9110 /* fetching per-PF registers valus from PF PCIe register space */
9111 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9112 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9113 for (i = 0; i < reg_um; i++)
9114 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9115 for (i = 0; i < separator_num; i++)
9116 *reg++ = SEPARATOR_VALUE;
9118 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9119 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9120 for (i = 0; i < reg_um; i++)
9121 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9122 for (i = 0; i < separator_num; i++)
9123 *reg++ = SEPARATOR_VALUE;
9125 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9126 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9127 for (j = 0; j < kinfo->num_tqps; j++) {
9128 for (i = 0; i < reg_um; i++)
9129 *reg++ = hclge_read_dev(&hdev->hw,
9130 ring_reg_addr_list[i] +
9132 for (i = 0; i < separator_num; i++)
9133 *reg++ = SEPARATOR_VALUE;
9136 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9137 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9138 for (j = 0; j < hdev->num_msi_used - 1; j++) {
9139 for (i = 0; i < reg_um; i++)
9140 *reg++ = hclge_read_dev(&hdev->hw,
9141 tqp_intr_reg_addr_list[i] +
9143 for (i = 0; i < separator_num; i++)
9144 *reg++ = SEPARATOR_VALUE;
9147 /* fetching PF common registers values from firmware */
9148 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9150 dev_err(&hdev->pdev->dev,
9151 "Get 32 bit register failed, ret = %d.\n", ret);
9155 reg += regs_num_32_bit;
9156 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9158 dev_err(&hdev->pdev->dev,
9159 "Get 64 bit register failed, ret = %d.\n", ret);
9162 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9164 struct hclge_set_led_state_cmd *req;
9165 struct hclge_desc desc;
9168 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9170 req = (struct hclge_set_led_state_cmd *)desc.data;
9171 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9172 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9174 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9176 dev_err(&hdev->pdev->dev,
9177 "Send set led state cmd error, ret =%d\n", ret);
/* Locate-LED states accepted by hclge_set_led_status(). */
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
9188 static int hclge_set_led_id(struct hnae3_handle *handle,
9189 enum ethtool_phys_id_state status)
9191 struct hclge_vport *vport = hclge_get_vport(handle);
9192 struct hclge_dev *hdev = vport->back;
9195 case ETHTOOL_ID_ACTIVE:
9196 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9197 case ETHTOOL_ID_INACTIVE:
9198 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9204 static void hclge_get_link_mode(struct hnae3_handle *handle,
9205 unsigned long *supported,
9206 unsigned long *advertising)
9208 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9209 struct hclge_vport *vport = hclge_get_vport(handle);
9210 struct hclge_dev *hdev = vport->back;
9211 unsigned int idx = 0;
9213 for (; idx < size; idx++) {
9214 supported[idx] = hdev->hw.mac.supported[idx];
9215 advertising[idx] = hdev->hw.mac.advertising[idx];
9219 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9221 struct hclge_vport *vport = hclge_get_vport(handle);
9222 struct hclge_dev *hdev = vport->back;
9224 return hclge_config_gro(hdev, enable);
9227 static const struct hnae3_ae_ops hclge_ops = {
9228 .init_ae_dev = hclge_init_ae_dev,
9229 .uninit_ae_dev = hclge_uninit_ae_dev,
9230 .flr_prepare = hclge_flr_prepare,
9231 .flr_done = hclge_flr_done,
9232 .init_client_instance = hclge_init_client_instance,
9233 .uninit_client_instance = hclge_uninit_client_instance,
9234 .map_ring_to_vector = hclge_map_ring_to_vector,
9235 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9236 .get_vector = hclge_get_vector,
9237 .put_vector = hclge_put_vector,
9238 .set_promisc_mode = hclge_set_promisc_mode,
9239 .set_loopback = hclge_set_loopback,
9240 .start = hclge_ae_start,
9241 .stop = hclge_ae_stop,
9242 .client_start = hclge_client_start,
9243 .client_stop = hclge_client_stop,
9244 .get_status = hclge_get_status,
9245 .get_ksettings_an_result = hclge_get_ksettings_an_result,
9246 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9247 .get_media_type = hclge_get_media_type,
9248 .check_port_speed = hclge_check_port_speed,
9249 .get_fec = hclge_get_fec,
9250 .set_fec = hclge_set_fec,
9251 .get_rss_key_size = hclge_get_rss_key_size,
9252 .get_rss_indir_size = hclge_get_rss_indir_size,
9253 .get_rss = hclge_get_rss,
9254 .set_rss = hclge_set_rss,
9255 .set_rss_tuple = hclge_set_rss_tuple,
9256 .get_rss_tuple = hclge_get_rss_tuple,
9257 .get_tc_size = hclge_get_tc_size,
9258 .get_mac_addr = hclge_get_mac_addr,
9259 .set_mac_addr = hclge_set_mac_addr,
9260 .do_ioctl = hclge_do_ioctl,
9261 .add_uc_addr = hclge_add_uc_addr,
9262 .rm_uc_addr = hclge_rm_uc_addr,
9263 .add_mc_addr = hclge_add_mc_addr,
9264 .rm_mc_addr = hclge_rm_mc_addr,
9265 .set_autoneg = hclge_set_autoneg,
9266 .get_autoneg = hclge_get_autoneg,
9267 .restart_autoneg = hclge_restart_autoneg,
9268 .get_pauseparam = hclge_get_pauseparam,
9269 .set_pauseparam = hclge_set_pauseparam,
9270 .set_mtu = hclge_set_mtu,
9271 .reset_queue = hclge_reset_tqp,
9272 .get_stats = hclge_get_stats,
9273 .get_mac_pause_stats = hclge_get_mac_pause_stat,
9274 .update_stats = hclge_update_stats,
9275 .get_strings = hclge_get_strings,
9276 .get_sset_count = hclge_get_sset_count,
9277 .get_fw_version = hclge_get_fw_version,
9278 .get_mdix_mode = hclge_get_mdix_mode,
9279 .enable_vlan_filter = hclge_enable_vlan_filter,
9280 .set_vlan_filter = hclge_set_vlan_filter,
9281 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9282 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9283 .reset_event = hclge_reset_event,
9284 .get_reset_level = hclge_get_reset_level,
9285 .set_default_reset_request = hclge_set_def_reset_request,
9286 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9287 .set_channels = hclge_set_channels,
9288 .get_channels = hclge_get_channels,
9289 .get_regs_len = hclge_get_regs_len,
9290 .get_regs = hclge_get_regs,
9291 .set_led_id = hclge_set_led_id,
9292 .get_link_mode = hclge_get_link_mode,
9293 .add_fd_entry = hclge_add_fd_entry,
9294 .del_fd_entry = hclge_del_fd_entry,
9295 .del_all_fd_entries = hclge_del_all_fd_entries,
9296 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9297 .get_fd_rule_info = hclge_get_fd_rule_info,
9298 .get_fd_all_rules = hclge_get_all_rules,
9299 .restore_fd_rules = hclge_restore_fd_entries,
9300 .enable_fd = hclge_enable_fd,
9301 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9302 .dbg_run_cmd = hclge_dbg_run_cmd,
9303 .handle_hw_ras_error = hclge_handle_hw_ras_error,
9304 .get_hw_reset_stat = hclge_get_hw_reset_stat,
9305 .ae_dev_resetting = hclge_ae_dev_resetting,
9306 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9307 .set_gro_en = hclge_gro_en,
9308 .get_global_queue_id = hclge_covert_handle_qid_global,
9309 .set_timer_task = hclge_set_timer_task,
9310 .mac_connect_phy = hclge_mac_connect_phy,
9311 .mac_disconnect_phy = hclge_mac_disconnect_phy,
9312 .restore_vlan_table = hclge_restore_vlan_table,
9315 static struct hnae3_ae_algo ae_algo = {
9317 .pdev_id_table = ae_algo_pci_tbl,
9320 static int hclge_init(void)
9322 pr_info("%s is initializing\n", HCLGE_NAME);
9324 hnae3_register_ae_algo(&ae_algo);
9329 static void hclge_exit(void)
9331 hnae3_unregister_ae_algo(&ae_algo);
9333 module_init(hclge_init);
9334 module_exit(hclge_exit);
9336 MODULE_LICENSE("GPL");
9337 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9338 MODULE_DESCRIPTION("HCLGE Driver");
9339 MODULE_VERSION(HCLGE_MOD_VERSION);