// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
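/* read a 64-bit stats counter at a byte offset within a stats struct */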
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_RESET_STATUS_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

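/* The entry below matches the LLDP multicast DMAC 01:80:c2:00:00:0e:
 * the high 32 bits live in mac_addr_hi32 and the low 16 bits in
 * mac_addr_lo16.
 */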
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

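/* default RSS hash key: the standard Toeplitz key */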
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);
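
	/* the first descriptor carries the command head plus the first
	 * stats, accounting for 3 of the reg_num registers; each following
	 * descriptor carries 4 more, so round the remainder up to 4
	 */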
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

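	/* one TX and one RX packet counter per TQP */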
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

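	/* serialize updates: if another stats refresh is already in
	 * flight, skip this one instead of blocking
	 */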
	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: supported only in GE mode
	 * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
	 * phy: supported only when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags &= ~HCLGE_LOOPBACK_TEST_FLAGS;
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (u8 *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to supporting all speeds on a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);
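
	/* the low 32 bits of the MAC sit in param[2]; splice the high
	 * 16 bits from param[3] above them (the double shift keeps the
	 * expression safe for a 32-bit operand as well)
	 */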
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length is sent to hardware in units of 4 bytes */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev))
		hdev->fd_en = true;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently does not support non-contiguous TCs */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);
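
	/* tso_mss_min and tso_mss_max are separate 16-bit fields, so the
	 * same mask/shift pair is reused when packing the max value
	 */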
	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps,
				       hdev->num_tx_desc, hdev->num_rx_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
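	/* the main (PF) vport also takes any remainder left by the split */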
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

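/* hclge_is_rx_buf_ok: check whether the rx private buffers plus the
 * required shared buffer fit into rx_all and, if they do, compute the
 * shared-buffer size and watermarks
 */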
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);
	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
				 + hdev->dv_buf_size;
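
	/* budget the shared pool so each PFC-enabled TC can absorb a full
	 * max-size frame, every other active TC half a frame, plus one
	 * frame of headroom
	 */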
	shared_buf_tc = pfc_enable_num * aligned_mps +
			(tc_num - pfc_enable_num) * aligned_mps / 2 +
			aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
					     HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low =
			roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : 256;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear from the last TC first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the private buffer of a TC without PFC */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear from the last TC first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of PFC-enabled TCs with a
			 * private buffer
			 */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

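/* only 10M/100M links can run half duplex; force full duplex for every
 * other speed
 */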
1995 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1998 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1999 duplex = HCLGE_MAC_FULL;
2004 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2007 struct hclge_config_mac_speed_dup_cmd *req;
2008 struct hclge_desc desc;
2011 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2013 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2015 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2018 case HCLGE_MAC_SPEED_10M:
2019 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2020 HCLGE_CFG_SPEED_S, 6);
2022 case HCLGE_MAC_SPEED_100M:
2023 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2024 HCLGE_CFG_SPEED_S, 7);
2026 case HCLGE_MAC_SPEED_1G:
2027 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2028 HCLGE_CFG_SPEED_S, 0);
2030 case HCLGE_MAC_SPEED_10G:
2031 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2032 HCLGE_CFG_SPEED_S, 1);
2034 case HCLGE_MAC_SPEED_25G:
2035 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2036 HCLGE_CFG_SPEED_S, 2);
2038 case HCLGE_MAC_SPEED_40G:
2039 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2040 HCLGE_CFG_SPEED_S, 3);
2042 case HCLGE_MAC_SPEED_50G:
2043 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2044 HCLGE_CFG_SPEED_S, 4);
2046 case HCLGE_MAC_SPEED_100G:
2047 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2048 HCLGE_CFG_SPEED_S, 5);
2051 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2055 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2058 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2060 dev_err(&hdev->pdev->dev,
2061 "mac speed/duplex config cmd failed %d.\n", ret);
2068 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2072 duplex = hclge_check_speed_dup(duplex, speed);
2073 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2076 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2080 hdev->hw.mac.speed = speed;
2081 hdev->hw.mac.duplex = duplex;
2086 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2089 struct hclge_vport *vport = hclge_get_vport(handle);
2090 struct hclge_dev *hdev = vport->back;
2092 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2095 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2097 struct hclge_config_auto_neg_cmd *req;
2098 struct hclge_desc desc;
2102 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2104 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2105 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2106 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2108 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2110 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2116 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2118 struct hclge_vport *vport = hclge_get_vport(handle);
2119 struct hclge_dev *hdev = vport->back;
2121 return hclge_set_autoneg_en(hdev, enable);
2124 static int hclge_get_autoneg(struct hnae3_handle *handle)
2126 struct hclge_vport *vport = hclge_get_vport(handle);
2127 struct hclge_dev *hdev = vport->back;
2128 struct phy_device *phydev = hdev->hw.mac.phydev;
2131 return phydev->autoneg;
2133 return hdev->hw.mac.autoneg;
2136 static int hclge_mac_init(struct hclge_dev *hdev)
2138 struct hclge_mac *mac = &hdev->hw.mac;
2141 hdev->support_sfp_query = true;
2142 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2143 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2144 hdev->hw.mac.duplex);
2146 dev_err(&hdev->pdev->dev,
2147 "Config mac speed dup fail ret=%d\n", ret);
2153 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2155 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2159 ret = hclge_buffer_alloc(hdev);
2161 dev_err(&hdev->pdev->dev,
2162 "allocate buffer fail, ret=%d\n", ret);
2167 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2169 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2170 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2171 schedule_work(&hdev->mbx_service_task);
2174 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2176 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2177 schedule_work(&hdev->rst_service_task);
2180 static void hclge_task_schedule(struct hclge_dev *hdev)
2182 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2183 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2184 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2185 (void)schedule_work(&hdev->service_task);
2188 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2190 struct hclge_link_status_cmd *req;
2191 struct hclge_desc desc;
2195 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2196 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2198 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2203 req = (struct hclge_link_status_cmd *)desc.data;
2204 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2206 return !!link_status;
2209 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2214 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2217 mac_state = hclge_get_mac_link_status(hdev);
2219 if (hdev->hw.mac.phydev) {
2220 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2221 link_stat = mac_state &
2222 hdev->hw.mac.phydev->link;
2227 link_stat = mac_state;
2233 static void hclge_update_link_status(struct hclge_dev *hdev)
2235 struct hnae3_client *rclient = hdev->roce_client;
2236 struct hnae3_client *client = hdev->nic_client;
2237 struct hnae3_handle *rhandle;
2238 struct hnae3_handle *handle;
2244 state = hclge_get_mac_phy_link(hdev);
2245 if (state != hdev->hw.mac.link) {
2246 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2247 handle = &hdev->vport[i].nic;
2248 client->ops->link_status_change(handle, state);
2249 rhandle = &hdev->vport[i].roce;
2250 if (rclient && rclient->ops->link_status_change)
2251 rclient->ops->link_status_change(rhandle,
2254 hdev->hw.mac.link = state;
2258 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2260 struct hclge_sfp_speed_cmd *resp = NULL;
2261 struct hclge_desc desc;
2264 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2265 resp = (struct hclge_sfp_speed_cmd *)desc.data;
2266 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2267 if (ret == -EOPNOTSUPP) {
2268 dev_warn(&hdev->pdev->dev,
2269 "IMP do not support get SFP speed %d\n", ret);
2272 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2276 *speed = resp->sfp_speed;
2281 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2283 struct hclge_mac mac = hdev->hw.mac;
2287 /* get the speed from the SFP cmd when the port has no PHY attached */
2293 /* if IMP does not support getting SFP/qSFP speed, return directly */
2294 if (!hdev->support_sfp_query)
2297 ret = hclge_get_sfp_speed(hdev, &speed);
2298 if (ret == -EOPNOTSUPP) {
2299 hdev->support_sfp_query = false;
2305 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2306 return 0; /* do nothing if no SFP */
2308 /* must config full duplex for SFP */
2309 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2312 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2314 struct hclge_vport *vport = hclge_get_vport(handle);
2315 struct hclge_dev *hdev = vport->back;
2317 return hclge_update_speed_duplex(hdev);
2320 static int hclge_get_status(struct hnae3_handle *handle)
2322 struct hclge_vport *vport = hclge_get_vport(handle);
2323 struct hclge_dev *hdev = vport->back;
2325 hclge_update_link_status(hdev);
2327 return hdev->hw.mac.link;
2330 static void hclge_service_timer(struct timer_list *t)
2332 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2334 mod_timer(&hdev->service_timer, jiffies + HZ);
2335 hdev->hw_stats.stats_timer++;
2336 hclge_task_schedule(hdev);
2339 static void hclge_service_complete(struct hclge_dev *hdev)
2341 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2343 /* Flush memory before next watchdog */
2344 smp_mb__before_atomic();
2345 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
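/* hclge_check_event_cause - decode the vector0 interrupt source. Reset
 * events take priority over MSI-X errors, which take priority over
 * mailbox (CMDQ RX) events; *clearval tells the caller which bits to
 * clear afterwards.
 */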
2348 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2350 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2352 /* fetch the events from their corresponding regs */
2353 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2354 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2355 msix_src_reg = hclge_read_dev(&hdev->hw,
2356 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2358 /* Assumption: if reset and mailbox events happen to be reported
2359 * together, then only the reset event is processed in this pass and
2360 * the mailbox event handling is deferred. Since the RX CMDQ event is
2361 * not cleared this time, the hardware will raise another interrupt
2362 * just for the mailbox.
2365 /* check for vector0 reset event sources */
2366 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2367 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2368 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2369 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2370 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2371 return HCLGE_VECTOR0_EVENT_RST;
2374 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2375 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2376 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2377 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2378 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2379 return HCLGE_VECTOR0_EVENT_RST;
2382 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2383 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2384 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2385 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2386 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2387 return HCLGE_VECTOR0_EVENT_RST;
2390 /* check for vector0 msix event source */
2391 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
2392 return HCLGE_VECTOR0_EVENT_ERR;
2394 /* check for vector0 mailbox(=CMDQ RX) event source */
2395 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2396 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2397 *clearval = cmdq_src_reg;
2398 return HCLGE_VECTOR0_EVENT_MBX;
2401 return HCLGE_VECTOR0_EVENT_OTHER;
2404 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2407 switch (event_type) {
2408 case HCLGE_VECTOR0_EVENT_RST:
2409 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2411 case HCLGE_VECTOR0_EVENT_MBX:
2412 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2419 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2421 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2422 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2423 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2424 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2425 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2428 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2430 writel(enable ? 1 : 0, vector->addr);
2433 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2435 struct hclge_dev *hdev = data;
2439 hclge_enable_vector(&hdev->misc_vector, false);
2440 event_cause = hclge_check_event_cause(hdev, &clearval);
2442 /* vector 0 interrupt is shared with reset and mailbox source events. */
2443 switch (event_cause) {
2444 case HCLGE_VECTOR0_EVENT_ERR:
2445 /* we do not know what type of reset is required now. This can
2446 * only be decided after we fetch the types of errors which
2447 * caused this event. Therefore, we do the following for now:
2448 * 1. Assert HNAE3_UNKNOWN_RESET. This means the actual reset
2449 * type to be used is deferred.
2450 * 2. Schedule the reset service task.
2451 * 3. When the service task sees HNAE3_UNKNOWN_RESET, it will
2452 * fetch the correct reset type by first decoding the types
2453 * of errors.
2455 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2457 case HCLGE_VECTOR0_EVENT_RST:
2458 hclge_reset_task_schedule(hdev);
2460 case HCLGE_VECTOR0_EVENT_MBX:
2461 /* If we are here, then either:
2462 * 1. We are not handling any mbx task and none is scheduled, or
2465 * 2. We are handling a mbx task but nothing more is queued.
2467 * In both cases, we should schedule the mbx task as there are more
2468 * mbx messages reported by this interrupt.
2470 hclge_mbx_task_schedule(hdev);
2473 dev_warn(&hdev->pdev->dev,
2474 "received unknown or unhandled event of vector0\n");
2478 /* clear the source of the interrupt if it is not caused by reset */
2479 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2480 hclge_clear_event_cause(hdev, event_cause, clearval);
2481 hclge_enable_vector(&hdev->misc_vector, true);
2487 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2489 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2490 dev_warn(&hdev->pdev->dev,
2491 "vector(vector_id %d) has been freed.\n", vector_id);
2495 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2496 hdev->num_msi_left += 1;
2497 hdev->num_msi_used -= 1;
2500 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2502 struct hclge_misc_vector *vector = &hdev->misc_vector;
2504 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2506 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2507 hdev->vector_status[0] = 0;
2509 hdev->num_msi_left -= 1;
2510 hdev->num_msi_used += 1;
2513 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2517 hclge_get_misc_vector(hdev);
2519 /* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
2520 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2521 0, "hclge_misc", hdev);
2523 hclge_free_vector(hdev, 0);
2524 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2525 hdev->misc_vector.vector_irq);
2531 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2533 free_irq(hdev->misc_vector.vector_irq, hdev);
2534 hclge_free_vector(hdev, 0);
2537 int hclge_notify_client(struct hclge_dev *hdev,
2538 enum hnae3_reset_notify_type type)
2540 struct hnae3_client *client = hdev->nic_client;
2543 if (!client->ops->reset_notify)
2546 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2547 struct hnae3_handle *handle = &hdev->vport[i].nic;
2550 ret = client->ops->reset_notify(handle, type);
2552 dev_err(&hdev->pdev->dev,
2553 "notify nic client failed %d(%d)\n", type, ret);
2561 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2562 enum hnae3_reset_notify_type type)
2564 struct hnae3_client *client = hdev->roce_client;
2571 if (!client->ops->reset_notify)
2574 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2575 struct hnae3_handle *handle = &hdev->vport[i].roce;
2577 ret = client->ops->reset_notify(handle, type);
2579 dev_err(&hdev->pdev->dev,
2580 "notify roce client failed %d(%d)",
2589 static int hclge_reset_wait(struct hclge_dev *hdev)
2591 #define HCLGE_RESET_WAIT_MS 100
2592 #define HCLGE_RESET_WAIT_CNT 200
2593 u32 val, reg, reg_bit;
2596 switch (hdev->reset_type) {
2597 case HNAE3_IMP_RESET:
2598 reg = HCLGE_GLOBAL_RESET_REG;
2599 reg_bit = HCLGE_IMP_RESET_BIT;
2601 case HNAE3_GLOBAL_RESET:
2602 reg = HCLGE_GLOBAL_RESET_REG;
2603 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2605 case HNAE3_CORE_RESET:
2606 reg = HCLGE_GLOBAL_RESET_REG;
2607 reg_bit = HCLGE_CORE_RESET_BIT;
2609 case HNAE3_FUNC_RESET:
2610 reg = HCLGE_FUN_RST_ING;
2611 reg_bit = HCLGE_FUN_RST_ING_B;
2613 case HNAE3_FLR_RESET:
2616 dev_err(&hdev->pdev->dev,
2617 "Wait for unsupported reset type: %d\n",
2622 if (hdev->reset_type == HNAE3_FLR_RESET) {
2623 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2624 cnt++ < HCLGE_RESET_WAIT_CNT)
2625 msleep(HCLGE_RESET_WAIT_MS);
2627 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2628 dev_err(&hdev->pdev->dev,
2629 "flr wait timeout: %d\n", cnt);
2636 val = hclge_read_dev(&hdev->hw, reg);
2637 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2638 msleep(HCLGE_RESET_WAIT_MS);
2639 val = hclge_read_dev(&hdev->hw, reg);
2643 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2644 dev_warn(&hdev->pdev->dev,
2645 "Wait for reset timeout: %d\n", hdev->reset_type);
2652 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2654 struct hclge_vf_rst_cmd *req;
2655 struct hclge_desc desc;
2657 req = (struct hclge_vf_rst_cmd *)desc.data;
2658 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2659 req->dest_vfid = func_id;
2664 return hclge_cmd_send(&hdev->hw, &desc, 1);
2667 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2671 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2672 struct hclge_vport *vport = &hdev->vport[i];
2675 /* Send cmd to set/clear VF's FUNC_RST_ING */
2676 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2678 dev_err(&hdev->pdev->dev,
2679 "set vf(%d) rst failed %d!\n",
2680 vport->vport_id, ret);
2684 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2687 /* Inform VF to process the reset.
2688 * hclge_inform_reset_assert_to_vf may fail if VF
2689 * driver is not loaded.
2691 ret = hclge_inform_reset_assert_to_vf(vport);
2693 dev_warn(&hdev->pdev->dev,
2694 "inform reset to vf(%d) failed %d!\n",
2695 vport->vport_id, ret);
2701 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2703 struct hclge_desc desc;
2704 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2707 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2708 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2709 req->fun_reset_vfid = func_id;
2711 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2713 dev_err(&hdev->pdev->dev,
2714 "send function reset cmd fail, status =%d\n", ret);
2719 static void hclge_do_reset(struct hclge_dev *hdev)
2721 struct hnae3_handle *handle = &hdev->vport[0].nic;
2722 struct pci_dev *pdev = hdev->pdev;
2725 if (hclge_get_hw_reset_stat(handle)) {
2726 dev_info(&pdev->dev, "Hardware reset not finished\n");
2727 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
2728 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
2729 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
2733 switch (hdev->reset_type) {
2734 case HNAE3_GLOBAL_RESET:
2735 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2736 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2737 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2738 dev_info(&pdev->dev, "Global Reset requested\n");
2740 case HNAE3_CORE_RESET:
2741 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2742 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2743 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2744 dev_info(&pdev->dev, "Core Reset requested\n");
2746 case HNAE3_FUNC_RESET:
2747 dev_info(&pdev->dev, "PF Reset requested\n");
2748 /* schedule again to check later */
2749 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2750 hclge_reset_task_schedule(hdev);
2752 case HNAE3_FLR_RESET:
2753 dev_info(&pdev->dev, "FLR requested\n");
2754 /* schedule again to check later */
2755 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2756 hclge_reset_task_schedule(hdev);
2759 dev_warn(&pdev->dev,
2760 "Unsupported reset type: %d\n", hdev->reset_type);
2765 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2766 unsigned long *addr)
2768 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2770 /* first, resolve any unknown reset type to the known type(s) */
2771 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2772 /* we will intentionally ignore any errors from this function
2773 * as we will end up in *some* reset request in any case
2775 hclge_handle_hw_msix_error(hdev, addr);
2776 clear_bit(HNAE3_UNKNOWN_RESET, addr);
2777 /* We deferred clearing the error event which caused the
2778 * interrupt since it was not possible to do that in
2779 * interrupt context (and this is the reason we introduced
2780 * the new UNKNOWN reset type). Now that the errors have
2781 * been handled and cleared in hardware, we can safely
2782 * re-enable interrupts. This is an exception to the norm.
2784 hclge_enable_vector(&hdev->misc_vector, true);
2787 /* return the highest priority reset level amongst all */
2788 if (test_bit(HNAE3_IMP_RESET, addr)) {
2789 rst_level = HNAE3_IMP_RESET;
2790 clear_bit(HNAE3_IMP_RESET, addr);
2791 clear_bit(HNAE3_GLOBAL_RESET, addr);
2792 clear_bit(HNAE3_CORE_RESET, addr);
2793 clear_bit(HNAE3_FUNC_RESET, addr);
2794 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2795 rst_level = HNAE3_GLOBAL_RESET;
2796 clear_bit(HNAE3_GLOBAL_RESET, addr);
2797 clear_bit(HNAE3_CORE_RESET, addr);
2798 clear_bit(HNAE3_FUNC_RESET, addr);
2799 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
2800 rst_level = HNAE3_CORE_RESET;
2801 clear_bit(HNAE3_CORE_RESET, addr);
2802 clear_bit(HNAE3_FUNC_RESET, addr);
2803 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2804 rst_level = HNAE3_FUNC_RESET;
2805 clear_bit(HNAE3_FUNC_RESET, addr);
2806 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2807 rst_level = HNAE3_FLR_RESET;
2808 clear_bit(HNAE3_FLR_RESET, addr);
2811 if (hdev->reset_type != HNAE3_NONE_RESET &&
2812 rst_level < hdev->reset_type)
2813 return HNAE3_NONE_RESET;
2818 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2822 switch (hdev->reset_type) {
2823 case HNAE3_IMP_RESET:
2824 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2826 case HNAE3_GLOBAL_RESET:
2827 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2829 case HNAE3_CORE_RESET:
2830 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2839 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2840 hclge_enable_vector(&hdev->misc_vector, true);
2843 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2847 switch (hdev->reset_type) {
2848 case HNAE3_FUNC_RESET:
2850 case HNAE3_FLR_RESET:
2851 ret = hclge_set_all_vf_rst(hdev, true);
2860 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2865 switch (hdev->reset_type) {
2866 case HNAE3_FUNC_RESET:
2867 /* There is no mechanism for the PF to know whether the VF has
2868 * stopped IO; for now, just wait 100 ms for the VF to stop IO
2871 ret = hclge_func_reset_cmd(hdev, 0);
2873 dev_err(&hdev->pdev->dev,
2874 "asserting function reset fail %d!\n", ret);
2878 /* After performing PF reset, it is not necessary to do any
2879 * mailbox handling or send any command to firmware, because
2880 * any mailbox handling or command to firmware is only valid
2881 * after hclge_cmd_init is called.
2883 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2885 case HNAE3_FLR_RESET:
2886 /* There is no mechanism for the PF to know whether the VF has
2887 * stopped IO; for now, just wait 100 ms for the VF to stop IO
2890 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2891 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2893 case HNAE3_IMP_RESET:
2894 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2895 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2896 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2902 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
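/* hclge_reset_err_handle - decide how to recover from a failed reset:
 * re-schedule the reset task while a reset is still pending or the
 * failure count is below MAX_RESET_FAIL_CNT, upgrade the reset level
 * through the reset timer, or give up.
 */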
2907 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2909 #define MAX_RESET_FAIL_CNT 5
2910 #define RESET_UPGRADE_DELAY_SEC 10
2912 if (hdev->reset_pending) {
2913 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2914 hdev->reset_pending);
2916 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2917 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2918 BIT(HCLGE_IMP_RESET_BIT))) {
2919 dev_info(&hdev->pdev->dev,
2920 "reset failed because IMP Reset is pending\n");
2921 hclge_clear_reset_cause(hdev);
2923 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2924 hdev->reset_fail_cnt++;
2926 set_bit(hdev->reset_type, &hdev->reset_pending);
2927 dev_info(&hdev->pdev->dev,
2928 "re-schedule to wait for hw reset done\n");
2932 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2933 hclge_clear_reset_cause(hdev);
2934 mod_timer(&hdev->reset_timer,
2935 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2940 hclge_clear_reset_cause(hdev);
2941 dev_err(&hdev->pdev->dev, "Reset fail!\n");
2945 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2949 switch (hdev->reset_type) {
2950 case HNAE3_FUNC_RESET:
2952 case HNAE3_FLR_RESET:
2953 ret = hclge_set_all_vf_rst(hdev, false);
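/* hclge_reset - the main reset sequence: notify the NIC/RoCE clients
 * down, assert the reset and wait for the hardware to finish, then
 * re-initialize the ae device and restore and bring the clients back
 * up. On failure, hclge_reset_err_handle() decides whether to retry.
 */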
2962 static void hclge_reset(struct hclge_dev *hdev)
2964 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2965 bool is_timeout = false;
2968 /* Initialize ae_dev reset status as well, in case the enet layer wants
2969 * to know if the device is undergoing reset
2971 ae_dev->reset_type = hdev->reset_type;
2972 hdev->reset_count++;
2973 /* perform reset of the stack & ae device for a client */
2974 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2978 ret = hclge_reset_prepare_down(hdev);
2983 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2985 goto err_reset_lock;
2989 ret = hclge_reset_prepare_wait(hdev);
2993 if (hclge_reset_wait(hdev)) {
2998 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3003 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3005 goto err_reset_lock;
3007 ret = hclge_reset_ae_dev(hdev->ae_dev);
3009 goto err_reset_lock;
3011 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3013 goto err_reset_lock;
3015 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3017 goto err_reset_lock;
3019 hclge_clear_reset_cause(hdev);
3021 ret = hclge_reset_prepare_up(hdev);
3023 goto err_reset_lock;
3025 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3027 goto err_reset_lock;
3031 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3035 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3039 hdev->last_reset_time = jiffies;
3040 hdev->reset_fail_cnt = 0;
3041 ae_dev->reset_type = HNAE3_NONE_RESET;
3042 del_timer(&hdev->reset_timer);
3049 if (hclge_reset_err_handle(hdev, is_timeout))
3050 hclge_reset_task_schedule(hdev);
3053 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3055 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3056 struct hclge_dev *hdev = ae_dev->priv;
3058 /* We might end up getting called broadly because of 2 cases below:
3059 * 1. A recoverable error was conveyed through APEI and the only way
3060 * to bring back normalcy is to reset.
3061 * 2. A new reset request from the stack due to timeout
3063 * For the first case, the error event might not have an ae handle
3064 * available. Check whether this is a new reset request and we are not
3065 * here just because the last reset attempt did not succeed and the
3066 * watchdog hit us again. We will know this if the last reset request
3067 * did not occur very recently (watchdog timer = 5*HZ; check after a
3068 * sufficiently large time, say 4*5*HZ).
3069 * In case of a new request we reset the "reset level" to PF reset.
3070 * And if it is a repeat of the most recent request, we want to make
3071 * sure we throttle it, so we will not allow it again before 3*HZ.
3074 handle = &hdev->vport[0].nic;
3076 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3078 else if (hdev->default_reset_request)
3080 hclge_get_reset_level(hdev,
3081 &hdev->default_reset_request);
3082 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3083 hdev->reset_level = HNAE3_FUNC_RESET;
3085 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
3088 /* request reset & schedule reset task */
3089 set_bit(hdev->reset_level, &hdev->reset_request);
3090 hclge_reset_task_schedule(hdev);
3092 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3093 hdev->reset_level++;
3096 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3097 enum hnae3_reset_type rst_type)
3099 struct hclge_dev *hdev = ae_dev->priv;
3101 set_bit(rst_type, &hdev->default_reset_request);
3104 static void hclge_reset_timer(struct timer_list *t)
3106 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3108 dev_info(&hdev->pdev->dev,
3109 "triggering global reset in reset timer\n");
3110 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3111 hclge_reset_event(hdev->pdev, NULL);
3114 static void hclge_reset_subtask(struct hclge_dev *hdev)
3116 /* check if there is any ongoing reset in the hardware. This status can
3117 * be checked from reset_pending. If there is, we need to wait for the
3118 * hardware to complete the reset.
3119 * a. If we are able to figure out in reasonable time that the hardware
3120 * has fully reset, we can proceed with driver and client
3121 * reinitialization.
3122 * b. else, we can come back later to check this status, so re-schedule
3123 * for now.
3125 hdev->last_reset_time = jiffies;
3126 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3127 if (hdev->reset_type != HNAE3_NONE_RESET)
3130 /* check if we got any *new* reset requests to be honored */
3131 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3132 if (hdev->reset_type != HNAE3_NONE_RESET)
3133 hclge_do_reset(hdev);
3135 hdev->reset_type = HNAE3_NONE_RESET;
3138 static void hclge_reset_service_task(struct work_struct *work)
3140 struct hclge_dev *hdev =
3141 container_of(work, struct hclge_dev, rst_service_task);
3143 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3146 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3148 hclge_reset_subtask(hdev);
3150 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3153 static void hclge_mailbox_service_task(struct work_struct *work)
3155 struct hclge_dev *hdev =
3156 container_of(work, struct hclge_dev, mbx_service_task);
3158 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3161 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3163 hclge_mbx_handler(hdev);
3165 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3168 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3172 /* start from vport 1, since vport 0 (the PF) is always alive */
3173 for (i = 1; i < hdev->num_alloc_vport; i++) {
3174 struct hclge_vport *vport = &hdev->vport[i];
3176 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3177 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3179 /* If vf is not alive, set to default value */
3180 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3181 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3185 static void hclge_service_task(struct work_struct *work)
3187 struct hclge_dev *hdev =
3188 container_of(work, struct hclge_dev, service_task);
3190 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3191 hclge_update_stats_for_all(hdev);
3192 hdev->hw_stats.stats_timer = 0;
3195 hclge_update_speed_duplex(hdev);
3196 hclge_update_link_status(hdev);
3197 hclge_update_vport_alive(hdev);
3198 hclge_service_complete(hdev);
3201 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3203 /* VF handle has no client */
3204 if (!handle->client)
3205 return container_of(handle, struct hclge_vport, nic);
3206 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3207 return container_of(handle, struct hclge_vport, roce);
3209 return container_of(handle, struct hclge_vport, nic);
3212 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3213 struct hnae3_vector_info *vector_info)
3215 struct hclge_vport *vport = hclge_get_vport(handle);
3216 struct hnae3_vector_info *vector = vector_info;
3217 struct hclge_dev *hdev = vport->back;
3221 vector_num = min(hdev->num_msi_left, vector_num);
3223 for (j = 0; j < vector_num; j++) {
3224 for (i = 1; i < hdev->num_msi; i++) {
3225 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3226 vector->vector = pci_irq_vector(hdev->pdev, i);
3227 vector->io_addr = hdev->hw.io_base +
3228 HCLGE_VECTOR_REG_BASE +
3229 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3231 HCLGE_VECTOR_VF_OFFSET;
3232 hdev->vector_status[i] = vport->vport_id;
3233 hdev->vector_irq[i] = vector->vector;
3242 hdev->num_msi_left -= alloc;
3243 hdev->num_msi_used += alloc;
3248 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3252 for (i = 0; i < hdev->num_msi; i++)
3253 if (vector == hdev->vector_irq[i])
3259 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3261 struct hclge_vport *vport = hclge_get_vport(handle);
3262 struct hclge_dev *hdev = vport->back;
3265 vector_id = hclge_get_vector_index(hdev, vector);
3266 if (vector_id < 0) {
3267 dev_err(&hdev->pdev->dev,
3268 "Get vector index fail. vector_id =%d\n", vector_id);
3272 hclge_free_vector(hdev, vector_id);
3277 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3279 return HCLGE_RSS_KEY_SIZE;
3282 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3284 return HCLGE_RSS_IND_TBL_SIZE;
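/* hclge_set_rss_algo_key - program the RSS hash algorithm and key.
 * The key is written in three descriptors of HCLGE_RSS_HASH_KEY_NUM
 * bytes each; the last descriptor carries the remainder of
 * HCLGE_RSS_KEY_SIZE.
 */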
3287 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3288 const u8 hfunc, const u8 *key)
3290 struct hclge_rss_config_cmd *req;
3291 struct hclge_desc desc;
3296 req = (struct hclge_rss_config_cmd *)desc.data;
3298 for (key_offset = 0; key_offset < 3; key_offset++) {
3299 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3302 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3303 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3305 if (key_offset == 2)
3307 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3309 key_size = HCLGE_RSS_HASH_KEY_NUM;
3311 memcpy(req->hash_key,
3312 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3314 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3316 dev_err(&hdev->pdev->dev,
3317 "Configure RSS config fail, status = %d\n",
3325 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3327 struct hclge_rss_indirection_table_cmd *req;
3328 struct hclge_desc desc;
3332 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3334 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3335 hclge_cmd_setup_basic_desc
3336 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3338 req->start_table_index =
3339 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3340 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3342 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3343 req->rss_result[j] =
3344 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3346 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3348 dev_err(&hdev->pdev->dev,
3349 "Configure rss indir table fail,status = %d\n",
3357 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3358 u16 *tc_size, u16 *tc_offset)
3360 struct hclge_rss_tc_mode_cmd *req;
3361 struct hclge_desc desc;
3365 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3366 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3368 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3371 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3372 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3373 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3374 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3375 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3377 req->rss_tc_mode[i] = cpu_to_le16(mode);
3380 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3382 dev_err(&hdev->pdev->dev,
3383 "Configure rss tc mode fail, status = %d\n", ret);
3388 static void hclge_get_rss_type(struct hclge_vport *vport)
3390 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3391 vport->rss_tuple_sets.ipv4_udp_en ||
3392 vport->rss_tuple_sets.ipv4_sctp_en ||
3393 vport->rss_tuple_sets.ipv6_tcp_en ||
3394 vport->rss_tuple_sets.ipv6_udp_en ||
3395 vport->rss_tuple_sets.ipv6_sctp_en)
3396 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3397 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3398 vport->rss_tuple_sets.ipv6_fragment_en)
3399 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3401 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3404 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3406 struct hclge_rss_input_tuple_cmd *req;
3407 struct hclge_desc desc;
3410 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3412 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3414 /* Get the tuple cfg from the PF */
3415 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3416 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3417 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3418 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3419 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3420 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3421 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3422 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3423 hclge_get_rss_type(&hdev->vport[0]);
3424 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3426 dev_err(&hdev->pdev->dev,
3427 "Configure rss input fail, status = %d\n", ret);
3431 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3434 struct hclge_vport *vport = hclge_get_vport(handle);
3437 /* Get hash algorithm */
3439 switch (vport->rss_algo) {
3440 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3441 *hfunc = ETH_RSS_HASH_TOP;
3443 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3444 *hfunc = ETH_RSS_HASH_XOR;
3447 *hfunc = ETH_RSS_HASH_UNKNOWN;
3452 /* Get the RSS Key required by the user */
3454 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3456 /* Get indirect table */
3458 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3459 indir[i] = vport->rss_indirection_tbl[i];
3464 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3465 const u8 *key, const u8 hfunc)
3467 struct hclge_vport *vport = hclge_get_vport(handle);
3468 struct hclge_dev *hdev = vport->back;
3472 /* Set the RSS hash key if specified by the user */
3475 case ETH_RSS_HASH_TOP:
3476 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3478 case ETH_RSS_HASH_XOR:
3479 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3481 case ETH_RSS_HASH_NO_CHANGE:
3482 hash_algo = vport->rss_algo;
3488 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3492 /* Update the shadow RSS key with the user-specified value */
3493 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3494 vport->rss_algo = hash_algo;
3497 /* Update the shadow RSS table with user specified qids */
3498 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3499 vport->rss_indirection_tbl[i] = indir[i];
3501 /* Update the hardware */
3502 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3505 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3507 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3509 if (nfc->data & RXH_L4_B_2_3)
3510 hash_sets |= HCLGE_D_PORT_BIT;
3512 hash_sets &= ~HCLGE_D_PORT_BIT;
3514 if (nfc->data & RXH_IP_SRC)
3515 hash_sets |= HCLGE_S_IP_BIT;
3517 hash_sets &= ~HCLGE_S_IP_BIT;
3519 if (nfc->data & RXH_IP_DST)
3520 hash_sets |= HCLGE_D_IP_BIT;
3522 hash_sets &= ~HCLGE_D_IP_BIT;
3524 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3525 hash_sets |= HCLGE_V_TAG_BIT;
3530 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3531 struct ethtool_rxnfc *nfc)
3533 struct hclge_vport *vport = hclge_get_vport(handle);
3534 struct hclge_dev *hdev = vport->back;
3535 struct hclge_rss_input_tuple_cmd *req;
3536 struct hclge_desc desc;
3540 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3541 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3544 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3545 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3547 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3548 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3549 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3550 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3551 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3552 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3553 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3554 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3556 tuple_sets = hclge_get_rss_hash_bits(nfc);
3557 switch (nfc->flow_type) {
3559 req->ipv4_tcp_en = tuple_sets;
3562 req->ipv6_tcp_en = tuple_sets;
3565 req->ipv4_udp_en = tuple_sets;
3568 req->ipv6_udp_en = tuple_sets;
3571 req->ipv4_sctp_en = tuple_sets;
3574 if ((nfc->data & RXH_L4_B_0_1) ||
3575 (nfc->data & RXH_L4_B_2_3))
3578 req->ipv6_sctp_en = tuple_sets;
3581 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3584 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3590 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3592 dev_err(&hdev->pdev->dev,
3593 "Set rss tuple fail, status = %d\n", ret);
3597 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3598 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3599 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3600 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3601 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3602 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3603 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3604 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3605 hclge_get_rss_type(vport);
3609 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3610 struct ethtool_rxnfc *nfc)
3612 struct hclge_vport *vport = hclge_get_vport(handle);
3617 switch (nfc->flow_type) {
3619 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3622 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3625 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3628 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3631 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3634 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3638 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3647 if (tuple_sets & HCLGE_D_PORT_BIT)
3648 nfc->data |= RXH_L4_B_2_3;
3649 if (tuple_sets & HCLGE_S_PORT_BIT)
3650 nfc->data |= RXH_L4_B_0_1;
3651 if (tuple_sets & HCLGE_D_IP_BIT)
3652 nfc->data |= RXH_IP_DST;
3653 if (tuple_sets & HCLGE_S_IP_BIT)
3654 nfc->data |= RXH_IP_SRC;
3659 static int hclge_get_tc_size(struct hnae3_handle *handle)
3661 struct hclge_vport *vport = hclge_get_vport(handle);
3662 struct hclge_dev *hdev = vport->back;
3664 return hdev->rss_size_max;
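/* hclge_rss_init_hw - push the cached per-vport RSS state (indirection
 * table, hash key and algorithm, input tuples) to hardware, then set up
 * the per-TC RSS size and offset from the PF's rss_size.
 */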
3667 int hclge_rss_init_hw(struct hclge_dev *hdev)
3669 struct hclge_vport *vport = hdev->vport;
3670 u8 *rss_indir = vport[0].rss_indirection_tbl;
3671 u16 rss_size = vport[0].alloc_rss_size;
3672 u8 *key = vport[0].rss_hash_key;
3673 u8 hfunc = vport[0].rss_algo;
3674 u16 tc_offset[HCLGE_MAX_TC_NUM];
3675 u16 tc_valid[HCLGE_MAX_TC_NUM];
3676 u16 tc_size[HCLGE_MAX_TC_NUM];
3680 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3684 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3688 ret = hclge_set_rss_input_tuple(hdev);
3692 /* Each TC has the same queue size, and the tc_size set to hardware
3693 * is the log2 of rss_size rounded up to a power of two; the actual
3694 * queue size is limited by the indirection table.
3696 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3697 dev_err(&hdev->pdev->dev,
3698 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3703 roundup_size = roundup_pow_of_two(rss_size);
3704 roundup_size = ilog2(roundup_size);
3706 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3709 if (!(hdev->hw_tc_map & BIT(i)))
3713 tc_size[i] = roundup_size;
3714 tc_offset[i] = rss_size * i;
3717 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
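/* hclge_rss_indir_init_cfg - fill each vport's default indirection
 * table by spreading entries round-robin over its alloc_rss_size
 * queues.
 */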
3720 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3722 struct hclge_vport *vport = hdev->vport;
3725 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3726 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3727 vport[j].rss_indirection_tbl[i] =
3728 i % vport[j].alloc_rss_size;
3732 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3734 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3735 struct hclge_vport *vport = hdev->vport;
3737 if (hdev->pdev->revision >= 0x21)
3738 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3740 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3741 vport[i].rss_tuple_sets.ipv4_tcp_en =
3742 HCLGE_RSS_INPUT_TUPLE_OTHER;
3743 vport[i].rss_tuple_sets.ipv4_udp_en =
3744 HCLGE_RSS_INPUT_TUPLE_OTHER;
3745 vport[i].rss_tuple_sets.ipv4_sctp_en =
3746 HCLGE_RSS_INPUT_TUPLE_SCTP;
3747 vport[i].rss_tuple_sets.ipv4_fragment_en =
3748 HCLGE_RSS_INPUT_TUPLE_OTHER;
3749 vport[i].rss_tuple_sets.ipv6_tcp_en =
3750 HCLGE_RSS_INPUT_TUPLE_OTHER;
3751 vport[i].rss_tuple_sets.ipv6_udp_en =
3752 HCLGE_RSS_INPUT_TUPLE_OTHER;
3753 vport[i].rss_tuple_sets.ipv6_sctp_en =
3754 HCLGE_RSS_INPUT_TUPLE_SCTP;
3755 vport[i].rss_tuple_sets.ipv6_fragment_en =
3756 HCLGE_RSS_INPUT_TUPLE_OTHER;
3758 vport[i].rss_algo = rss_algo;
3760 memcpy(vport[i].rss_hash_key, hclge_hash_key,
3761 HCLGE_RSS_KEY_SIZE);
3764 hclge_rss_indir_init_cfg(hdev);
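/* hclge_bind_ring_with_vector - map (en == true) or unmap (en == false)
 * the rings in ring_chain to/from the given vector, batching up to
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries per command descriptor.
 */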
3767 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3768 int vector_id, bool en,
3769 struct hnae3_ring_chain_node *ring_chain)
3771 struct hclge_dev *hdev = vport->back;
3772 struct hnae3_ring_chain_node *node;
3773 struct hclge_desc desc;
3774 struct hclge_ctrl_vector_chain_cmd *req
3775 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3776 enum hclge_cmd_status status;
3777 enum hclge_opcode_type op;
3778 u16 tqp_type_and_id;
3781 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3782 hclge_cmd_setup_basic_desc(&desc, op, false);
3783 req->int_vector_id = vector_id;
3786 for (node = ring_chain; node; node = node->next) {
3787 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3788 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3790 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3791 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3792 HCLGE_TQP_ID_S, node->tqp_index);
3793 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3795 hnae3_get_field(node->int_gl_idx,
3796 HNAE3_RING_GL_IDX_M,
3797 HNAE3_RING_GL_IDX_S));
3798 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3799 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3800 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3801 req->vfid = vport->vport_id;
3803 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3805 dev_err(&hdev->pdev->dev,
3806 "Map TQP fail, status is %d.\n",
3812 hclge_cmd_setup_basic_desc(&desc,
3815 req->int_vector_id = vector_id;
3820 req->int_cause_num = i;
3821 req->vfid = vport->vport_id;
3822 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3824 dev_err(&hdev->pdev->dev,
3825 "Map TQP fail, status is %d.\n", status);
3833 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3835 struct hnae3_ring_chain_node *ring_chain)
3837 struct hclge_vport *vport = hclge_get_vport(handle);
3838 struct hclge_dev *hdev = vport->back;
3841 vector_id = hclge_get_vector_index(hdev, vector);
3842 if (vector_id < 0) {
3843 dev_err(&hdev->pdev->dev,
3844 "Get vector index fail. vector_id =%d\n", vector_id);
3848 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3851 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3853 struct hnae3_ring_chain_node *ring_chain)
3855 struct hclge_vport *vport = hclge_get_vport(handle);
3856 struct hclge_dev *hdev = vport->back;
3859 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3862 vector_id = hclge_get_vector_index(hdev, vector);
3863 if (vector_id < 0) {
3864 dev_err(&handle->pdev->dev,
3865 "Get vector index fail. ret =%d\n", vector_id);
3869 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3871 dev_err(&handle->pdev->dev,
3872 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3879 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3880 struct hclge_promisc_param *param)
3882 struct hclge_promisc_cfg_cmd *req;
3883 struct hclge_desc desc;
3886 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3888 req = (struct hclge_promisc_cfg_cmd *)desc.data;
3889 req->vf_id = param->vf_id;
3891 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3892 * pdev revision(0x20); newer revisions support them. Setting these
3893 * two fields does not cause an error when the driver sends the
3894 * command to firmware on revision(0x20).
3896 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3897 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3899 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3901 dev_err(&hdev->pdev->dev,
3902 "Set promisc mode fail, status is %d.\n", ret);
3907 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3908 bool en_mc, bool en_bc, int vport_id)
3913 memset(param, 0, sizeof(struct hclge_promisc_param));
3915 param->enable = HCLGE_PROMISC_EN_UC;
3917 param->enable |= HCLGE_PROMISC_EN_MC;
3919 param->enable |= HCLGE_PROMISC_EN_BC;
3920 param->vf_id = vport_id;
3923 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3926 struct hclge_vport *vport = hclge_get_vport(handle);
3927 struct hclge_dev *hdev = vport->back;
3928 struct hclge_promisc_param param;
3929 bool en_bc_pmc = true;
3931 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
3932 * is always bypassed. So broadcast promisc should be disabled until
3933 * the user enables promisc mode
3935 if (handle->pdev->revision == 0x20)
3936 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
3938 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
3940 return hclge_cmd_set_promisc_mode(hdev, ¶m);
3943 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3945 struct hclge_get_fd_mode_cmd *req;
3946 struct hclge_desc desc;
3949 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3951 req = (struct hclge_get_fd_mode_cmd *)desc.data;
3953 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3955 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3959 *fd_mode = req->mode;
3964 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3965 u32 *stage1_entry_num,
3966 u32 *stage2_entry_num,
3967 u16 *stage1_counter_num,
3968 u16 *stage2_counter_num)
3970 struct hclge_get_fd_allocation_cmd *req;
3971 struct hclge_desc desc;
3974 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3976 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3978 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3980 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3985 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3986 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3987 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3988 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3993 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3995 struct hclge_set_fd_key_config_cmd *req;
3996 struct hclge_fd_key_cfg *stage;
3997 struct hclge_desc desc;
4000 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4002 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4003 stage = &hdev->fd_cfg.key_cfg[stage_num];
4004 req->stage = stage_num;
4005 req->key_select = stage->key_sel;
4006 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4007 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4008 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4009 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4010 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4011 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4013 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4015 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4020 static int hclge_init_fd_config(struct hclge_dev *hdev)
4022 #define LOW_2_WORDS 0x03
4023 struct hclge_fd_key_cfg *key_cfg;
4026 if (!hnae3_dev_fd_supported(hdev))
4029 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4033 switch (hdev->fd_cfg.fd_mode) {
4034 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4035 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4037 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4038 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4041 dev_err(&hdev->pdev->dev,
4042 "Unsupported flow director mode %d\n",
4043 hdev->fd_cfg.fd_mode);
4047 hdev->fd_cfg.proto_support =
4048 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4049 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4050 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4051 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4052 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4053 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4054 key_cfg->outer_sipv6_word_en = 0;
4055 key_cfg->outer_dipv6_word_en = 0;
4057 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4058 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4059 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4060 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4062 /* If the max 400-bit key is used, we can also support tuples for ether type */
4063 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4064 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4065 key_cfg->tuple_active |=
4066 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4069 /* roce_type is used to filter roce frames
4070 * dst_vport is used to specify the rule
4072 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4074 ret = hclge_get_fd_allocation(hdev,
4075 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4076 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4077 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4078 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4082 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
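/* hclge_fd_tcam_config - write one flow director TCAM entry. The key
 * spans three descriptors chained with HCLGE_CMD_FLAG_NEXT; sel_x
 * selects whether the x or the y half of the TCAM pair is written.
 */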
4085 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4086 int loc, u8 *key, bool is_add)
4088 struct hclge_fd_tcam_config_1_cmd *req1;
4089 struct hclge_fd_tcam_config_2_cmd *req2;
4090 struct hclge_fd_tcam_config_3_cmd *req3;
4091 struct hclge_desc desc[3];
4094 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4095 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4096 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4097 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4098 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4100 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4101 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4102 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4104 req1->stage = stage;
4105 req1->xy_sel = sel_x ? 1 : 0;
4106 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4107 req1->index = cpu_to_le32(loc);
4108 req1->entry_vld = sel_x ? is_add : 0;
4111 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4112 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4113 sizeof(req2->tcam_data));
4114 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4115 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4118 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4120 dev_err(&hdev->pdev->dev,
4121 "config tcam key fail, ret=%d\n",
4127 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4128 struct hclge_fd_ad_data *action)
4130 struct hclge_fd_ad_config_cmd *req;
4131 struct hclge_desc desc;
4135 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4137 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4138 req->index = cpu_to_le32(loc);
4141 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4142 action->write_rule_id_to_bd);
4143 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4146 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4147 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4148 action->forward_to_direct_queue);
4149 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4151 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4152 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4153 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4154 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4155 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4156 action->next_input_key);
4158 req->ad_data = cpu_to_le64(ad_data);
4159 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4161 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
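/* hclge_fd_convert_tuple - encode a single tuple of the rule into the
 * TCAM x/y key pair with the calc_x()/calc_y() helpers, honoring the
 * per-tuple mask. Returns false when the tuple is unused so the caller
 * leaves those key bytes zeroed.
 */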
4166 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4167 struct hclge_fd_rule *rule)
4169 u16 tmp_x_s, tmp_y_s;
4170 u32 tmp_x_l, tmp_y_l;
4173 if (rule->unused_tuple & tuple_bit)
4176 switch (tuple_bit) {
4179 case BIT(INNER_DST_MAC):
4180 for (i = 0; i < 6; i++) {
4181 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4182 rule->tuples_mask.dst_mac[i]);
4183 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4184 rule->tuples_mask.dst_mac[i]);
4188 case BIT(INNER_SRC_MAC):
4189 for (i = 0; i < 6; i++) {
4190 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4191 rule->tuples_mask.src_mac[i]);
4192 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4193 rule->tuples_mask.src_mac[i]);
4197 case BIT(INNER_VLAN_TAG_FST):
4198 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4199 rule->tuples_mask.vlan_tag1);
4200 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4201 rule->tuples_mask.vlan_tag1);
4202 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4203 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4206 case BIT(INNER_ETH_TYPE):
4207 calc_x(tmp_x_s, rule->tuples.ether_proto,
4208 rule->tuples_mask.ether_proto);
4209 calc_y(tmp_y_s, rule->tuples.ether_proto,
4210 rule->tuples_mask.ether_proto);
4211 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4212 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4215 case BIT(INNER_IP_TOS):
4216 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4217 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4220 case BIT(INNER_IP_PROTO):
4221 calc_x(*key_x, rule->tuples.ip_proto,
4222 rule->tuples_mask.ip_proto);
4223 calc_y(*key_y, rule->tuples.ip_proto,
4224 rule->tuples_mask.ip_proto);
4227 case BIT(INNER_SRC_IP):
4228 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4229 rule->tuples_mask.src_ip[3]);
4230 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4231 rule->tuples_mask.src_ip[3]);
4232 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4233 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4236 case BIT(INNER_DST_IP):
4237 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4238 rule->tuples_mask.dst_ip[3]);
4239 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4240 rule->tuples_mask.dst_ip[3]);
4241 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4242 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4245 case BIT(INNER_SRC_PORT):
4246 calc_x(tmp_x_s, rule->tuples.src_port,
4247 rule->tuples_mask.src_port);
4248 calc_y(tmp_y_s, rule->tuples.src_port,
4249 rule->tuples_mask.src_port);
4250 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4251 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4254 case BIT(INNER_DST_PORT):
4255 calc_x(tmp_x_s, rule->tuples.dst_port,
4256 rule->tuples_mask.dst_port);
4257 calc_y(tmp_y_s, rule->tuples.dst_port,
4258 rule->tuples_mask.dst_port);
4259 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4260 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4268 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4269 u8 vf_id, u8 network_port_id)
4271 u32 port_number = 0;
4273 if (port_type == HOST_PORT) {
4274 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4276 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4278 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4280 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4281 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4282 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4288 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4289 __le32 *key_x, __le32 *key_y,
4290 struct hclge_fd_rule *rule)
4292 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4293 u8 cur_pos = 0, tuple_size, shift_bits;
4296 for (i = 0; i < MAX_META_DATA; i++) {
4297 tuple_size = meta_data_key_info[i].key_length;
4298 tuple_bit = key_cfg->meta_data_active & BIT(i);
4300 switch (tuple_bit) {
4301 case BIT(ROCE_TYPE):
4302 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4303 cur_pos += tuple_size;
4305 case BIT(DST_VPORT):
4306 port_number = hclge_get_port_number(HOST_PORT, 0,
4308 hnae3_set_field(meta_data,
4309 GENMASK(cur_pos + tuple_size, cur_pos),
4310 cur_pos, port_number);
4311 cur_pos += tuple_size;
4318 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4319 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4320 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4322 *key_x = cpu_to_le32(tmp_x << shift_bits);
4323 *key_y = cpu_to_le32(tmp_y << shift_bits);
4326 /* A complete key consists of a meta data key and a tuple key.
4327 * The meta data key is stored in the MSB region, the tuple key is
4328 * stored in the LSB region, and unused bits are filled with 0.
4330 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4331 struct hclge_fd_rule *rule)
4333 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4334 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4335 u8 *cur_key_x, *cur_key_y;
4336 int i, ret, tuple_size;
4337 u8 meta_data_region;
4339 memset(key_x, 0, sizeof(key_x));
4340 memset(key_y, 0, sizeof(key_y));
4344 for (i = 0; i < MAX_TUPLE; i++) {
4348 tuple_size = tuple_key_info[i].key_length / 8;
4349 check_tuple = key_cfg->tuple_active & BIT(i);
4351 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4354 cur_key_x += tuple_size;
4355 cur_key_y += tuple_size;
4359 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4360 MAX_META_DATA_LENGTH / 8;
4362 hclge_fd_convert_meta_data(key_cfg,
4363 (__le32 *)(key_x + meta_data_region),
4364 (__le32 *)(key_y + meta_data_region),
4367 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4370 dev_err(&hdev->pdev->dev,
4371 "fd key_y config fail, loc=%d, ret=%d\n",
4372 rule->location, ret);
4376 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4379 dev_err(&hdev->pdev->dev,
4380 "fd key_x config fail, loc=%d, ret=%d\n",
4381 rule->location, ret);
4385 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4386 struct hclge_fd_rule *rule)
4388 struct hclge_fd_ad_data ad_data;
4390 ad_data.ad_id = rule->location;
4392 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4393 ad_data.drop_packet = true;
4394 ad_data.forward_to_direct_queue = false;
4395 ad_data.queue_id = 0;
4397 ad_data.drop_packet = false;
4398 ad_data.forward_to_direct_queue = true;
4399 ad_data.queue_id = rule->queue_id;
4402 ad_data.use_counter = false;
4403 ad_data.counter_id = 0;
4405 ad_data.use_next_stage = false;
4406 ad_data.next_input_key = 0;
4408 ad_data.write_rule_id_to_bd = true;
4409 ad_data.rule_id = rule->location;
4411 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
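/* hclge_fd_check_spec - validate an ethtool flow spec against the flow
 * director capabilities and collect the tuple bits the spec leaves
 * unused into *unused.
 */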
4414 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4415 struct ethtool_rx_flow_spec *fs, u32 *unused)
4417 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4418 struct ethtool_usrip4_spec *usr_ip4_spec;
4419 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4420 struct ethtool_usrip6_spec *usr_ip6_spec;
4421 struct ethhdr *ether_spec;
4423 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4426 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4429 if ((fs->flow_type & FLOW_EXT) &&
4430 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4431 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4435 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4439 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4440 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4442 if (!tcp_ip4_spec->ip4src)
4443 *unused |= BIT(INNER_SRC_IP);
4445 if (!tcp_ip4_spec->ip4dst)
4446 *unused |= BIT(INNER_DST_IP);
4448 if (!tcp_ip4_spec->psrc)
4449 *unused |= BIT(INNER_SRC_PORT);
4451 if (!tcp_ip4_spec->pdst)
4452 *unused |= BIT(INNER_DST_PORT);
4454 if (!tcp_ip4_spec->tos)
4455 *unused |= BIT(INNER_IP_TOS);
4459 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4460 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4461 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4463 if (!usr_ip4_spec->ip4src)
4464 *unused |= BIT(INNER_SRC_IP);
4466 if (!usr_ip4_spec->ip4dst)
4467 *unused |= BIT(INNER_DST_IP);
4469 if (!usr_ip4_spec->tos)
4470 *unused |= BIT(INNER_IP_TOS);
4472 if (!usr_ip4_spec->proto)
4473 *unused |= BIT(INNER_IP_PROTO);
4475 if (usr_ip4_spec->l4_4_bytes)
4478 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4485 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4486 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4489 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4490 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4491 *unused |= BIT(INNER_SRC_IP);
4493 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4494 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4495 *unused |= BIT(INNER_DST_IP);
4497 if (!tcp_ip6_spec->psrc)
4498 *unused |= BIT(INNER_SRC_PORT);
4500 if (!tcp_ip6_spec->pdst)
4501 *unused |= BIT(INNER_DST_PORT);
4503 if (tcp_ip6_spec->tclass)
4507 case IPV6_USER_FLOW:
4508 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4509 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4510 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4511 BIT(INNER_DST_PORT);
4513 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4514 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4515 *unused |= BIT(INNER_SRC_IP);
4517 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4518 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4519 *unused |= BIT(INNER_DST_IP);
4521 if (!usr_ip6_spec->l4_proto)
4522 *unused |= BIT(INNER_IP_PROTO);
4524 if (usr_ip6_spec->tclass)
4527 if (usr_ip6_spec->l4_4_bytes)
4532 ether_spec = &fs->h_u.ether_spec;
4533 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4534 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4535 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4537 if (is_zero_ether_addr(ether_spec->h_source))
4538 *unused |= BIT(INNER_SRC_MAC);
4540 if (is_zero_ether_addr(ether_spec->h_dest))
4541 *unused |= BIT(INNER_DST_MAC);
4543 if (!ether_spec->h_proto)
4544 *unused |= BIT(INNER_ETH_TYPE);
4551 if (fs->flow_type & FLOW_EXT) {
4552 if (fs->h_ext.vlan_etype)
4554 if (!fs->h_ext.vlan_tci)
4555 *unused |= BIT(INNER_VLAN_TAG_FST);
4557 if (fs->m_ext.vlan_tci) {
4558 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4562 *unused |= BIT(INNER_VLAN_TAG_FST);
4565 if (fs->flow_type & FLOW_MAC_EXT) {
4566 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4569 if (is_zero_ether_addr(fs->h_ext.h_dest))
4570 *unused |= BIT(INNER_DST_MAC);
4572 *unused &= ~(BIT(INNER_DST_MAC));
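/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * "unused" word built by hclge_fd_check_spec() is a bitmap of tuples the
 * user left unspecified; e.g. a TCP/IPv4 rule that only sets dst-port ends
 * up with the MAC, IP, src-port and TOS bits all set. Testing one tuple:
 */
static inline bool hclge_example_tuple_unused(u32 unused, u32 tuple)
{
        return !!(unused & BIT(tuple));
}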
4578 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4580 struct hclge_fd_rule *rule = NULL;
4581 struct hlist_node *node2;
4583 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4584 if (rule->location >= location)
4588 return rule && rule->location == location;
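/* Illustrative sketch: fd_rule_list is kept sorted by ascending location
 * (see hclge_fd_update_rule_list() below), so a lookup can stop at the
 * first node whose location is >= the target. A hypothetical variant
 * returning the matching rule instead of a bool:
 */
static struct hclge_fd_rule *hclge_example_find_rule(struct hclge_dev *hdev,
                                                     u16 location)
{
        struct hclge_fd_rule *rule;
        struct hlist_node *node;

        hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node)
                if (rule->location >= location)
                        return rule->location == location ? rule : NULL;

        return NULL;
}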
4591 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4592 struct hclge_fd_rule *new_rule,
4596 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4597 struct hlist_node *node2;
4599 if (is_add && !new_rule)
4602 hlist_for_each_entry_safe(rule, node2,
4603 &hdev->fd_rule_list, rule_node) {
4604 if (rule->location >= location)
4609 if (rule && rule->location == location) {
4610 hlist_del(&rule->rule_node);
4612 hdev->hclge_fd_rule_num--;
4617 } else if (!is_add) {
4618 dev_err(&hdev->pdev->dev,
4619 "delete fail, rule %d is inexistent\n",
4624 INIT_HLIST_NODE(&new_rule->rule_node);
4627 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4629 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4631 hdev->hclge_fd_rule_num++;
4636 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4637 struct ethtool_rx_flow_spec *fs,
4638 struct hclge_fd_rule *rule)
4640 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4642 switch (flow_type) {
4646 rule->tuples.src_ip[3] =
4647 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4648 rule->tuples_mask.src_ip[3] =
4649 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4651 rule->tuples.dst_ip[3] =
4652 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4653 rule->tuples_mask.dst_ip[3] =
4654 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4656 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4657 rule->tuples_mask.src_port =
4658 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4660 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4661 rule->tuples_mask.dst_port =
4662 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4664 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4665 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4667 rule->tuples.ether_proto = ETH_P_IP;
4668 rule->tuples_mask.ether_proto = 0xFFFF;
4672 rule->tuples.src_ip[3] =
4673 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4674 rule->tuples_mask.src_ip[3] =
4675 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4677 rule->tuples.dst_ip[3] =
4678 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4679 rule->tuples_mask.dst_ip[3] =
4680 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4682 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4683 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4685 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4686 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4688 rule->tuples.ether_proto = ETH_P_IP;
4689 rule->tuples_mask.ether_proto = 0xFFFF;
4695 be32_to_cpu_array(rule->tuples.src_ip,
4696 fs->h_u.tcp_ip6_spec.ip6src, 4);
4697 be32_to_cpu_array(rule->tuples_mask.src_ip,
4698 fs->m_u.tcp_ip6_spec.ip6src, 4);
4700 be32_to_cpu_array(rule->tuples.dst_ip,
4701 fs->h_u.tcp_ip6_spec.ip6dst, 4);
4702 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4703 fs->m_u.tcp_ip6_spec.ip6dst, 4);
4705 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4706 rule->tuples_mask.src_port =
4707 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4709 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4710 rule->tuples_mask.dst_port =
4711 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4713 rule->tuples.ether_proto = ETH_P_IPV6;
4714 rule->tuples_mask.ether_proto = 0xFFFF;
4717 case IPV6_USER_FLOW:
4718 be32_to_cpu_array(rule->tuples.src_ip,
4719 fs->h_u.usr_ip6_spec.ip6src, 4);
4720 be32_to_cpu_array(rule->tuples_mask.src_ip,
4721 fs->m_u.usr_ip6_spec.ip6src, 4);
4723 be32_to_cpu_array(rule->tuples.dst_ip,
4724 fs->h_u.usr_ip6_spec.ip6dst, 4);
4725 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4726 fs->m_u.usr_ip6_spec.ip6dst, 4);
4728 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4729 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4731 rule->tuples.ether_proto = ETH_P_IPV6;
4732 rule->tuples_mask.ether_proto = 0xFFFF;
4736 ether_addr_copy(rule->tuples.src_mac,
4737 fs->h_u.ether_spec.h_source);
4738 ether_addr_copy(rule->tuples_mask.src_mac,
4739 fs->m_u.ether_spec.h_source);
4741 ether_addr_copy(rule->tuples.dst_mac,
4742 fs->h_u.ether_spec.h_dest);
4743 ether_addr_copy(rule->tuples_mask.dst_mac,
4744 fs->m_u.ether_spec.h_dest);
4746 rule->tuples.ether_proto =
4747 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4748 rule->tuples_mask.ether_proto =
4749 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4756 switch (flow_type) {
4759 rule->tuples.ip_proto = IPPROTO_SCTP;
4760 rule->tuples_mask.ip_proto = 0xFF;
4764 rule->tuples.ip_proto = IPPROTO_TCP;
4765 rule->tuples_mask.ip_proto = 0xFF;
4769 rule->tuples.ip_proto = IPPROTO_UDP;
4770 rule->tuples_mask.ip_proto = 0xFF;
4776 if (fs->flow_type & FLOW_EXT) {
4777 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4778 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4781 if (fs->flow_type & FLOW_MAC_EXT) {
4782 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4783 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
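/* Illustrative sketch: ethtool hands addresses and ports over in network
 * byte order, while the tuple cache stores host-order values so the key
 * builder can use plain shifts and masks. IPv4 addresses are stored in
 * word 3 of the 4-word IP arrays (words 0..2 stay zero), letting IPv4
 * and IPv6 rules share one layout. Hypothetical helper:
 */
static inline void hclge_example_store_ipv4(u32 *ip_words, __be32 ip4)
{
        ip_words[3] = be32_to_cpu(ip4);         /* words 0..2 remain 0 */
}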
4789 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4790 struct ethtool_rxnfc *cmd)
4792 struct hclge_vport *vport = hclge_get_vport(handle);
4793 struct hclge_dev *hdev = vport->back;
4794 u16 dst_vport_id = 0, q_index = 0;
4795 struct ethtool_rx_flow_spec *fs;
4796 struct hclge_fd_rule *rule;
4801 if (!hnae3_dev_fd_supported(hdev))
4805 dev_warn(&hdev->pdev->dev,
4806 "Please enable flow director first\n");
4810 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4812 ret = hclge_fd_check_spec(hdev, fs, &unused);
4814 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4818 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4819 action = HCLGE_FD_ACTION_DROP_PACKET;
4821 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4822 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4825 if (vf > hdev->num_req_vfs) {
4826 dev_err(&hdev->pdev->dev,
4827 "Error: vf id (%d) > max vf num (%d)\n",
4828 vf, hdev->num_req_vfs);
4832 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4833 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4836 dev_err(&hdev->pdev->dev,
4837 "Error: queue id (%d) > max tqp num (%d)\n",
4842 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4846 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4850 ret = hclge_fd_get_tuple(hdev, fs, rule);
4854 rule->flow_type = fs->flow_type;
4856 rule->location = fs->location;
4857 rule->unused_tuple = unused;
4858 rule->vf_id = dst_vport_id;
4859 rule->queue_id = q_index;
4860 rule->action = action;
4862 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4866 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4870 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
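/* Usage sketch: hclge_add_fd_entry() is reached via ethtool's -N option.
 * Assuming a netdev named eth0 bound to this driver:
 *
 *   # steer TCP/IPv4 traffic for port 80 to queue 3, rule index 5
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 3 loc 5
 *
 *   # drop instead of steering: action -1 maps to RX_CLS_FLOW_DISC
 *   ethtool -N eth0 flow-type tcp4 dst-port 22 action -1 loc 6
 */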
4881 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4882 struct ethtool_rxnfc *cmd)
4884 struct hclge_vport *vport = hclge_get_vport(handle);
4885 struct hclge_dev *hdev = vport->back;
4886 struct ethtool_rx_flow_spec *fs;
4889 if (!hnae3_dev_fd_supported(hdev))
4892 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4894 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4897 if (!hclge_fd_rule_exist(hdev, fs->location)) {
4898 dev_err(&hdev->pdev->dev,
4899 "Delete fail, rule %d is inexistent\n",
4904 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4905 fs->location, NULL, false);
4909 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4913 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4916 struct hclge_vport *vport = hclge_get_vport(handle);
4917 struct hclge_dev *hdev = vport->back;
4918 struct hclge_fd_rule *rule;
4919 struct hlist_node *node;
4921 if (!hnae3_dev_fd_supported(hdev))
4925 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4927 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4928 rule->location, NULL, false);
4929 hlist_del(&rule->rule_node);
4931 hdev->hclge_fd_rule_num--;
4934 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4936 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4937 rule->location, NULL, false);
4941 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4943 struct hclge_vport *vport = hclge_get_vport(handle);
4944 struct hclge_dev *hdev = vport->back;
4945 struct hclge_fd_rule *rule;
4946 struct hlist_node *node;
4949 /* Return ok here, because reset error handling will check this
4950 * return value. If error is returned here, the reset process will
 * fail.
 */
4953 if (!hnae3_dev_fd_supported(hdev))
4956 /* if fd is disabled, should not restore it when reset */
4960 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4961 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4963 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4966 dev_warn(&hdev->pdev->dev,
4967 "Restore rule %d failed, remove it\n",
4969 hlist_del(&rule->rule_node);
4971 hdev->hclge_fd_rule_num--;
4977 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4978 struct ethtool_rxnfc *cmd)
4980 struct hclge_vport *vport = hclge_get_vport(handle);
4981 struct hclge_dev *hdev = vport->back;
4983 if (!hnae3_dev_fd_supported(hdev))
4986 cmd->rule_cnt = hdev->hclge_fd_rule_num;
4987 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4992 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4993 struct ethtool_rxnfc *cmd)
4995 struct hclge_vport *vport = hclge_get_vport(handle);
4996 struct hclge_fd_rule *rule = NULL;
4997 struct hclge_dev *hdev = vport->back;
4998 struct ethtool_rx_flow_spec *fs;
4999 struct hlist_node *node2;
5001 if (!hnae3_dev_fd_supported(hdev))
5004 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5006 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5007 if (rule->location >= fs->location)
5011 if (!rule || fs->location != rule->location)
5014 fs->flow_type = rule->flow_type;
5015 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5019 fs->h_u.tcp_ip4_spec.ip4src =
5020 cpu_to_be32(rule->tuples.src_ip[3]);
5021 fs->m_u.tcp_ip4_spec.ip4src =
5022 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5023 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5025 fs->h_u.tcp_ip4_spec.ip4dst =
5026 cpu_to_be32(rule->tuples.dst_ip[3]);
5027 fs->m_u.tcp_ip4_spec.ip4dst =
5028 rule->unused_tuple & BIT(INNER_DST_IP) ?
5029 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5031 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5032 fs->m_u.tcp_ip4_spec.psrc =
5033 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5034 0 : cpu_to_be16(rule->tuples_mask.src_port);
5036 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5037 fs->m_u.tcp_ip4_spec.pdst =
5038 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5039 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5041 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5042 fs->m_u.tcp_ip4_spec.tos =
5043 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5044 0 : rule->tuples_mask.ip_tos;
5048 fs->h_u.usr_ip4_spec.ip4src =
5049 cpu_to_be32(rule->tuples.src_ip[3]);
5050 fs->m_u.usr_ip4_spec.ip4src =
5051 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5052 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5054 fs->h_u.usr_ip4_spec.ip4dst =
5055 cpu_to_be32(rule->tuples.dst_ip[3]);
5056 fs->m_u.usr_ip4_spec.ip4dst =
5057 rule->unused_tuple & BIT(INNER_DST_IP) ?
5058 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5060 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5061 fs->m_u.usr_ip4_spec.tos =
5062 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5063 0 : rule->tuples_mask.ip_tos;
5065 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5066 fs->m_u.usr_ip4_spec.proto =
5067 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5068 0 : rule->tuples_mask.ip_proto;
5070 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5076 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5077 rule->tuples.src_ip, 4);
5078 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5079 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5081 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5082 rule->tuples_mask.src_ip, 4);
5084 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5085 rule->tuples.dst_ip, 4);
5086 if (rule->unused_tuple & BIT(INNER_DST_IP))
5087 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5089 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5090 rule->tuples_mask.dst_ip, 4);
5092 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5093 fs->m_u.tcp_ip6_spec.psrc =
5094 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5095 0 : cpu_to_be16(rule->tuples_mask.src_port);
5097 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5098 fs->m_u.tcp_ip6_spec.pdst =
5099 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5100 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5103 case IPV6_USER_FLOW:
5104 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5105 rule->tuples.src_ip, 4);
5106 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5107 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5109 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5110 rule->tuples_mask.src_ip, 4);
5112 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5113 rule->tuples.dst_ip, 4);
5114 if (rule->unused_tuple & BIT(INNER_DST_IP))
5115 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5117 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5118 rule->tuples_mask.dst_ip, 4);
5120 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5121 fs->m_u.usr_ip6_spec.l4_proto =
5122 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5123 0 : rule->tuples_mask.ip_proto;
5127 ether_addr_copy(fs->h_u.ether_spec.h_source,
5128 rule->tuples.src_mac);
5129 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5130 eth_zero_addr(fs->m_u.ether_spec.h_source);
5132 ether_addr_copy(fs->m_u.ether_spec.h_source,
5133 rule->tuples_mask.src_mac);
5135 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5136 rule->tuples.dst_mac);
5137 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5138 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5140 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5141 rule->tuples_mask.dst_mac);
5143 fs->h_u.ether_spec.h_proto =
5144 cpu_to_be16(rule->tuples.ether_proto);
5145 fs->m_u.ether_spec.h_proto =
5146 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5147 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5154 if (fs->flow_type & FLOW_EXT) {
5155 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5156 fs->m_ext.vlan_tci =
5157 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5158 cpu_to_be16(VLAN_VID_MASK) :
5159 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5162 if (fs->flow_type & FLOW_MAC_EXT) {
5163 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5164 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5165 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5167 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5168 rule->tuples_mask.dst_mac);
5171 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5172 fs->ring_cookie = RX_CLS_FLOW_DISC;
5176 fs->ring_cookie = rule->queue_id;
5177 vf_id = rule->vf_id;
5178 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5179 fs->ring_cookie |= vf_id;
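/* Illustrative sketch: ring_cookie packs the VF id above the queue id,
 * mirroring the decode done with ethtool_get_flow_spec_ring() and
 * ethtool_get_flow_spec_ring_vf() in hclge_add_fd_entry(). Hypothetical
 * encoder:
 */
static inline u64 hclge_example_make_ring_cookie(u64 vf_id, u16 queue_id)
{
        return (vf_id << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | queue_id;
}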
5185 static int hclge_get_all_rules(struct hnae3_handle *handle,
5186 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5188 struct hclge_vport *vport = hclge_get_vport(handle);
5189 struct hclge_dev *hdev = vport->back;
5190 struct hclge_fd_rule *rule;
5191 struct hlist_node *node2;
5194 if (!hnae3_dev_fd_supported(hdev))
5197 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5199 hlist_for_each_entry_safe(rule, node2,
5200 &hdev->fd_rule_list, rule_node) {
5201 if (cnt == cmd->rule_cnt)
5204 rule_locs[cnt] = rule->location;
5208 cmd->rule_cnt = cnt;
5213 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5215 struct hclge_vport *vport = hclge_get_vport(handle);
5216 struct hclge_dev *hdev = vport->back;
5218 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5219 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5222 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5224 struct hclge_vport *vport = hclge_get_vport(handle);
5225 struct hclge_dev *hdev = vport->back;
5227 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5230 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5232 struct hclge_vport *vport = hclge_get_vport(handle);
5233 struct hclge_dev *hdev = vport->back;
5235 return hdev->reset_count;
5238 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5240 struct hclge_vport *vport = hclge_get_vport(handle);
5241 struct hclge_dev *hdev = vport->back;
5243 hdev->fd_en = enable;
5245 hclge_del_all_fd_entries(handle, false);
5247 hclge_restore_fd_entries(handle);
5250 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5252 struct hclge_desc desc;
5253 struct hclge_config_mac_mode_cmd *req =
5254 (struct hclge_config_mac_mode_cmd *)desc.data;
5258 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5259 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5260 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5261 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5262 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5263 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5264 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5265 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5266 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5267 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5268 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5269 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5270 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5271 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5272 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5273 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5275 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5277 dev_err(&hdev->pdev->dev,
5278 "mac enable fail, ret =%d.\n", ret);
5281 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5283 struct hclge_config_mac_mode_cmd *req;
5284 struct hclge_desc desc;
5288 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5289 /* 1 Read out the MAC mode config at first */
5290 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5291 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5293 dev_err(&hdev->pdev->dev,
5294 "mac loopback get fail, ret =%d.\n", ret);
5298 /* 2 Then setup the loopback flag */
5299 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5300 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5301 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5302 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5304 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5306 /* 3 Config mac work mode with loopback flag
5307 * and its original configure parameters
5309 hclge_cmd_reuse_desc(&desc, false);
5310 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5312 dev_err(&hdev->pdev->dev,
5313 "mac loopback set fail, ret =%d.\n", ret);
5317 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5318 enum hnae3_loop loop_mode)
5320 #define HCLGE_SERDES_RETRY_MS 10
5321 #define HCLGE_SERDES_RETRY_NUM 100
5323 #define HCLGE_MAC_LINK_STATUS_MS 20
5324 #define HCLGE_MAC_LINK_STATUS_NUM 10
5325 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5326 #define HCLGE_MAC_LINK_STATUS_UP 1
5328 struct hclge_serdes_lb_cmd *req;
5329 struct hclge_desc desc;
5330 int mac_link_ret = 0;
5334 req = (struct hclge_serdes_lb_cmd *)desc.data;
5335 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5337 switch (loop_mode) {
5338 case HNAE3_LOOP_SERIAL_SERDES:
5339 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5341 case HNAE3_LOOP_PARALLEL_SERDES:
5342 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5345 dev_err(&hdev->pdev->dev,
5346 "unsupported serdes loopback mode %d\n", loop_mode);
5351 req->enable = loop_mode_b;
5352 req->mask = loop_mode_b;
5353 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5355 req->mask = loop_mode_b;
5356 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5359 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5361 dev_err(&hdev->pdev->dev,
5362 "serdes loopback set fail, ret = %d\n", ret);
5367 msleep(HCLGE_SERDES_RETRY_MS);
5368 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5370 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5372 dev_err(&hdev->pdev->dev,
5373 "serdes loopback get, ret = %d\n", ret);
5376 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5377 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5379 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5380 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5382 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5383 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5387 hclge_cfg_mac_mode(hdev, en);
5391 /* serdes internal loopback, independent of the network cable. */
5392 msleep(HCLGE_MAC_LINK_STATUS_MS);
5393 ret = hclge_get_mac_link_status(hdev);
5394 if (ret == mac_link_ret)
5396 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5398 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5403 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5404 int stream_id, bool enable)
5406 struct hclge_desc desc;
5407 struct hclge_cfg_com_tqp_queue_cmd *req =
5408 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5411 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5412 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5413 req->stream_id = cpu_to_le16(stream_id);
5414 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5416 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5418 dev_err(&hdev->pdev->dev,
5419 "Tqp enable fail, status =%d.\n", ret);
5423 static int hclge_set_loopback(struct hnae3_handle *handle,
5424 enum hnae3_loop loop_mode, bool en)
5426 struct hclge_vport *vport = hclge_get_vport(handle);
5427 struct hnae3_knic_private_info *kinfo;
5428 struct hclge_dev *hdev = vport->back;
5431 switch (loop_mode) {
5432 case HNAE3_LOOP_APP:
5433 ret = hclge_set_app_loopback(hdev, en);
5435 case HNAE3_LOOP_SERIAL_SERDES:
5436 case HNAE3_LOOP_PARALLEL_SERDES:
5437 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5441 dev_err(&hdev->pdev->dev,
5442 "loop_mode %d is not supported\n", loop_mode);
5449 kinfo = &vport->nic.kinfo;
5450 for (i = 0; i < kinfo->num_tqps; i++) {
5451 ret = hclge_tqp_enable(hdev, i, 0, en);
5459 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5461 struct hclge_vport *vport = hclge_get_vport(handle);
5462 struct hnae3_knic_private_info *kinfo;
5463 struct hnae3_queue *queue;
5464 struct hclge_tqp *tqp;
5467 kinfo = &vport->nic.kinfo;
5468 for (i = 0; i < kinfo->num_tqps; i++) {
5469 queue = handle->kinfo.tqp[i];
5470 tqp = container_of(queue, struct hclge_tqp, q);
5471 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5475 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5477 struct hclge_vport *vport = hclge_get_vport(handle);
5478 struct hclge_dev *hdev = vport->back;
5481 mod_timer(&hdev->service_timer, jiffies + HZ);
5483 del_timer_sync(&hdev->service_timer);
5484 cancel_work_sync(&hdev->service_task);
5485 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5489 static int hclge_ae_start(struct hnae3_handle *handle)
5491 struct hclge_vport *vport = hclge_get_vport(handle);
5492 struct hclge_dev *hdev = vport->back;
5495 hclge_cfg_mac_mode(hdev, true);
5496 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5497 hdev->hw.mac.link = 0;
5499 /* reset tqp stats */
5500 hclge_reset_tqp_stats(handle);
5502 hclge_mac_start_phy(hdev);
5507 static void hclge_ae_stop(struct hnae3_handle *handle)
5509 struct hclge_vport *vport = hclge_get_vport(handle);
5510 struct hclge_dev *hdev = vport->back;
5513 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5515 /* If it is not PF reset, the firmware will disable the MAC,
5516 * so it only needs to stop the PHY here.
5518 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5519 hdev->reset_type != HNAE3_FUNC_RESET) {
5520 hclge_mac_stop_phy(hdev);
5524 for (i = 0; i < handle->kinfo.num_tqps; i++)
5525 hclge_reset_tqp(handle, i);
5528 hclge_cfg_mac_mode(hdev, false);
5530 hclge_mac_stop_phy(hdev);
5532 /* reset tqp stats */
5533 hclge_reset_tqp_stats(handle);
5534 hclge_update_link_status(hdev);
5537 int hclge_vport_start(struct hclge_vport *vport)
5539 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5540 vport->last_active_jiffies = jiffies;
5544 void hclge_vport_stop(struct hclge_vport *vport)
5546 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5549 static int hclge_client_start(struct hnae3_handle *handle)
5551 struct hclge_vport *vport = hclge_get_vport(handle);
5553 return hclge_vport_start(vport);
5556 static void hclge_client_stop(struct hnae3_handle *handle)
5558 struct hclge_vport *vport = hclge_get_vport(handle);
5560 hclge_vport_stop(vport);
5563 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5564 u16 cmdq_resp, u8 resp_code,
5565 enum hclge_mac_vlan_tbl_opcode op)
5567 struct hclge_dev *hdev = vport->back;
5568 int return_status = -EIO;
5571 dev_err(&hdev->pdev->dev,
5572 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5577 if (op == HCLGE_MAC_VLAN_ADD) {
5578 if ((!resp_code) || (resp_code == 1)) {
5580 } else if (resp_code == 2) {
5581 return_status = -ENOSPC;
5582 dev_err(&hdev->pdev->dev,
5583 "add mac addr failed for uc_overflow.\n");
5584 } else if (resp_code == 3) {
5585 return_status = -ENOSPC;
5586 dev_err(&hdev->pdev->dev,
5587 "add mac addr failed for mc_overflow.\n");
5589 dev_err(&hdev->pdev->dev,
5590 "add mac addr failed for undefined, code=%d.\n",
5593 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5596 } else if (resp_code == 1) {
5597 return_status = -ENOENT;
5598 dev_dbg(&hdev->pdev->dev,
5599 "remove mac addr failed for miss.\n");
5601 dev_err(&hdev->pdev->dev,
5602 "remove mac addr failed for undefined, code=%d.\n",
5605 } else if (op == HCLGE_MAC_VLAN_LKUP) {
5608 } else if (resp_code == 1) {
5609 return_status = -ENOENT;
5610 dev_dbg(&hdev->pdev->dev,
5611 "lookup mac addr failed for miss.\n");
5613 dev_err(&hdev->pdev->dev,
5614 "lookup mac addr failed for undefined, code=%d.\n",
5618 return_status = -EINVAL;
5619 dev_err(&hdev->pdev->dev,
5620 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5624 return return_status;
5627 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5632 if (vfid > 255 || vfid < 0)
5635 if (vfid <= 191) {
5636 word_num = vfid / 32;
5637 bit_num = vfid % 32;
5639 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5641 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5643 word_num = (vfid - 192) / 32;
5644 bit_num = vfid % 32;
5646 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5648 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
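/* Illustrative sketch: function ids 0..191 live in desc[1] and 192..255
 * in desc[2], 32 ids per 32-bit word, matching the arithmetic above.
 * Hypothetical pure mapping:
 */
static inline void hclge_example_vfid_pos(int vfid, int *desc_idx,
                                          int *word_num, int *bit_num)
{
        *desc_idx = vfid <= 191 ? 1 : 2;
        *word_num = (vfid <= 191 ? vfid : vfid - 192) / 32;
        *bit_num = vfid % 32;
}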
5654 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5656 #define HCLGE_DESC_NUMBER 3
5657 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5660 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5661 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5662 if (desc[i].data[j])
5668 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5669 const u8 *addr, bool is_mc)
5671 const unsigned char *mac_addr = addr;
5672 u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
5673 (mac_addr[2] << 16) | (mac_addr[3] << 24);
5674 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
5676 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5678 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5679 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5682 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5683 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
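/* Illustrative sketch of the packing above: the 6-byte MAC is split into
 * a 32-bit word holding bytes 0..3 and a 16-bit word holding bytes 4..5,
 * both little-endian; e.g. 00:11:22:33:44:55 packs to hi=0x33221100,
 * lo=0x5544. Hypothetical helper:
 */
static inline void hclge_example_pack_mac(const u8 *mac, u32 *hi, u16 *lo)
{
        *hi = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
        *lo = mac[4] | (mac[5] << 8);
}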
5686 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5687 struct hclge_mac_vlan_tbl_entry_cmd *req)
5689 struct hclge_dev *hdev = vport->back;
5690 struct hclge_desc desc;
5695 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5697 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5699 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5701 dev_err(&hdev->pdev->dev,
5702 "del mac addr failed for cmd_send, ret =%d.\n",
5706 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5707 retval = le16_to_cpu(desc.retval);
5709 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5710 HCLGE_MAC_VLAN_REMOVE);
5713 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5714 struct hclge_mac_vlan_tbl_entry_cmd *req,
5715 struct hclge_desc *desc,
5718 struct hclge_dev *hdev = vport->back;
5723 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5725 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5726 memcpy(desc[0].data,
5728 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5729 hclge_cmd_setup_basic_desc(&desc[1],
5730 HCLGE_OPC_MAC_VLAN_ADD,
5732 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5733 hclge_cmd_setup_basic_desc(&desc[2],
5734 HCLGE_OPC_MAC_VLAN_ADD,
5736 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5738 memcpy(desc[0].data,
5740 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5741 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5744 dev_err(&hdev->pdev->dev,
5745 "lookup mac addr failed for cmd_send, ret =%d.\n",
5749 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5750 retval = le16_to_cpu(desc[0].retval);
5752 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5753 HCLGE_MAC_VLAN_LKUP);
5756 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5757 struct hclge_mac_vlan_tbl_entry_cmd *req,
5758 struct hclge_desc *mc_desc)
5760 struct hclge_dev *hdev = vport->back;
5767 struct hclge_desc desc;
5769 hclge_cmd_setup_basic_desc(&desc,
5770 HCLGE_OPC_MAC_VLAN_ADD,
5772 memcpy(desc.data, req,
5773 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5774 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5775 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5776 retval = le16_to_cpu(desc.retval);
5778 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5780 HCLGE_MAC_VLAN_ADD);
5782 hclge_cmd_reuse_desc(&mc_desc[0], false);
5783 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5784 hclge_cmd_reuse_desc(&mc_desc[1], false);
5785 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5786 hclge_cmd_reuse_desc(&mc_desc[2], false);
5787 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5788 memcpy(mc_desc[0].data, req,
5789 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5790 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5791 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5792 retval = le16_to_cpu(mc_desc[0].retval);
5794 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5796 HCLGE_MAC_VLAN_ADD);
5800 dev_err(&hdev->pdev->dev,
5801 "add mac addr failed for cmd_send, ret =%d.\n",
5809 static int hclge_init_umv_space(struct hclge_dev *hdev)
5811 u16 allocated_size = 0;
5814 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5819 if (allocated_size < hdev->wanted_umv_size)
5820 dev_warn(&hdev->pdev->dev,
5821 "Alloc umv space failed, want %d, get %d\n",
5822 hdev->wanted_umv_size, allocated_size);
5824 mutex_init(&hdev->umv_mutex);
5825 hdev->max_umv_size = allocated_size;
5826 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5827 hdev->share_umv_size = hdev->priv_umv_size +
5828 hdev->max_umv_size % (hdev->num_req_vfs + 2);
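/* Worked example for the split above, assuming 130 UMV entries and two
 * VFs: the space is shared among num_req_vfs + 2 parties, so each gets a
 * private quota of 130 / 4 = 32, and the shared pool holds one quota plus
 * the remainder, 32 + 130 % 4 = 34. Hypothetical helper:
 */
static inline void hclge_example_umv_split(u16 max, u16 num_vfs,
                                           u16 *priv, u16 *share)
{
        u16 parties = num_vfs + 2;

        *priv = max / parties;          /* 130 / 4 = 32 */
        *share = *priv + max % parties; /* 32 + 2  = 34 */
}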
5833 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5837 if (hdev->max_umv_size > 0) {
5838 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5842 hdev->max_umv_size = 0;
5844 mutex_destroy(&hdev->umv_mutex);
5849 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5850 u16 *allocated_size, bool is_alloc)
5852 struct hclge_umv_spc_alc_cmd *req;
5853 struct hclge_desc desc;
5856 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5857 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5858 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5859 req->space_size = cpu_to_le32(space_size);
5861 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5863 dev_err(&hdev->pdev->dev,
5864 "%s umv space failed for cmd_send, ret =%d\n",
5865 is_alloc ? "allocate" : "free", ret);
5869 if (is_alloc && allocated_size)
5870 *allocated_size = le32_to_cpu(desc.data[1]);
5875 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5877 struct hclge_vport *vport;
5880 for (i = 0; i < hdev->num_alloc_vport; i++) {
5881 vport = &hdev->vport[i];
5882 vport->used_umv_num = 0;
5885 mutex_lock(&hdev->umv_mutex);
5886 hdev->share_umv_size = hdev->priv_umv_size +
5887 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5888 mutex_unlock(&hdev->umv_mutex);
5891 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5893 struct hclge_dev *hdev = vport->back;
5896 mutex_lock(&hdev->umv_mutex);
5897 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5898 hdev->share_umv_size == 0);
5899 mutex_unlock(&hdev->umv_mutex);
5904 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5906 struct hclge_dev *hdev = vport->back;
5908 mutex_lock(&hdev->umv_mutex);
5910 if (vport->used_umv_num > hdev->priv_umv_size)
5911 hdev->share_umv_size++;
5913 if (vport->used_umv_num > 0)
5914 vport->used_umv_num--;
5916 if (vport->used_umv_num >= hdev->priv_umv_size &&
5917 hdev->share_umv_size > 0)
5918 hdev->share_umv_size--;
5919 vport->used_umv_num++;
5921 mutex_unlock(&hdev->umv_mutex);
5924 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5925 const unsigned char *addr)
5927 struct hclge_vport *vport = hclge_get_vport(handle);
5929 return hclge_add_uc_addr_common(vport, addr);
5932 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5933 const unsigned char *addr)
5935 struct hclge_dev *hdev = vport->back;
5936 struct hclge_mac_vlan_tbl_entry_cmd req;
5937 struct hclge_desc desc;
5938 u16 egress_port = 0;
5941 /* mac addr check */
5942 if (is_zero_ether_addr(addr) ||
5943 is_broadcast_ether_addr(addr) ||
5944 is_multicast_ether_addr(addr)) {
5945 dev_err(&hdev->pdev->dev,
5946 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5948 is_zero_ether_addr(addr),
5949 is_broadcast_ether_addr(addr),
5950 is_multicast_ether_addr(addr));
5954 memset(&req, 0, sizeof(req));
5956 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5957 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5959 req.egress_port = cpu_to_le16(egress_port);
5961 hclge_prepare_mac_addr(&req, addr, false);
5963 /* Lookup the mac address in the mac_vlan table, and add
5964 * it if the entry does not exist. A duplicate unicast entry
5965 * is not allowed in the mac vlan table.
5967 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5968 if (ret == -ENOENT) {
5969 if (!hclge_is_umv_space_full(vport)) {
5970 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5972 hclge_update_umv_space(vport, false);
5976 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5977 hdev->priv_umv_size);
5982 /* check if we just hit the duplicate */
5984 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
5985 vport->vport_id, addr);
5989 dev_err(&hdev->pdev->dev,
5990 "PF failed to add unicast entry(%pM) in the MAC table\n",
5996 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5997 const unsigned char *addr)
5999 struct hclge_vport *vport = hclge_get_vport(handle);
6001 return hclge_rm_uc_addr_common(vport, addr);
6004 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6005 const unsigned char *addr)
6007 struct hclge_dev *hdev = vport->back;
6008 struct hclge_mac_vlan_tbl_entry_cmd req;
6011 /* mac addr check */
6012 if (is_zero_ether_addr(addr) ||
6013 is_broadcast_ether_addr(addr) ||
6014 is_multicast_ether_addr(addr)) {
6015 dev_dbg(&hdev->pdev->dev,
6016 "Remove mac err! invalid mac:%pM.\n",
6021 memset(&req, 0, sizeof(req));
6022 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6023 hclge_prepare_mac_addr(&req, addr, false);
6024 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6026 hclge_update_umv_space(vport, true);
6031 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6032 const unsigned char *addr)
6034 struct hclge_vport *vport = hclge_get_vport(handle);
6036 return hclge_add_mc_addr_common(vport, addr);
6039 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6040 const unsigned char *addr)
6042 struct hclge_dev *hdev = vport->back;
6043 struct hclge_mac_vlan_tbl_entry_cmd req;
6044 struct hclge_desc desc[3];
6047 /* mac addr check */
6048 if (!is_multicast_ether_addr(addr)) {
6049 dev_err(&hdev->pdev->dev,
6050 "Add mc mac err! invalid mac:%pM.\n",
6054 memset(&req, 0, sizeof(req));
6055 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6056 hclge_prepare_mac_addr(&req, addr, true);
6057 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6059 /* This mac addr exists, update VFID for it */
6060 hclge_update_desc_vfid(desc, vport->vport_id, false);
6061 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6063 /* This mac addr does not exist, add a new entry for it */
6064 memset(desc[0].data, 0, sizeof(desc[0].data));
6065 memset(desc[1].data, 0, sizeof(desc[1].data));
6066 memset(desc[2].data, 0, sizeof(desc[2].data));
6067 hclge_update_desc_vfid(desc, vport->vport_id, false);
6068 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6071 if (status == -ENOSPC)
6072 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6077 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6078 const unsigned char *addr)
6080 struct hclge_vport *vport = hclge_get_vport(handle);
6082 return hclge_rm_mc_addr_common(vport, addr);
6085 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6086 const unsigned char *addr)
6088 struct hclge_dev *hdev = vport->back;
6089 struct hclge_mac_vlan_tbl_entry_cmd req;
6090 enum hclge_cmd_status status;
6091 struct hclge_desc desc[3];
6093 /* mac addr check */
6094 if (!is_multicast_ether_addr(addr)) {
6095 dev_dbg(&hdev->pdev->dev,
6096 "Remove mc mac err! invalid mac:%pM.\n",
6101 memset(&req, 0, sizeof(req));
6102 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6103 hclge_prepare_mac_addr(&req, addr, true);
6104 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6106 /* This mac addr exists, remove this handle's VFID for it */
6107 hclge_update_desc_vfid(desc, vport->vport_id, true);
6109 if (hclge_is_all_function_id_zero(desc))
6110 /* All the vfid is zero, so need to delete this entry */
6111 status = hclge_remove_mac_vlan_tbl(vport, &req);
6113 /* Not all the vfid is zero, update the vfid */
6114 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6117 /* This mac address may be in the mta table, but it cannot be
6118 * deleted here because an mta entry represents an address
6119 * range rather than a specific address. The delete action for
6120 * all entries will take effect in update_mta_status called by
6121 * hns3_nic_set_rx_mode.
6129 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6130 enum HCLGE_MAC_ADDR_TYPE mac_type)
6132 struct hclge_vport_mac_addr_cfg *mac_cfg;
6133 struct list_head *list;
6135 if (!vport->vport_id)
6138 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6142 mac_cfg->hd_tbl_status = true;
6143 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6145 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6146 &vport->uc_mac_list : &vport->mc_mac_list;
6148 list_add_tail(&mac_cfg->node, list);
6151 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6153 enum HCLGE_MAC_ADDR_TYPE mac_type)
6155 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6156 struct list_head *list;
6157 bool uc_flag, mc_flag;
6159 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6160 &vport->uc_mac_list : &vport->mc_mac_list;
6162 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6163 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6165 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6166 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6167 if (uc_flag && mac_cfg->hd_tbl_status)
6168 hclge_rm_uc_addr_common(vport, mac_addr);
6170 if (mc_flag && mac_cfg->hd_tbl_status)
6171 hclge_rm_mc_addr_common(vport, mac_addr);
6173 list_del(&mac_cfg->node);
6180 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6181 enum HCLGE_MAC_ADDR_TYPE mac_type)
6183 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6184 struct list_head *list;
6186 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6187 &vport->uc_mac_list : &vport->mc_mac_list;
6189 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6190 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6191 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6193 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6194 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6196 mac_cfg->hd_tbl_status = false;
6198 list_del(&mac_cfg->node);
6204 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6206 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6207 struct hclge_vport *vport;
6210 mutex_lock(&hdev->vport_cfg_mutex);
6211 for (i = 0; i < hdev->num_alloc_vport; i++) {
6212 vport = &hdev->vport[i];
6213 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6214 list_del(&mac->node);
6218 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6219 list_del(&mac->node);
6223 mutex_unlock(&hdev->vport_cfg_mutex);
6226 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6227 u16 cmdq_resp, u8 resp_code)
6229 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6230 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6231 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6232 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6237 dev_err(&hdev->pdev->dev,
6238 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6243 switch (resp_code) {
6244 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6245 case HCLGE_ETHERTYPE_ALREADY_ADD:
6248 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6249 dev_err(&hdev->pdev->dev,
6250 "add mac ethertype failed for manager table overflow.\n");
6251 return_status = -EIO;
6253 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6254 dev_err(&hdev->pdev->dev,
6255 "add mac ethertype failed for key conflict.\n");
6256 return_status = -EIO;
6259 dev_err(&hdev->pdev->dev,
6260 "add mac ethertype failed for undefined, code=%d.\n",
6262 return_status = -EIO;
6265 return return_status;
6268 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6269 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6271 struct hclge_desc desc;
6276 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6277 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6279 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6281 dev_err(&hdev->pdev->dev,
6282 "add mac ethertype failed for cmd_send, ret =%d.\n",
6287 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6288 retval = le16_to_cpu(desc.retval);
6290 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6293 static int init_mgr_tbl(struct hclge_dev *hdev)
6298 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6299 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6301 dev_err(&hdev->pdev->dev,
6302 "add mac ethertype failed, ret =%d.\n",
6311 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6313 struct hclge_vport *vport = hclge_get_vport(handle);
6314 struct hclge_dev *hdev = vport->back;
6316 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6319 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6322 const unsigned char *new_addr = (const unsigned char *)p;
6323 struct hclge_vport *vport = hclge_get_vport(handle);
6324 struct hclge_dev *hdev = vport->back;
6327 /* mac addr check */
6328 if (is_zero_ether_addr(new_addr) ||
6329 is_broadcast_ether_addr(new_addr) ||
6330 is_multicast_ether_addr(new_addr)) {
6331 dev_err(&hdev->pdev->dev,
6332 "Change uc mac err! invalid mac:%p.\n",
6337 if ((!is_first || is_kdump_kernel()) &&
6338 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6339 dev_warn(&hdev->pdev->dev,
6340 "remove old uc mac address fail.\n");
6342 ret = hclge_add_uc_addr(handle, new_addr);
6344 dev_err(&hdev->pdev->dev,
6345 "add uc mac address fail, ret =%d.\n",
6349 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6350 dev_err(&hdev->pdev->dev,
6351 "restore uc mac address fail.\n");
6356 ret = hclge_pause_addr_cfg(hdev, new_addr);
6358 dev_err(&hdev->pdev->dev,
6359 "configure mac pause address fail, ret =%d.\n",
6364 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6369 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6372 struct hclge_vport *vport = hclge_get_vport(handle);
6373 struct hclge_dev *hdev = vport->back;
6375 if (!hdev->hw.mac.phydev)
6378 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6381 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6382 u8 fe_type, bool filter_en, u8 vf_id)
6384 struct hclge_vlan_filter_ctrl_cmd *req;
6385 struct hclge_desc desc;
6388 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6390 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6391 req->vlan_type = vlan_type;
6392 req->vlan_fe = filter_en ? fe_type : 0;
6395 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6397 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6403 #define HCLGE_FILTER_TYPE_VF 0
6404 #define HCLGE_FILTER_TYPE_PORT 1
6405 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6406 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6407 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6408 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6409 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6410 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6411 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6412 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6413 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6415 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6417 struct hclge_vport *vport = hclge_get_vport(handle);
6418 struct hclge_dev *hdev = vport->back;
6420 if (hdev->pdev->revision >= 0x21) {
6421 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6422 HCLGE_FILTER_FE_EGRESS, enable, 0);
6423 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6424 HCLGE_FILTER_FE_INGRESS, enable, 0);
6426 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6427 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6431 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6433 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6436 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6437 bool is_kill, u16 vlan, u8 qos,
6440 #define HCLGE_MAX_VF_BYTES 16
6441 struct hclge_vlan_filter_vf_cfg_cmd *req0;
6442 struct hclge_vlan_filter_vf_cfg_cmd *req1;
6443 struct hclge_desc desc[2];
6448 hclge_cmd_setup_basic_desc(&desc[0],
6449 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6450 hclge_cmd_setup_basic_desc(&desc[1],
6451 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6453 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6455 vf_byte_off = vfid / 8;
6456 vf_byte_val = 1 << (vfid % 8);
6458 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6459 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6461 req0->vlan_id = cpu_to_le16(vlan);
6462 req0->vlan_cfg = is_kill;
6464 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6465 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6467 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6469 ret = hclge_cmd_send(&hdev->hw, desc, 2);
6471 dev_err(&hdev->pdev->dev,
6472 "Send vf vlan command fail, ret =%d.\n",
6478 #define HCLGE_VF_VLAN_NO_ENTRY 2
6479 if (!req0->resp_code || req0->resp_code == 1)
6482 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6483 dev_warn(&hdev->pdev->dev,
6484 "vf vlan table is full, vf vlan filter is disabled\n");
6488 dev_err(&hdev->pdev->dev,
6489 "Add vf vlan filter fail, ret =%d.\n",
6492 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
6493 if (!req0->resp_code)
6496 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6497 dev_warn(&hdev->pdev->dev,
6498 "vlan %d filter is not in vf vlan table\n",
6503 dev_err(&hdev->pdev->dev,
6504 "Kill vf vlan filter fail, ret =%d.\n",
6511 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6512 u16 vlan_id, bool is_kill)
6514 struct hclge_vlan_filter_pf_cfg_cmd *req;
6515 struct hclge_desc desc;
6516 u8 vlan_offset_byte_val;
6517 u8 vlan_offset_byte;
6521 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6523 vlan_offset_160 = vlan_id / 160;
6524 vlan_offset_byte = (vlan_id % 160) / 8;
6525 vlan_offset_byte_val = 1 << (vlan_id % 8);
6527 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6528 req->vlan_offset = vlan_offset_160;
6529 req->vlan_cfg = is_kill;
6530 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6532 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6534 dev_err(&hdev->pdev->dev,
6535 "port vlan command, send fail, ret =%d.\n", ret);
6539 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6540 u16 vport_id, u16 vlan_id, u8 qos,
6543 u16 vport_idx, vport_num = 0;
6546 if (is_kill && !vlan_id)
6549 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6552 dev_err(&hdev->pdev->dev,
6553 "Set %d vport vlan filter config fail, ret =%d.\n",
6558 /* vlan 0 may be added twice when 8021q module is enabled */
6559 if (!is_kill && !vlan_id &&
6560 test_bit(vport_id, hdev->vlan_table[vlan_id]))
6563 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6564 dev_err(&hdev->pdev->dev,
6565 "Add port vlan failed, vport %d is already in vlan %d\n",
6571 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6572 dev_err(&hdev->pdev->dev,
6573 "Delete port vlan failed, vport %d is not in vlan %d\n",
6578 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6581 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6582 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6588 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6589 u16 vlan_id, bool is_kill)
6591 struct hclge_vport *vport = hclge_get_vport(handle);
6592 struct hclge_dev *hdev = vport->back;
6594 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6598 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6599 u16 vlan, u8 qos, __be16 proto)
6601 struct hclge_vport *vport = hclge_get_vport(handle);
6602 struct hclge_dev *hdev = vport->back;
6604 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6606 if (proto != htons(ETH_P_8021Q))
6607 return -EPROTONOSUPPORT;
6609 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6612 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6614 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6615 struct hclge_vport_vtag_tx_cfg_cmd *req;
6616 struct hclge_dev *hdev = vport->back;
6617 struct hclge_desc desc;
6620 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6622 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6623 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6624 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6625 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6626 vcfg->accept_tag1 ? 1 : 0);
6627 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6628 vcfg->accept_untag1 ? 1 : 0);
6629 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6630 vcfg->accept_tag2 ? 1 : 0);
6631 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6632 vcfg->accept_untag2 ? 1 : 0);
6633 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6634 vcfg->insert_tag1_en ? 1 : 0);
6635 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6636 vcfg->insert_tag2_en ? 1 : 0);
6637 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6639 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6640 req->vf_bitmap[req->vf_offset] =
6641 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6643 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6645 dev_err(&hdev->pdev->dev,
6646 "Send port txvlan cfg command fail, ret =%d\n",
6652 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6654 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6655 struct hclge_vport_vtag_rx_cfg_cmd *req;
6656 struct hclge_dev *hdev = vport->back;
6657 struct hclge_desc desc;
6660 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6662 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6663 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6664 vcfg->strip_tag1_en ? 1 : 0);
6665 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6666 vcfg->strip_tag2_en ? 1 : 0);
6667 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6668 vcfg->vlan1_vlan_prionly ? 1 : 0);
6669 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6670 vcfg->vlan2_vlan_prionly ? 1 : 0);
6672 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6673 req->vf_bitmap[req->vf_offset] =
6674 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6676 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6678 dev_err(&hdev->pdev->dev,
6679 "Send port rxvlan cfg command fail, ret =%d\n",
6685 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
6686 u16 port_base_vlan_state,
6691 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6692 vport->txvlan_cfg.accept_tag1 = true;
6693 vport->txvlan_cfg.insert_tag1_en = false;
6694 vport->txvlan_cfg.default_tag1 = 0;
6696 vport->txvlan_cfg.accept_tag1 = false;
6697 vport->txvlan_cfg.insert_tag1_en = true;
6698 vport->txvlan_cfg.default_tag1 = vlan_tag;
6701 vport->txvlan_cfg.accept_untag1 = true;
6703 /* accept_tag2 and accept_untag2 are not supported on
6704 * pdev revision(0x20); newer revisions support them, but
6705 * these two fields cannot be configured by the user.
6707 vport->txvlan_cfg.accept_tag2 = true;
6708 vport->txvlan_cfg.accept_untag2 = true;
6709 vport->txvlan_cfg.insert_tag2_en = false;
6710 vport->txvlan_cfg.default_tag2 = 0;
6712 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6713 vport->rxvlan_cfg.strip_tag1_en = false;
6714 vport->rxvlan_cfg.strip_tag2_en =
6715 vport->rxvlan_cfg.rx_vlan_offload_en;
6717 vport->rxvlan_cfg.strip_tag1_en =
6718 vport->rxvlan_cfg.rx_vlan_offload_en;
6719 vport->rxvlan_cfg.strip_tag2_en = true;
6721 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6722 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6724 ret = hclge_set_vlan_tx_offload_cfg(vport);
6728 return hclge_set_vlan_rx_offload_cfg(vport);
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
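/* The vport_vlan_cfg list shadows the VLANs programmed into the hardware
 * filter table so they can be removed or restored wholesale later.
 */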
void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
{
	struct hclge_vport_vlan_cfg *vlan;

	/* vlan 0 is reserved */
	if (!vlan_id)
		return;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = true;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}
void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
			       bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id, 0,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id, 0,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	vport->rxvlan_cfg.strip_tag1_en = false;
	vport->rxvlan_cfg.strip_tag2_en = enable;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
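/* Tell the firmware the new maximum frame size accepted by the MAC. */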
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret = 0;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be no less than any VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
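/* Assert (enable == true) or deassert (enable == false) the per-queue
 * soft reset through the firmware command queue.
 */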
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
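/* Convert a queue id local to this handle into the global TQP index
 * that the queue reset commands operate on.
 */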
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret = 0;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}
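/* Resolve the pause modes negotiated with the link partner and apply
 * them to the MAC; pause is forced off on half-duplex links.
 */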
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	/* Only support flow control negotiation for netdev with
	 * phy attached for now.
	 */
	if (!phydev)
		return -EOPNOTSUPP;

	return phy_start_aneg(phydev);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
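/* Bind a newly registered client to every vport and initialise its
 * instance; the RoCE instance is only brought up once both the NIC
 * client and RoCE hardware support are present.
 */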
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(client, ae_dev, 1);
			}

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}
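/* Before an FLR, request a function reset and wait for the reset
 * service task to bring the function down, polling up to
 * HCLGE_FLR_WAIT_CNT times with HCLGE_FLR_WAIT_MS between polls.
 */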
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}
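/* Rebuild hardware state after a reset: command queue, TQP mapping,
 * MAC, VLAN, TM, RSS and flow director configuration are all
 * reprogrammed from the state kept in struct hclge_dev.
 */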
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on core/global reset.
	 */
	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n",
			ret);
		return ret;
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_hw_error_set_state(hdev, false);
	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}
static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
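/* Dump registers in the same order hclge_get_regs_len() accounts for
 * them: the directly readable register groups first, each padded to a
 * whole line with separators, then the 32 bit and 64 bit register
 * blocks queried from the firmware.
 */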
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF registers values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common registers values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);