// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256

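/* Usage example: HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *		   HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num))
 * reads the 64-bit mac_tx_mac_pause_num counter out of the MAC stats
 * structure by byte offset. The g_mac_stats_string table below pairs
 * each ethtool string with such an offset.
 */
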
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_RESET_STATUS_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

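/* The expression above computes one head descriptor plus
 * ceil((reg_num - 3) / 4). Worked example: reg_num = 51 gives
 * (51 - 3) >> 2 = 12 with remainder 0, so desc_num = 13; reg_num = 54
 * gives remainder (54 - 3) & 0x3 = 3, adding one more, so desc_num = 14.
 */
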
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s",
			 strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode is supported
	 * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length is counted in units of 4 bytes when sent to hw */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently does not support uncontiguous tc */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

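/* Note: tso_mss_min and tso_mss_max are two separate u16 fields that
 * appear to share the same in-field bit layout, which would explain why
 * the HCLGE_TSO_MSS_MIN_M/HCLGE_TSO_MSS_MIN_S pair is reused when packing
 * the max value above.
 */
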
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
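
	/* Example: with 16 TQPs and 5 vports (1 PF + 4 VFs),
	 * tqp_per_vport = 16 / 5 = 3 and the main vport gets
	 * 3 + 16 % 5 = 4, so the remainder always goes to the PF.
	 */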
	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);
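
	/* Example: mps = 1500 gives aligned_mps = roundup(1500, 256) = 1536;
	 * with 4 TCs, shared_buf_tc = 4 * 1536 + 1536 = 7680, and shared_std
	 * is the larger of shared_buf_min and shared_buf_tc rounded up to a
	 * 256-byte boundary.
	 */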
	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		if (tc_num)
			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
		else
			hi_thrd = shared_buf - hdev->dv_buf_size;

		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / 2;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : 256;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear the TCs starting from the last one */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear the TCs starting from the last one */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

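/* Half duplex is only meaningful at 10M/100M; hclge_check_speed_dup()
 * silently forces full duplex for every higher speed before the value is
 * written to hardware.
 */
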
2154 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2157 struct hclge_config_mac_speed_dup_cmd *req;
2158 struct hclge_desc desc;
2161 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2163 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2165 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2168 case HCLGE_MAC_SPEED_10M:
2169 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2170 HCLGE_CFG_SPEED_S, 6);
2172 case HCLGE_MAC_SPEED_100M:
2173 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2174 HCLGE_CFG_SPEED_S, 7);
2176 case HCLGE_MAC_SPEED_1G:
2177 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2178 HCLGE_CFG_SPEED_S, 0);
2180 case HCLGE_MAC_SPEED_10G:
2181 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2182 HCLGE_CFG_SPEED_S, 1);
2184 case HCLGE_MAC_SPEED_25G:
2185 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2186 HCLGE_CFG_SPEED_S, 2);
2188 case HCLGE_MAC_SPEED_40G:
2189 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2190 HCLGE_CFG_SPEED_S, 3);
2192 case HCLGE_MAC_SPEED_50G:
2193 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2194 HCLGE_CFG_SPEED_S, 4);
2196 case HCLGE_MAC_SPEED_100G:
2197 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2198 HCLGE_CFG_SPEED_S, 5);
2201 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2205 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2208 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2210 dev_err(&hdev->pdev->dev,
2211 "mac speed/duplex config cmd failed %d.\n", ret);
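/* A minimal table-driven sketch of the speed-to-register mapping the
 * switch above implements. Illustrative only: the helper name is
 * hypothetical, the codes are copied from the switch cases, and the
 * block is compiled out by default.
 */
#ifdef HCLGE_SPEED_MAP_EXAMPLE
static int hclge_speed_to_fw_code(int speed, u8 *code)
{
	static const struct {
		int speed;
		u8 fw_code;
	} speed_map[] = {
		{HCLGE_MAC_SPEED_10M, 6}, {HCLGE_MAC_SPEED_100M, 7},
		{HCLGE_MAC_SPEED_1G, 0}, {HCLGE_MAC_SPEED_10G, 1},
		{HCLGE_MAC_SPEED_25G, 2}, {HCLGE_MAC_SPEED_40G, 3},
		{HCLGE_MAC_SPEED_50G, 4}, {HCLGE_MAC_SPEED_100G, 5},
	};
	unsigned int i;

	/* a linear scan is fine for eight entries */
	for (i = 0; i < ARRAY_SIZE(speed_map); i++) {
		if (speed_map[i].speed == speed) {
			*code = speed_map[i].fw_code;
			return 0;
		}
	}

	return -EINVAL;
}
#endif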
2218 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2222 duplex = hclge_check_speed_dup(duplex, speed);
2223 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2226 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2230 hdev->hw.mac.speed = speed;
2231 hdev->hw.mac.duplex = duplex;
2236 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2239 struct hclge_vport *vport = hclge_get_vport(handle);
2240 struct hclge_dev *hdev = vport->back;
2242 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2245 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2247 struct hclge_config_auto_neg_cmd *req;
2248 struct hclge_desc desc;
2252 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2254 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2255 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2256 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2258 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2260 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2266 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2268 struct hclge_vport *vport = hclge_get_vport(handle);
2269 struct hclge_dev *hdev = vport->back;
2271 if (!hdev->hw.mac.support_autoneg) {
2273 dev_err(&hdev->pdev->dev,
2274 "autoneg is not supported by current port\n");
2281 return hclge_set_autoneg_en(hdev, enable);
2284 static int hclge_get_autoneg(struct hnae3_handle *handle)
2286 struct hclge_vport *vport = hclge_get_vport(handle);
2287 struct hclge_dev *hdev = vport->back;
2288 struct phy_device *phydev = hdev->hw.mac.phydev;
2291 return phydev->autoneg;
2293 return hdev->hw.mac.autoneg;
2296 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2298 struct hclge_vport *vport = hclge_get_vport(handle);
2299 struct hclge_dev *hdev = vport->back;
2302 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2304 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2307 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2310 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2312 struct hclge_config_fec_cmd *req;
2313 struct hclge_desc desc;
2316 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2318 req = (struct hclge_config_fec_cmd *)desc.data;
2319 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2320 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2321 if (fec_mode & BIT(HNAE3_FEC_RS))
2322 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2323 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2324 if (fec_mode & BIT(HNAE3_FEC_BASER))
2325 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2326 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2328 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2330 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2335 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2337 struct hclge_vport *vport = hclge_get_vport(handle);
2338 struct hclge_dev *hdev = vport->back;
2339 struct hclge_mac *mac = &hdev->hw.mac;
2342 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2343 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2347 ret = hclge_set_fec_hw(hdev, fec_mode);
2351 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2355 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2358 struct hclge_vport *vport = hclge_get_vport(handle);
2359 struct hclge_dev *hdev = vport->back;
2360 struct hclge_mac *mac = &hdev->hw.mac;
2363 *fec_ability = mac->fec_ability;
2365 *fec_mode = mac->fec_mode;
2368 static int hclge_mac_init(struct hclge_dev *hdev)
2370 struct hclge_mac *mac = &hdev->hw.mac;
2373 hdev->support_sfp_query = true;
2374 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2375 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2376 hdev->hw.mac.duplex);
2378 dev_err(&hdev->pdev->dev,
2379 "Config mac speed dup fail ret=%d\n", ret);
2385 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2386 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2388 dev_err(&hdev->pdev->dev,
2389 "Fec mode init fail, ret = %d\n", ret);
2394 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2396 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2400 ret = hclge_buffer_alloc(hdev);
2402 dev_err(&hdev->pdev->dev,
2403 "allocate buffer fail, ret=%d\n", ret);
2408 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2410 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2411 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2412 schedule_work(&hdev->mbx_service_task);
2415 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2417 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2418 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2419 schedule_work(&hdev->rst_service_task);
2422 static void hclge_task_schedule(struct hclge_dev *hdev)
2424 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2425 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2426 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2427 schedule_work(&hdev->service_task);
2430 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2432 struct hclge_link_status_cmd *req;
2433 struct hclge_desc desc;
2437 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2438 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2440 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2445 req = (struct hclge_link_status_cmd *)desc.data;
2446 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2448 return !!link_status;
2451 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2456 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2459 mac_state = hclge_get_mac_link_status(hdev);
2461 if (hdev->hw.mac.phydev) {
2462 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2463 link_stat = mac_state &
2464 hdev->hw.mac.phydev->link;
2469 link_stat = mac_state;
2475 static void hclge_update_link_status(struct hclge_dev *hdev)
2477 struct hnae3_client *rclient = hdev->roce_client;
2478 struct hnae3_client *client = hdev->nic_client;
2479 struct hnae3_handle *rhandle;
2480 struct hnae3_handle *handle;
2486 state = hclge_get_mac_phy_link(hdev);
2487 if (state != hdev->hw.mac.link) {
2488 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2489 handle = &hdev->vport[i].nic;
2490 client->ops->link_status_change(handle, state);
2491 hclge_config_mac_tnl_int(hdev, state);
2492 rhandle = &hdev->vport[i].roce;
2493 if (rclient && rclient->ops->link_status_change)
2494 rclient->ops->link_status_change(rhandle,
2497 hdev->hw.mac.link = state;
2501 static void hclge_update_port_capability(struct hclge_mac *mac)
2503 /* update fec ability by speed */
2504 hclge_convert_setting_fec(mac);
2506 /* firmware cannot identify the backplane type; the media type
2507 * read from the configuration can help handle it
2509 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2510 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2511 mac->module_type = HNAE3_MODULE_TYPE_KR;
2512 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2513 mac->module_type = HNAE3_MODULE_TYPE_TP;
2515 if (mac->support_autoneg) {
2516 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2517 linkmode_copy(mac->advertising, mac->supported);
2519 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2521 linkmode_zero(mac->advertising);
2525 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2527 struct hclge_sfp_info_cmd *resp = NULL;
2528 struct hclge_desc desc;
2531 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2532 resp = (struct hclge_sfp_info_cmd *)desc.data;
2533 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2534 if (ret == -EOPNOTSUPP) {
2535 dev_warn(&hdev->pdev->dev,
2536 "IMP does not support getting SFP speed %d\n", ret);
2539 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2543 *speed = le32_to_cpu(resp->speed);
2548 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2550 struct hclge_sfp_info_cmd *resp;
2551 struct hclge_desc desc;
2554 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2555 resp = (struct hclge_sfp_info_cmd *)desc.data;
2557 resp->query_type = QUERY_ACTIVE_SPEED;
2559 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2560 if (ret == -EOPNOTSUPP) {
2561 dev_warn(&hdev->pdev->dev,
2562 "IMP does not support getting SFP info %d\n", ret);
2565 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2569 mac->speed = le32_to_cpu(resp->speed);
2570 /* if resp->speed_ability is 0, the firmware is an old version;
2571 * do not update these parameters
2573 if (resp->speed_ability) {
2574 mac->module_type = le32_to_cpu(resp->module_type);
2575 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2576 mac->autoneg = resp->autoneg;
2577 mac->support_autoneg = resp->autoneg_ability;
2578 if (!resp->active_fec)
2581 mac->fec_mode = BIT(resp->active_fec);
2583 mac->speed_type = QUERY_SFP_SPEED;
2589 static int hclge_update_port_info(struct hclge_dev *hdev)
2591 struct hclge_mac *mac = &hdev->hw.mac;
2592 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2595 /* get the port info from SFP cmd if not copper port */
2596 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2599 /* if IMP does not support getting SFP/qSFP info, return directly */
2600 if (!hdev->support_sfp_query)
2603 if (hdev->pdev->revision >= 0x21)
2604 ret = hclge_get_sfp_info(hdev, mac);
2606 ret = hclge_get_sfp_speed(hdev, &speed);
2608 if (ret == -EOPNOTSUPP) {
2609 hdev->support_sfp_query = false;
2615 if (hdev->pdev->revision >= 0x21) {
2616 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2617 hclge_update_port_capability(mac);
2620 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2623 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2624 return 0; /* do nothing if no SFP */
2626 /* must config full duplex for SFP */
2627 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2631 static int hclge_get_status(struct hnae3_handle *handle)
2633 struct hclge_vport *vport = hclge_get_vport(handle);
2634 struct hclge_dev *hdev = vport->back;
2636 hclge_update_link_status(hdev);
2638 return hdev->hw.mac.link;
2641 static void hclge_service_timer(struct timer_list *t)
2643 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2645 mod_timer(&hdev->service_timer, jiffies + HZ);
2646 hdev->hw_stats.stats_timer++;
2647 hdev->fd_arfs_expire_timer++;
2648 hclge_task_schedule(hdev);
2651 static void hclge_service_complete(struct hclge_dev *hdev)
2653 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2655 /* Flush memory before next watchdog */
2656 smp_mb__before_atomic();
2657 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
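/* hclge_task_schedule() and hclge_service_complete() form an
 * "at most one pending work" handshake: the scheduling side uses
 * test_and_set_bit(HCLGE_STATE_SERVICE_SCHED) so at most one
 * schedule_work() is issued per cycle, and the worker clears the bit
 * here (with smp_mb__before_atomic() ordering the preceding writes)
 * so the next timer tick may schedule again.
 */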
2660 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2662 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2664 /* fetch the events from their corresponding regs */
2665 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2666 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2667 msix_src_reg = hclge_read_dev(&hdev->hw,
2668 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2670 /* Assumption: if reset and mailbox events are ever reported
2671 * together, only the reset event is processed in this pass and the
2672 * mailbox events are deferred. Since the RX CMDQ event has not been
2673 * cleared this time, hardware will raise another interrupt just for
2674 * the mailbox.
2677 /* check for vector0 reset event sources */
2678 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2679 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2680 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2681 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2682 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2683 hdev->rst_stats.imp_rst_cnt++;
2684 return HCLGE_VECTOR0_EVENT_RST;
2687 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2688 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2689 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2690 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2691 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2692 hdev->rst_stats.global_rst_cnt++;
2693 return HCLGE_VECTOR0_EVENT_RST;
2696 /* check for vector0 msix event source */
2697 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2698 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2700 return HCLGE_VECTOR0_EVENT_ERR;
2703 /* check for vector0 mailbox(=CMDQ RX) event source */
2704 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2705 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2706 *clearval = cmdq_src_reg;
2707 return HCLGE_VECTOR0_EVENT_MBX;
2710 /* print other vector0 event source */
2711 dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2712 cmdq_src_reg, msix_src_reg);
2713 return HCLGE_VECTOR0_EVENT_OTHER;
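/* Event priority as implemented above: IMP reset > global reset >
 * MSI-X error > mailbox > other. Only the highest-priority pending
 * source is reported per call; an uncleared lower-priority source
 * (e.g. the RX CMDQ event) simply re-raises the interrupt once
 * vector0 is re-enabled.
 */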
2716 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2719 switch (event_type) {
2720 case HCLGE_VECTOR0_EVENT_RST:
2721 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2723 case HCLGE_VECTOR0_EVENT_MBX:
2724 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2731 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2733 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2734 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2735 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2736 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2737 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2740 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2742 writel(enable ? 1 : 0, vector->addr);
2745 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2747 struct hclge_dev *hdev = data;
2751 hclge_enable_vector(&hdev->misc_vector, false);
2752 event_cause = hclge_check_event_cause(hdev, &clearval);
2754 /* vector 0 interrupt is shared with reset and mailbox source events. */
2755 switch (event_cause) {
2756 case HCLGE_VECTOR0_EVENT_ERR:
2757 /* we do not know what type of reset is required now. This can
2758 * only be decided after we fetch the types of errors which
2759 * caused this event. Therefore, for now:
2760 * 1. Assert an HNAE3_UNKNOWN_RESET type of reset, meaning the
2761 * actual reset type to be used is deferred.
2762 * 2. Schedule the reset service task.
2763 * 3. When the service task receives HNAE3_UNKNOWN_RESET it
2764 * will fetch the correct type of reset by first decoding
2765 * the types of errors.
2767 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2769 case HCLGE_VECTOR0_EVENT_RST:
2770 hclge_reset_task_schedule(hdev);
2772 case HCLGE_VECTOR0_EVENT_MBX:
2773 /* If we are here then,
2774 * 1. Either we are not handling any mbx task and we are not
2777 * 2. We could be handling an mbx task but nothing more is
2779 * In both cases, we should schedule the mbx task as there are
2780 * more mbx messages reported by this interrupt.
2782 hclge_mbx_task_schedule(hdev);
2785 dev_warn(&hdev->pdev->dev,
2786 "received unknown or unhandled event of vector0\n");
2790 /* clear the source of interrupt if it is not caused by reset */
2791 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2792 hclge_clear_event_cause(hdev, event_cause, clearval);
2793 hclge_enable_vector(&hdev->misc_vector, true);
2799 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2801 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2802 dev_warn(&hdev->pdev->dev,
2803 "vector(vector_id %d) has been freed.\n", vector_id);
2807 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2808 hdev->num_msi_left += 1;
2809 hdev->num_msi_used -= 1;
2812 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2814 struct hclge_misc_vector *vector = &hdev->misc_vector;
2816 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2818 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2819 hdev->vector_status[0] = 0;
2821 hdev->num_msi_left -= 1;
2822 hdev->num_msi_used += 1;
2825 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2829 hclge_get_misc_vector(hdev);
2831 /* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
2832 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2833 0, "hclge_misc", hdev);
2835 hclge_free_vector(hdev, 0);
2836 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2837 hdev->misc_vector.vector_irq);
2843 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2845 free_irq(hdev->misc_vector.vector_irq, hdev);
2846 hclge_free_vector(hdev, 0);
2849 int hclge_notify_client(struct hclge_dev *hdev,
2850 enum hnae3_reset_notify_type type)
2852 struct hnae3_client *client = hdev->nic_client;
2855 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
2859 if (!client->ops->reset_notify)
2862 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2863 struct hnae3_handle *handle = &hdev->vport[i].nic;
2866 ret = client->ops->reset_notify(handle, type);
2868 dev_err(&hdev->pdev->dev,
2869 "notify nic client failed %d(%d)\n", type, ret);
2877 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2878 enum hnae3_reset_notify_type type)
2880 struct hnae3_client *client = hdev->roce_client;
2884 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
2888 if (!client->ops->reset_notify)
2891 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2892 struct hnae3_handle *handle = &hdev->vport[i].roce;
2894 ret = client->ops->reset_notify(handle, type);
2896 dev_err(&hdev->pdev->dev,
2897 "notify roce client failed %d(%d)",
2906 static int hclge_reset_wait(struct hclge_dev *hdev)
2908 #define HCLGE_RESET_WAIT_MS 100
2909 #define HCLGE_RESET_WAIT_CNT 200
2910 u32 val, reg, reg_bit;
2913 switch (hdev->reset_type) {
2914 case HNAE3_IMP_RESET:
2915 reg = HCLGE_GLOBAL_RESET_REG;
2916 reg_bit = HCLGE_IMP_RESET_BIT;
2918 case HNAE3_GLOBAL_RESET:
2919 reg = HCLGE_GLOBAL_RESET_REG;
2920 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2922 case HNAE3_FUNC_RESET:
2923 reg = HCLGE_FUN_RST_ING;
2924 reg_bit = HCLGE_FUN_RST_ING_B;
2926 case HNAE3_FLR_RESET:
2929 dev_err(&hdev->pdev->dev,
2930 "Wait for unsupported reset type: %d\n",
2935 if (hdev->reset_type == HNAE3_FLR_RESET) {
2936 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2937 cnt++ < HCLGE_RESET_WAIT_CNT)
2938 msleep(HCLGE_RESET_WAIT_MS);
2940 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2941 dev_err(&hdev->pdev->dev,
2942 "flr wait timeout: %d\n", cnt);
2949 val = hclge_read_dev(&hdev->hw, reg);
2950 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2951 msleep(HCLGE_RESET_WAIT_MS);
2952 val = hclge_read_dev(&hdev->hw, reg);
2956 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2957 dev_warn(&hdev->pdev->dev,
2958 "Wait for reset timeout: %d\n", hdev->reset_type);
2965 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2967 struct hclge_vf_rst_cmd *req;
2968 struct hclge_desc desc;
2970 req = (struct hclge_vf_rst_cmd *)desc.data;
2971 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2972 req->dest_vfid = func_id;
2977 return hclge_cmd_send(&hdev->hw, &desc, 1);
2980 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2984 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2985 struct hclge_vport *vport = &hdev->vport[i];
2988 /* Send cmd to set/clear VF's FUNC_RST_ING */
2989 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2991 dev_err(&hdev->pdev->dev,
2992 "set vf(%d) rst failed %d!\n",
2993 vport->vport_id, ret);
2997 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3000 /* Inform VF to process the reset.
3001 * hclge_inform_reset_assert_to_vf may fail if VF
3002 * driver is not loaded.
3004 ret = hclge_inform_reset_assert_to_vf(vport);
3006 dev_warn(&hdev->pdev->dev,
3007 "inform reset to vf(%d) failed %d!\n",
3008 vport->vport_id, ret);
3014 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3016 struct hclge_desc desc;
3017 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3020 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3021 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3022 req->fun_reset_vfid = func_id;
3024 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3026 dev_err(&hdev->pdev->dev,
3027 "send function reset cmd fail, status =%d\n", ret);
3032 static void hclge_do_reset(struct hclge_dev *hdev)
3034 struct hnae3_handle *handle = &hdev->vport[0].nic;
3035 struct pci_dev *pdev = hdev->pdev;
3038 if (hclge_get_hw_reset_stat(handle)) {
3039 dev_info(&pdev->dev, "Hardware reset not finished\n");
3040 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3041 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3042 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3046 switch (hdev->reset_type) {
3047 case HNAE3_GLOBAL_RESET:
3048 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3049 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3050 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3051 dev_info(&pdev->dev, "Global Reset requested\n");
3053 case HNAE3_FUNC_RESET:
3054 dev_info(&pdev->dev, "PF Reset requested\n");
3055 /* schedule again to check later */
3056 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3057 hclge_reset_task_schedule(hdev);
3059 case HNAE3_FLR_RESET:
3060 dev_info(&pdev->dev, "FLR requested\n");
3061 /* schedule again to check later */
3062 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3063 hclge_reset_task_schedule(hdev);
3066 dev_warn(&pdev->dev,
3067 "Unsupported reset type: %d\n", hdev->reset_type);
3072 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3073 unsigned long *addr)
3075 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3077 /* first, resolve any unknown reset type to the known type(s) */
3078 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3079 /* we will intentionally ignore any errors from this function
3080 * as we will end up in *some* reset request in any case
3082 hclge_handle_hw_msix_error(hdev, addr);
3083 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3084 /* We deferred clearing the error event which caused the
3085 * interrupt since it was not possible to do that in
3086 * interrupt context (and this is the reason we introduced
3087 * the new UNKNOWN reset type). Now that the errors have
3088 * been handled and cleared in hardware, we can safely
3089 * enable interrupts. This is an exception to the norm.
3091 hclge_enable_vector(&hdev->misc_vector, true);
3094 /* return the highest priority reset level amongst all */
3095 if (test_bit(HNAE3_IMP_RESET, addr)) {
3096 rst_level = HNAE3_IMP_RESET;
3097 clear_bit(HNAE3_IMP_RESET, addr);
3098 clear_bit(HNAE3_GLOBAL_RESET, addr);
3099 clear_bit(HNAE3_FUNC_RESET, addr);
3100 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3101 rst_level = HNAE3_GLOBAL_RESET;
3102 clear_bit(HNAE3_GLOBAL_RESET, addr);
3103 clear_bit(HNAE3_FUNC_RESET, addr);
3104 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3105 rst_level = HNAE3_FUNC_RESET;
3106 clear_bit(HNAE3_FUNC_RESET, addr);
3107 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3108 rst_level = HNAE3_FLR_RESET;
3109 clear_bit(HNAE3_FLR_RESET, addr);
3112 if (hdev->reset_type != HNAE3_NONE_RESET &&
3113 rst_level < hdev->reset_type)
3114 return HNAE3_NONE_RESET;
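/* Worked example: if both HNAE3_GLOBAL_RESET and HNAE3_FUNC_RESET are
 * pending in @addr, the logic above picks HNAE3_GLOBAL_RESET and
 * clears both bits, folding the function-level reset into the global
 * one (unless an even higher-level reset is already in progress, in
 * which case HNAE3_NONE_RESET is returned).
 */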
3119 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3123 switch (hdev->reset_type) {
3124 case HNAE3_IMP_RESET:
3125 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3127 case HNAE3_GLOBAL_RESET:
3128 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3137 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3138 hclge_enable_vector(&hdev->misc_vector, true);
3141 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3145 switch (hdev->reset_type) {
3146 case HNAE3_FUNC_RESET:
3148 case HNAE3_FLR_RESET:
3149 ret = hclge_set_all_vf_rst(hdev, true);
3158 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3160 #define HCLGE_RESET_SYNC_TIME 100
3165 switch (hdev->reset_type) {
3166 case HNAE3_FUNC_RESET:
3167 /* There is no mechanism for the PF to know whether the VF has
3168 * stopped IO; for now, just wait 100 ms for the VF to stop IO
3170 msleep(HCLGE_RESET_SYNC_TIME);
3171 ret = hclge_func_reset_cmd(hdev, 0);
3173 dev_err(&hdev->pdev->dev,
3174 "asserting function reset fail %d!\n", ret);
3178 /* After performing PF reset, it is not necessary to do any
3179 * mailbox handling or send any command to firmware, because
3180 * any mailbox handling or command to firmware is only valid
3181 * after hclge_cmd_init is called.
3183 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3184 hdev->rst_stats.pf_rst_cnt++;
3186 case HNAE3_FLR_RESET:
3187 /* There is no mechanism for the PF to know whether the VF has
3188 * stopped IO; for now, just wait 100 ms for the VF to stop IO
3190 msleep(HCLGE_RESET_SYNC_TIME);
3191 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3192 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3193 hdev->rst_stats.flr_rst_cnt++;
3195 case HNAE3_IMP_RESET:
3196 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3197 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3198 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3204 /* inform hardware that preparatory work is done */
3205 msleep(HCLGE_RESET_SYNC_TIME);
3206 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3207 HCLGE_NIC_CMQ_ENABLE);
3208 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3213 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3215 #define MAX_RESET_FAIL_CNT 5
3216 #define RESET_UPGRADE_DELAY_SEC 10
3218 if (hdev->reset_pending) {
3219 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3220 hdev->reset_pending);
3222 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3223 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3224 BIT(HCLGE_IMP_RESET_BIT))) {
3225 dev_info(&hdev->pdev->dev,
3226 "reset failed because IMP Reset is pending\n");
3227 hclge_clear_reset_cause(hdev);
3229 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3230 hdev->reset_fail_cnt++;
3232 set_bit(hdev->reset_type, &hdev->reset_pending);
3233 dev_info(&hdev->pdev->dev,
3234 "re-schedule to wait for hw reset done\n");
3238 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3239 hclge_clear_reset_cause(hdev);
3240 mod_timer(&hdev->reset_timer,
3241 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3246 hclge_clear_reset_cause(hdev);
3247 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3251 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3255 switch (hdev->reset_type) {
3256 case HNAE3_FUNC_RESET:
3258 case HNAE3_FLR_RESET:
3259 ret = hclge_set_all_vf_rst(hdev, false);
3268 static void hclge_reset(struct hclge_dev *hdev)
3270 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3271 bool is_timeout = false;
3274 /* Initialize ae_dev reset status as well, in case enet layer wants to
3275 * know if device is undergoing reset
3277 ae_dev->reset_type = hdev->reset_type;
3278 hdev->rst_stats.reset_cnt++;
3279 /* perform reset of the stack & ae device for a client */
3280 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3284 ret = hclge_reset_prepare_down(hdev);
3289 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3291 goto err_reset_lock;
3295 ret = hclge_reset_prepare_wait(hdev);
3299 if (hclge_reset_wait(hdev)) {
3304 hdev->rst_stats.hw_reset_done_cnt++;
3306 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3311 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3313 goto err_reset_lock;
3315 ret = hclge_reset_ae_dev(hdev->ae_dev);
3317 goto err_reset_lock;
3319 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3321 goto err_reset_lock;
3323 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3325 goto err_reset_lock;
3327 hclge_clear_reset_cause(hdev);
3329 ret = hclge_reset_prepare_up(hdev);
3331 goto err_reset_lock;
3333 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3335 goto err_reset_lock;
3339 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3343 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3347 hdev->last_reset_time = jiffies;
3348 hdev->reset_fail_cnt = 0;
3349 hdev->rst_stats.reset_done_cnt++;
3350 ae_dev->reset_type = HNAE3_NONE_RESET;
3351 del_timer(&hdev->reset_timer);
3358 if (hclge_reset_err_handle(hdev, is_timeout))
3359 hclge_reset_task_schedule(hdev);
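/* Notification order driven by hclge_reset() above: RoCE DOWN,
 * NIC DOWN, assert and wait for the hardware reset, RoCE/NIC UNINIT,
 * re-init of the ae device, then NIC INIT, RESTORE and UP, and
 * finally RoCE INIT/UP, with the VF reset state toggled around the
 * hardware reset by hclge_reset_prepare_down()/_up().
 */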
3362 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3364 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3365 struct hclge_dev *hdev = ae_dev->priv;
3367 /* We might end up getting called broadly because of the 2 cases below:
3368 * 1. A recoverable error was conveyed through APEI and the only way
3369 * to bring normalcy is to reset.
3370 * 2. A new reset request from the stack due to timeout
3372 * For the first case, the error event might not have an ae handle
3373 * available. Check if this is a new reset request and we are not here
3374 * just because the last reset attempt did not succeed and the
3375 * watchdog hit us again. We know this if the last reset request did
3376 * not occur very recently (watchdog timer = 5*HZ; check after a
3377 * sufficiently large time, say 4*5*HZ). For a new request we reset
3378 * the "reset level" to PF reset. And if it is a repeat of the most
3379 * recent reset request, we throttle it: it will not be allowed again
3380 * before 3*HZ has elapsed.
3383 handle = &hdev->vport[0].nic;
3385 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3387 else if (hdev->default_reset_request)
3389 hclge_get_reset_level(hdev,
3390 &hdev->default_reset_request);
3391 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3392 hdev->reset_level = HNAE3_FUNC_RESET;
3394 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
3397 /* request reset & schedule reset task */
3398 set_bit(hdev->reset_level, &hdev->reset_request);
3399 hclge_reset_task_schedule(hdev);
3401 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3402 hdev->reset_level++;
3405 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3406 enum hnae3_reset_type rst_type)
3408 struct hclge_dev *hdev = ae_dev->priv;
3410 set_bit(rst_type, &hdev->default_reset_request);
3413 static void hclge_reset_timer(struct timer_list *t)
3415 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3417 dev_info(&hdev->pdev->dev,
3418 "triggering global reset in reset timer\n");
3419 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3420 hclge_reset_event(hdev->pdev, NULL);
3423 static void hclge_reset_subtask(struct hclge_dev *hdev)
3425 /* check if there is any ongoing reset in the hardware. This status
3426 * can be checked from reset_pending. If so, we need to wait for the
3427 * hardware to complete the reset.
3428 * a. If we are able to figure out in reasonable time that the
3429 * hardware has fully reset, we can proceed with driver and client
3431 * b. else, we can come back later to check this status, so re-sched
3434 hdev->last_reset_time = jiffies;
3435 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3436 if (hdev->reset_type != HNAE3_NONE_RESET)
3439 /* check if we got any *new* reset requests to be honored */
3440 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3441 if (hdev->reset_type != HNAE3_NONE_RESET)
3442 hclge_do_reset(hdev);
3444 hdev->reset_type = HNAE3_NONE_RESET;
3447 static void hclge_reset_service_task(struct work_struct *work)
3449 struct hclge_dev *hdev =
3450 container_of(work, struct hclge_dev, rst_service_task);
3452 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3455 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3457 hclge_reset_subtask(hdev);
3459 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3462 static void hclge_mailbox_service_task(struct work_struct *work)
3464 struct hclge_dev *hdev =
3465 container_of(work, struct hclge_dev, mbx_service_task);
3467 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3470 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3472 hclge_mbx_handler(hdev);
3474 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3477 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3481 /* start from vport 1 because the PF is always alive */
3482 for (i = 1; i < hdev->num_alloc_vport; i++) {
3483 struct hclge_vport *vport = &hdev->vport[i];
3485 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3486 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3488 /* If vf is not alive, set to default value */
3489 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3490 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3494 static void hclge_service_task(struct work_struct *work)
3496 struct hclge_dev *hdev =
3497 container_of(work, struct hclge_dev, service_task);
3499 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3500 hclge_update_stats_for_all(hdev);
3501 hdev->hw_stats.stats_timer = 0;
3504 hclge_update_port_info(hdev);
3505 hclge_update_link_status(hdev);
3506 hclge_update_vport_alive(hdev);
3507 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3508 hclge_rfs_filter_expire(hdev);
3509 hdev->fd_arfs_expire_timer = 0;
3511 hclge_service_complete(hdev);
3514 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3516 /* VF handle has no client */
3517 if (!handle->client)
3518 return container_of(handle, struct hclge_vport, nic);
3519 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3520 return container_of(handle, struct hclge_vport, roce);
3522 return container_of(handle, struct hclge_vport, nic);
3525 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3526 struct hnae3_vector_info *vector_info)
3528 struct hclge_vport *vport = hclge_get_vport(handle);
3529 struct hnae3_vector_info *vector = vector_info;
3530 struct hclge_dev *hdev = vport->back;
3534 vector_num = min(hdev->num_msi_left, vector_num);
3536 for (j = 0; j < vector_num; j++) {
3537 for (i = 1; i < hdev->num_msi; i++) {
3538 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3539 vector->vector = pci_irq_vector(hdev->pdev, i);
3540 vector->io_addr = hdev->hw.io_base +
3541 HCLGE_VECTOR_REG_BASE +
3542 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3544 HCLGE_VECTOR_VF_OFFSET;
3545 hdev->vector_status[i] = vport->vport_id;
3546 hdev->vector_irq[i] = vector->vector;
3555 hdev->num_msi_left -= alloc;
3556 hdev->num_msi_used += alloc;
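/* Note: the search above starts at i = 1 because vector 0 is claimed
 * by the misc interrupt in hclge_get_misc_vector().
 */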
3561 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3565 for (i = 0; i < hdev->num_msi; i++)
3566 if (vector == hdev->vector_irq[i])
3572 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3574 struct hclge_vport *vport = hclge_get_vport(handle);
3575 struct hclge_dev *hdev = vport->back;
3578 vector_id = hclge_get_vector_index(hdev, vector);
3579 if (vector_id < 0) {
3580 dev_err(&hdev->pdev->dev,
3581 "Get vector index fail. vector_id =%d\n", vector_id);
3585 hclge_free_vector(hdev, vector_id);
3590 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3592 return HCLGE_RSS_KEY_SIZE;
3595 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3597 return HCLGE_RSS_IND_TBL_SIZE;
3600 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3601 const u8 hfunc, const u8 *key)
3603 struct hclge_rss_config_cmd *req;
3604 struct hclge_desc desc;
3609 req = (struct hclge_rss_config_cmd *)desc.data;
3611 for (key_offset = 0; key_offset < 3; key_offset++) {
3612 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3615 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3616 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3618 if (key_offset == 2)
3620 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3622 key_size = HCLGE_RSS_HASH_KEY_NUM;
3624 memcpy(req->hash_key,
3625 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
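/* Key layout, assuming HCLGE_RSS_KEY_SIZE is 40 and
 * HCLGE_RSS_HASH_KEY_NUM is 16 (values taken from hclge_main.h):
 * descriptor 0 carries key bytes 0-15, descriptor 1 bytes 16-31,
 * and descriptor 2 the remaining 40 - 16 * 2 = 8 bytes.
 */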
3627 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3629 dev_err(&hdev->pdev->dev,
3630 "Configure RSS config fail, status = %d\n",
3638 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3640 struct hclge_rss_indirection_table_cmd *req;
3641 struct hclge_desc desc;
3645 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3647 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3648 hclge_cmd_setup_basic_desc
3649 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3651 req->start_table_index =
3652 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3653 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3655 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3656 req->rss_result[j] =
3657 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3659 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3661 dev_err(&hdev->pdev->dev,
3662 "Configure rss indir table fail,status = %d\n",
3670 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3671 u16 *tc_size, u16 *tc_offset)
3673 struct hclge_rss_tc_mode_cmd *req;
3674 struct hclge_desc desc;
3678 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3679 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3681 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3684 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3685 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3686 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3687 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3688 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3690 req->rss_tc_mode[i] = cpu_to_le16(mode);
3693 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3695 dev_err(&hdev->pdev->dev,
3696 "Configure rss tc mode fail, status = %d\n", ret);
3701 static void hclge_get_rss_type(struct hclge_vport *vport)
3703 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3704 vport->rss_tuple_sets.ipv4_udp_en ||
3705 vport->rss_tuple_sets.ipv4_sctp_en ||
3706 vport->rss_tuple_sets.ipv6_tcp_en ||
3707 vport->rss_tuple_sets.ipv6_udp_en ||
3708 vport->rss_tuple_sets.ipv6_sctp_en)
3709 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3710 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3711 vport->rss_tuple_sets.ipv6_fragment_en)
3712 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3714 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3717 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3719 struct hclge_rss_input_tuple_cmd *req;
3720 struct hclge_desc desc;
3723 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3725 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3727 /* Get the tuple cfg from the PF */
3728 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3729 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3730 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3731 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3732 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3733 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3734 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3735 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3736 hclge_get_rss_type(&hdev->vport[0]);
3737 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3739 dev_err(&hdev->pdev->dev,
3740 "Configure rss input fail, status = %d\n", ret);
3744 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3747 struct hclge_vport *vport = hclge_get_vport(handle);
3750 /* Get hash algorithm */
3752 switch (vport->rss_algo) {
3753 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3754 *hfunc = ETH_RSS_HASH_TOP;
3756 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3757 *hfunc = ETH_RSS_HASH_XOR;
3760 *hfunc = ETH_RSS_HASH_UNKNOWN;
3765 /* Get the RSS key requested by the user */
3767 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3769 /* Get indirect table */
3771 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3772 indir[i] = vport->rss_indirection_tbl[i];
3777 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3778 const u8 *key, const u8 hfunc)
3780 struct hclge_vport *vport = hclge_get_vport(handle);
3781 struct hclge_dev *hdev = vport->back;
3785 /* Set the RSS hash key if specified by the user */
3788 case ETH_RSS_HASH_TOP:
3789 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3791 case ETH_RSS_HASH_XOR:
3792 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3794 case ETH_RSS_HASH_NO_CHANGE:
3795 hash_algo = vport->rss_algo;
3801 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3805 /* Update the shadow RSS key with the user specified key */
3806 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3807 vport->rss_algo = hash_algo;
3810 /* Update the shadow RSS table with user specified qids */
3811 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3812 vport->rss_indirection_tbl[i] = indir[i];
3814 /* Update the hardware */
3815 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3818 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3820 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3822 if (nfc->data & RXH_L4_B_2_3)
3823 hash_sets |= HCLGE_D_PORT_BIT;
3825 hash_sets &= ~HCLGE_D_PORT_BIT;
3827 if (nfc->data & RXH_IP_SRC)
3828 hash_sets |= HCLGE_S_IP_BIT;
3830 hash_sets &= ~HCLGE_S_IP_BIT;
3832 if (nfc->data & RXH_IP_DST)
3833 hash_sets |= HCLGE_D_IP_BIT;
3835 hash_sets &= ~HCLGE_D_IP_BIT;
3837 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3838 hash_sets |= HCLGE_V_TAG_BIT;
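/* Example: for TCP_V4_FLOW with nfc->data set to RXH_IP_SRC |
 * RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, the helper above yields
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT; SCTP flows additionally get HCLGE_V_TAG_BIT.
 */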
3843 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3844 struct ethtool_rxnfc *nfc)
3846 struct hclge_vport *vport = hclge_get_vport(handle);
3847 struct hclge_dev *hdev = vport->back;
3848 struct hclge_rss_input_tuple_cmd *req;
3849 struct hclge_desc desc;
3853 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3854 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3857 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3858 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3860 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3861 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3862 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3863 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3864 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3865 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3866 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3867 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3869 tuple_sets = hclge_get_rss_hash_bits(nfc);
3870 switch (nfc->flow_type) {
3872 req->ipv4_tcp_en = tuple_sets;
3875 req->ipv6_tcp_en = tuple_sets;
3878 req->ipv4_udp_en = tuple_sets;
3881 req->ipv6_udp_en = tuple_sets;
3884 req->ipv4_sctp_en = tuple_sets;
3887 if ((nfc->data & RXH_L4_B_0_1) ||
3888 (nfc->data & RXH_L4_B_2_3))
3891 req->ipv6_sctp_en = tuple_sets;
3894 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3897 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3903 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3905 dev_err(&hdev->pdev->dev,
3906 "Set rss tuple fail, status = %d\n", ret);
3910 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3911 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3912 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3913 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3914 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3915 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3916 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3917 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3918 hclge_get_rss_type(vport);
3922 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3923 struct ethtool_rxnfc *nfc)
3925 struct hclge_vport *vport = hclge_get_vport(handle);
3930 switch (nfc->flow_type) {
3932 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3935 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3938 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3941 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3944 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3947 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3951 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3960 if (tuple_sets & HCLGE_D_PORT_BIT)
3961 nfc->data |= RXH_L4_B_2_3;
3962 if (tuple_sets & HCLGE_S_PORT_BIT)
3963 nfc->data |= RXH_L4_B_0_1;
3964 if (tuple_sets & HCLGE_D_IP_BIT)
3965 nfc->data |= RXH_IP_DST;
3966 if (tuple_sets & HCLGE_S_IP_BIT)
3967 nfc->data |= RXH_IP_SRC;
3972 static int hclge_get_tc_size(struct hnae3_handle *handle)
3974 struct hclge_vport *vport = hclge_get_vport(handle);
3975 struct hclge_dev *hdev = vport->back;
3977 return hdev->rss_size_max;
3980 int hclge_rss_init_hw(struct hclge_dev *hdev)
3982 struct hclge_vport *vport = hdev->vport;
3983 u8 *rss_indir = vport[0].rss_indirection_tbl;
3984 u16 rss_size = vport[0].alloc_rss_size;
3985 u8 *key = vport[0].rss_hash_key;
3986 u8 hfunc = vport[0].rss_algo;
3987 u16 tc_offset[HCLGE_MAX_TC_NUM];
3988 u16 tc_valid[HCLGE_MAX_TC_NUM];
3989 u16 tc_size[HCLGE_MAX_TC_NUM];
3993 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3997 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4001 ret = hclge_set_rss_input_tuple(hdev);
4005 /* Each TC has the same queue size; the tc_size set to hardware is
4006 * the log2 of the roundup power of two of rss_size. The actual
4007 * queue size is limited by the indirection table.
4009 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4010 dev_err(&hdev->pdev->dev,
4011 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4016 roundup_size = roundup_pow_of_two(rss_size);
4017 roundup_size = ilog2(roundup_size);
4019 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4022 if (!(hdev->hw_tc_map & BIT(i)))
4026 tc_size[i] = roundup_size;
4027 tc_offset[i] = rss_size * i;
4030 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
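/* Worked example for the TC sizing above: with rss_size = 24,
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so every valid TC
 * gets tc_size = 5 while tc_offset advances in steps of 24; the
 * indirection table still bounds the queues actually used.
 */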
4033 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4035 struct hclge_vport *vport = hdev->vport;
4038 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4039 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4040 vport[j].rss_indirection_tbl[i] =
4041 i % vport[j].alloc_rss_size;
4045 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4047 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4048 struct hclge_vport *vport = hdev->vport;
4050 if (hdev->pdev->revision >= 0x21)
4051 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4053 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4054 vport[i].rss_tuple_sets.ipv4_tcp_en =
4055 HCLGE_RSS_INPUT_TUPLE_OTHER;
4056 vport[i].rss_tuple_sets.ipv4_udp_en =
4057 HCLGE_RSS_INPUT_TUPLE_OTHER;
4058 vport[i].rss_tuple_sets.ipv4_sctp_en =
4059 HCLGE_RSS_INPUT_TUPLE_SCTP;
4060 vport[i].rss_tuple_sets.ipv4_fragment_en =
4061 HCLGE_RSS_INPUT_TUPLE_OTHER;
4062 vport[i].rss_tuple_sets.ipv6_tcp_en =
4063 HCLGE_RSS_INPUT_TUPLE_OTHER;
4064 vport[i].rss_tuple_sets.ipv6_udp_en =
4065 HCLGE_RSS_INPUT_TUPLE_OTHER;
4066 vport[i].rss_tuple_sets.ipv6_sctp_en =
4067 HCLGE_RSS_INPUT_TUPLE_SCTP;
4068 vport[i].rss_tuple_sets.ipv6_fragment_en =
4069 HCLGE_RSS_INPUT_TUPLE_OTHER;
4071 vport[i].rss_algo = rss_algo;
4073 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4074 HCLGE_RSS_KEY_SIZE);
4077 hclge_rss_indir_init_cfg(hdev);
4080 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4081 int vector_id, bool en,
4082 struct hnae3_ring_chain_node *ring_chain)
4084 struct hclge_dev *hdev = vport->back;
4085 struct hnae3_ring_chain_node *node;
4086 struct hclge_desc desc;
4087 struct hclge_ctrl_vector_chain_cmd *req
4088 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4089 enum hclge_cmd_status status;
4090 enum hclge_opcode_type op;
4091 u16 tqp_type_and_id;
4094 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4095 hclge_cmd_setup_basic_desc(&desc, op, false);
4096 req->int_vector_id = vector_id;
4099 for (node = ring_chain; node; node = node->next) {
4100 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4101 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4103 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4104 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4105 HCLGE_TQP_ID_S, node->tqp_index);
4106 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4108 hnae3_get_field(node->int_gl_idx,
4109 HNAE3_RING_GL_IDX_M,
4110 HNAE3_RING_GL_IDX_S));
4111 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4112 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4113 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4114 req->vfid = vport->vport_id;
4116 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4118 dev_err(&hdev->pdev->dev,
4119 "Map TQP fail, status is %d.\n",
4125 hclge_cmd_setup_basic_desc(&desc,
4128 req->int_vector_id = vector_id;
4133 req->int_cause_num = i;
4134 req->vfid = vport->vport_id;
4135 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4137 dev_err(&hdev->pdev->dev,
4138 "Map TQP fail, status is %d.\n", status);
4146 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4148 struct hnae3_ring_chain_node *ring_chain)
4150 struct hclge_vport *vport = hclge_get_vport(handle);
4151 struct hclge_dev *hdev = vport->back;
4154 vector_id = hclge_get_vector_index(hdev, vector);
4155 if (vector_id < 0) {
4156 dev_err(&hdev->pdev->dev,
4157 "Get vector index fail. vector_id =%d\n", vector_id);
4161 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4164 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4166 struct hnae3_ring_chain_node *ring_chain)
4168 struct hclge_vport *vport = hclge_get_vport(handle);
4169 struct hclge_dev *hdev = vport->back;
4172 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4175 vector_id = hclge_get_vector_index(hdev, vector);
4176 if (vector_id < 0) {
4177 dev_err(&handle->pdev->dev,
4178 "Get vector index fail. ret =%d\n", vector_id);
4182 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4184 dev_err(&handle->pdev->dev,
4185 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4192 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4193 struct hclge_promisc_param *param)
4195 struct hclge_promisc_cfg_cmd *req;
4196 struct hclge_desc desc;
4199 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4201 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4202 req->vf_id = param->vf_id;
4204 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
4205 * on pdev revision(0x20); newer revisions support them. Setting
4206 * these two fields does not return an error when the driver sends
4207 * the command to firmware on revision(0x20).
4209 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4210 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4212 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4214 dev_err(&hdev->pdev->dev,
4215 "Set promisc mode fail, status is %d.\n", ret);
4220 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4221 bool en_mc, bool en_bc, int vport_id)
4226 memset(param, 0, sizeof(struct hclge_promisc_param));
4228 param->enable = HCLGE_PROMISC_EN_UC;
4230 param->enable |= HCLGE_PROMISC_EN_MC;
4232 param->enable |= HCLGE_PROMISC_EN_BC;
4233 param->vf_id = vport_id;
4236 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4239 struct hclge_vport *vport = hclge_get_vport(handle);
4240 struct hclge_dev *hdev = vport->back;
4241 struct hclge_promisc_param param;
4242 bool en_bc_pmc = true;
4244 /* For revision 0x20, if broadcast promisc is enabled, the vlan
4245 * filter is always bypassed. So broadcast promisc should be
4246 * disabled until the user enables promisc mode
4248 if (handle->pdev->revision == 0x20)
4249 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4251 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4253 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4256 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4258 struct hclge_get_fd_mode_cmd *req;
4259 struct hclge_desc desc;
4262 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4264 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4266 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4268 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4272 *fd_mode = req->mode;
4277 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4278 u32 *stage1_entry_num,
4279 u32 *stage2_entry_num,
4280 u16 *stage1_counter_num,
4281 u16 *stage2_counter_num)
4283 struct hclge_get_fd_allocation_cmd *req;
4284 struct hclge_desc desc;
4287 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4289 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4291 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4293 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4298 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4299 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4300 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4301 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4306 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4308 struct hclge_set_fd_key_config_cmd *req;
4309 struct hclge_fd_key_cfg *stage;
4310 struct hclge_desc desc;
4313 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4315 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4316 stage = &hdev->fd_cfg.key_cfg[stage_num];
4317 req->stage = stage_num;
4318 req->key_select = stage->key_sel;
4319 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4320 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4321 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4322 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4323 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4324 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4326 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4328 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4333 static int hclge_init_fd_config(struct hclge_dev *hdev)
4335 #define LOW_2_WORDS 0x03
4336 struct hclge_fd_key_cfg *key_cfg;
4339 if (!hnae3_dev_fd_supported(hdev))
4342 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4346 switch (hdev->fd_cfg.fd_mode) {
4347 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4348 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4350 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4351 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4354 dev_err(&hdev->pdev->dev,
4355 "Unsupported flow director mode %d\n",
4356 hdev->fd_cfg.fd_mode);
4360 hdev->fd_cfg.proto_support =
4361 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4362 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4363 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4364 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4365 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4366 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4367 key_cfg->outer_sipv6_word_en = 0;
4368 key_cfg->outer_dipv6_word_en = 0;
4370 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4371 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4372 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4373 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4375 /* If using the max 400-bit key, we can support ether-type tuples */
4376 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4377 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4378 key_cfg->tuple_active |=
4379 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4382 /* roce_type is used to filter roce frames
4383 * dst_vport is used to specify the rule
4385 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4387 ret = hclge_get_fd_allocation(hdev,
4388 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4389 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4390 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4391 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4395 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
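/* Flow director bring-up above, in order: query the mode (which fixes
 * the maximum key length), derive the stage-1 tuple and meta-data key
 * layout, query the per-stage entry/counter allocation, then commit
 * the stage-1 key configuration to hardware.
 */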
4398 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4399 int loc, u8 *key, bool is_add)
4401 struct hclge_fd_tcam_config_1_cmd *req1;
4402 struct hclge_fd_tcam_config_2_cmd *req2;
4403 struct hclge_fd_tcam_config_3_cmd *req3;
4404 struct hclge_desc desc[3];
4407 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4408 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4409 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4410 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4411 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4413 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4414 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4415 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4417 req1->stage = stage;
4418 req1->xy_sel = sel_x ? 1 : 0;
4419 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4420 req1->index = cpu_to_le32(loc);
4421 req1->entry_vld = sel_x ? is_add : 0;
4424 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4425 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4426 sizeof(req2->tcam_data));
4427 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4428 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
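/* The three descriptors above are chained with HCLGE_CMD_FLAG_NEXT and
 * carry one TCAM entry split across the req1/req2/req3 tcam_data
 * arrays; sel_x selects whether the x or the y half of the key is
 * written.
 */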
4431 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4433 dev_err(&hdev->pdev->dev,
4434 "config tcam key fail, ret=%d\n",
4440 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4441 struct hclge_fd_ad_data *action)
4443 struct hclge_fd_ad_config_cmd *req;
4444 struct hclge_desc desc;
4448 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4450 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4451 req->index = cpu_to_le32(loc);
4454 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4455 action->write_rule_id_to_bd);
4456 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4459 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4460 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4461 action->forward_to_direct_queue);
4462 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4464 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4465 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4466 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4467 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4468 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4469 action->next_input_key);
4471 req->ad_data = cpu_to_le64(ad_data);
4472 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4474 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
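/* Each tuple is folded into an x/y key pair by the calc_x()/calc_y()
 * helpers from a (value, mask) pair; bits cleared in the mask become
 * "don't care" bits of the TCAM match. Illustrative example (the
 * exact bit encoding is defined by calc_x()/calc_y(), not here): a
 * VLAN tuple with value 100 and mask 0xFFF matches VLAN id 100 only,
 * while mask 0 matches any VLAN id.
 */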
4479 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4480 struct hclge_fd_rule *rule)
4482 u16 tmp_x_s, tmp_y_s;
4483 u32 tmp_x_l, tmp_y_l;
4486 if (rule->unused_tuple & tuple_bit)
4489 switch (tuple_bit) {
4492 case BIT(INNER_DST_MAC):
4493 for (i = 0; i < 6; i++) {
4494 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4495 rule->tuples_mask.dst_mac[i]);
4496 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4497 rule->tuples_mask.dst_mac[i]);
4501 case BIT(INNER_SRC_MAC):
4502 for (i = 0; i < 6; i++) {
4503 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4504 rule->tuples_mask.src_mac[i]);
4505 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4506 rule->tuples_mask.src_mac[i]);
4510 case BIT(INNER_VLAN_TAG_FST):
4511 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4512 rule->tuples_mask.vlan_tag1);
4513 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4514 rule->tuples_mask.vlan_tag1);
4515 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4516 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4519 case BIT(INNER_ETH_TYPE):
4520 calc_x(tmp_x_s, rule->tuples.ether_proto,
4521 rule->tuples_mask.ether_proto);
4522 calc_y(tmp_y_s, rule->tuples.ether_proto,
4523 rule->tuples_mask.ether_proto);
4524 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4525 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4528 case BIT(INNER_IP_TOS):
4529 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4530 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4533 case BIT(INNER_IP_PROTO):
4534 calc_x(*key_x, rule->tuples.ip_proto,
4535 rule->tuples_mask.ip_proto);
4536 calc_y(*key_y, rule->tuples.ip_proto,
4537 rule->tuples_mask.ip_proto);
4540 case BIT(INNER_SRC_IP):
4541 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4542 rule->tuples_mask.src_ip[3]);
4543 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4544 rule->tuples_mask.src_ip[3]);
4545 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4546 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4549 case BIT(INNER_DST_IP):
4550 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4551 rule->tuples_mask.dst_ip[3]);
4552 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4553 rule->tuples_mask.dst_ip[3]);
4554 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4555 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4558 case BIT(INNER_SRC_PORT):
4559 calc_x(tmp_x_s, rule->tuples.src_port,
4560 rule->tuples_mask.src_port);
4561 calc_y(tmp_y_s, rule->tuples.src_port,
4562 rule->tuples_mask.src_port);
4563 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4564 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4567 case BIT(INNER_DST_PORT):
4568 calc_x(tmp_x_s, rule->tuples.dst_port,
4569 rule->tuples_mask.dst_port);
4570 calc_y(tmp_y_s, rule->tuples.dst_port,
4571 rule->tuples_mask.dst_port);
4572 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4573 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
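/* port_number packs the DST_VPORT meta data target: for a HOST_PORT
 * it combines pf_id and vf_id with the port-type bit set to
 * HOST_PORT; for a NETWORK_PORT it carries the physical network port
 * id instead.
 */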
4581 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4582 u8 vf_id, u8 network_port_id)
4584 u32 port_number = 0;
4586 if (port_type == HOST_PORT) {
4587 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4589 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4591 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4593 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4594 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4595 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4601 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4602 __le32 *key_x, __le32 *key_y,
4603 struct hclge_fd_rule *rule)
4605 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4606 u8 cur_pos = 0, tuple_size, shift_bits;
4609 for (i = 0; i < MAX_META_DATA; i++) {
4610 tuple_size = meta_data_key_info[i].key_length;
4611 tuple_bit = key_cfg->meta_data_active & BIT(i);
4613 switch (tuple_bit) {
4614 case BIT(ROCE_TYPE):
4615 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4616 cur_pos += tuple_size;
4618 case BIT(DST_VPORT):
4619 port_number = hclge_get_port_number(HOST_PORT, 0,
4621 hnae3_set_field(meta_data,
4622 GENMASK(cur_pos + tuple_size - 1, cur_pos),
4623 cur_pos, port_number);
4624 cur_pos += tuple_size;
4631 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4632 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4633 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4635 *key_x = cpu_to_le32(tmp_x << shift_bits);
4636 *key_y = cpu_to_le32(tmp_y << shift_bits);
4639 /* A complete key consists of a meta data key and a tuple key.
4640 * The meta data key is stored in the MSB region, the tuple key in the
4641 * LSB region, and unused bits are filled with 0.
4643 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4644 struct hclge_fd_rule *rule)
4646 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4647 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4648 u8 *cur_key_x, *cur_key_y;
4649 int i, ret, tuple_size;
4650 u8 meta_data_region;
4652 memset(key_x, 0, sizeof(key_x));
4653 memset(key_y, 0, sizeof(key_y));
4657 for (i = 0; i < MAX_TUPLE; i++) {
4661 tuple_size = tuple_key_info[i].key_length / 8;
4662 check_tuple = key_cfg->tuple_active & BIT(i);
4664 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4667 cur_key_x += tuple_size;
4668 cur_key_y += tuple_size;
4672 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4673 MAX_META_DATA_LENGTH / 8;
4675 hclge_fd_convert_meta_data(key_cfg,
4676 (__le32 *)(key_x + meta_data_region),
4677 (__le32 *)(key_y + meta_data_region),
4680 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4683 dev_err(&hdev->pdev->dev,
4684 "fd key_y config fail, loc=%d, ret=%d\n",
4685 rule->location, ret);
4689 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4692 dev_err(&hdev->pdev->dev,
4693 "fd key_x config fail, loc=%d, ret=%d\n",
4694 rule->location, ret);
4698 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4699 struct hclge_fd_rule *rule)
4701 struct hclge_fd_ad_data ad_data;
4703 ad_data.ad_id = rule->location;
4705 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4706 ad_data.drop_packet = true;
4707 ad_data.forward_to_direct_queue = false;
4708 ad_data.queue_id = 0;
4710 ad_data.drop_packet = false;
4711 ad_data.forward_to_direct_queue = true;
4712 ad_data.queue_id = rule->queue_id;
4715 ad_data.use_counter = false;
4716 ad_data.counter_id = 0;
4718 ad_data.use_next_stage = false;
4719 ad_data.next_input_key = 0;
4721 ad_data.write_rule_id_to_bd = true;
4722 ad_data.rule_id = rule->location;
4724 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
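/* Programming one rule is a two step sequence (see
 * hclge_fd_config_rule()): write the action data for rule->location
 * first, then the y half and finally the x half of the key; the entry
 * only becomes valid once the x half is written with entry_vld set.
 */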
4727 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4728 struct ethtool_rx_flow_spec *fs, u32 *unused)
4730 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4731 struct ethtool_usrip4_spec *usr_ip4_spec;
4732 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4733 struct ethtool_usrip6_spec *usr_ip6_spec;
4734 struct ethhdr *ether_spec;
4736 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4739 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4742 if ((fs->flow_type & FLOW_EXT) &&
4743 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4744 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4748 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4752 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4753 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4755 if (!tcp_ip4_spec->ip4src)
4756 *unused |= BIT(INNER_SRC_IP);
4758 if (!tcp_ip4_spec->ip4dst)
4759 *unused |= BIT(INNER_DST_IP);
4761 if (!tcp_ip4_spec->psrc)
4762 *unused |= BIT(INNER_SRC_PORT);
4764 if (!tcp_ip4_spec->pdst)
4765 *unused |= BIT(INNER_DST_PORT);
4767 if (!tcp_ip4_spec->tos)
4768 *unused |= BIT(INNER_IP_TOS);
4772 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4773 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4774 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4776 if (!usr_ip4_spec->ip4src)
4777 *unused |= BIT(INNER_SRC_IP);
4779 if (!usr_ip4_spec->ip4dst)
4780 *unused |= BIT(INNER_DST_IP);
4782 if (!usr_ip4_spec->tos)
4783 *unused |= BIT(INNER_IP_TOS);
4785 if (!usr_ip4_spec->proto)
4786 *unused |= BIT(INNER_IP_PROTO);
4788 if (usr_ip4_spec->l4_4_bytes)
4791 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4798 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4799 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4802 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4803 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4804 *unused |= BIT(INNER_SRC_IP);
4806 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4807 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4808 *unused |= BIT(INNER_DST_IP);
4810 if (!tcp_ip6_spec->psrc)
4811 *unused |= BIT(INNER_SRC_PORT);
4813 if (!tcp_ip6_spec->pdst)
4814 *unused |= BIT(INNER_DST_PORT);
4816 if (tcp_ip6_spec->tclass)
4820 case IPV6_USER_FLOW:
4821 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4822 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4823 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4824 BIT(INNER_DST_PORT);
4826 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4827 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4828 *unused |= BIT(INNER_SRC_IP);
4830 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4831 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4832 *unused |= BIT(INNER_DST_IP);
4834 if (!usr_ip6_spec->l4_proto)
4835 *unused |= BIT(INNER_IP_PROTO);
4837 if (usr_ip6_spec->tclass)
4840 if (usr_ip6_spec->l4_4_bytes)
4845 ether_spec = &fs->h_u.ether_spec;
4846 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4847 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4848 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4850 if (is_zero_ether_addr(ether_spec->h_source))
4851 *unused |= BIT(INNER_SRC_MAC);
4853 if (is_zero_ether_addr(ether_spec->h_dest))
4854 *unused |= BIT(INNER_DST_MAC);
4856 if (!ether_spec->h_proto)
4857 *unused |= BIT(INNER_ETH_TYPE);
4864 if (fs->flow_type & FLOW_EXT) {
4865 if (fs->h_ext.vlan_etype)
4867 if (!fs->h_ext.vlan_tci)
4868 *unused |= BIT(INNER_VLAN_TAG_FST);
4870 if (fs->m_ext.vlan_tci) {
4871 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4875 *unused |= BIT(INNER_VLAN_TAG_FST);
4878 if (fs->flow_type & FLOW_MAC_EXT) {
4879 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4882 if (is_zero_ether_addr(fs->h_ext.h_dest))
4883 *unused |= BIT(INNER_DST_MAC);
4885 *unused &= ~(BIT(INNER_DST_MAC));
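/* hclge_fd_check_spec() validates what the user passes in through
 * ethtool, e.g. (hypothetical invocation, assuming eth0 is backed by
 * this driver):
 *
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 dst-port 80 \
 *           action 3 loc 0
 *
 * Tuples whose spec fields are left zero are recorded in *unused and
 * later masked out of the TCAM key.
 */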
4891 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4893 struct hclge_fd_rule *rule = NULL;
4894 struct hlist_node *node2;
4896 spin_lock_bh(&hdev->fd_rule_lock);
4897 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4898 if (rule->location >= location)
4902 spin_unlock_bh(&hdev->fd_rule_lock);
4904 return rule && rule->location == location;
4907 /* the caller must hold hdev->fd_rule_lock */
4908 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4909 struct hclge_fd_rule *new_rule,
4913 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4914 struct hlist_node *node2;
4916 if (is_add && !new_rule)
4919 hlist_for_each_entry_safe(rule, node2,
4920 &hdev->fd_rule_list, rule_node) {
4921 if (rule->location >= location)
4926 if (rule && rule->location == location) {
4927 hlist_del(&rule->rule_node);
4929 hdev->hclge_fd_rule_num--;
4932 if (!hdev->hclge_fd_rule_num)
4933 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4934 clear_bit(location, hdev->fd_bmap);
4938 } else if (!is_add) {
4939 dev_err(&hdev->pdev->dev,
4940 "delete fail, rule %d is inexistent\n",
4945 INIT_HLIST_NODE(&new_rule->rule_node);
4948 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4950 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4952 set_bit(location, hdev->fd_bmap);
4953 hdev->hclge_fd_rule_num++;
4954 hdev->fd_active_type = new_rule->rule_type;
4959 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4960 struct ethtool_rx_flow_spec *fs,
4961 struct hclge_fd_rule *rule)
4963 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4965 switch (flow_type) {
4969 rule->tuples.src_ip[3] =
4970 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4971 rule->tuples_mask.src_ip[3] =
4972 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4974 rule->tuples.dst_ip[3] =
4975 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4976 rule->tuples_mask.dst_ip[3] =
4977 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4979 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4980 rule->tuples_mask.src_port =
4981 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4983 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4984 rule->tuples_mask.dst_port =
4985 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4987 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4988 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4990 rule->tuples.ether_proto = ETH_P_IP;
4991 rule->tuples_mask.ether_proto = 0xFFFF;
4995 rule->tuples.src_ip[3] =
4996 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4997 rule->tuples_mask.src_ip[3] =
4998 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5000 rule->tuples.dst_ip[3] =
5001 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5002 rule->tuples_mask.dst_ip[3] =
5003 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5005 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5006 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5008 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5009 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5011 rule->tuples.ether_proto = ETH_P_IP;
5012 rule->tuples_mask.ether_proto = 0xFFFF;
5018 be32_to_cpu_array(rule->tuples.src_ip,
5019 fs->h_u.tcp_ip6_spec.ip6src, 4);
5020 be32_to_cpu_array(rule->tuples_mask.src_ip,
5021 fs->m_u.tcp_ip6_spec.ip6src, 4);
5023 be32_to_cpu_array(rule->tuples.dst_ip,
5024 fs->h_u.tcp_ip6_spec.ip6dst, 4);
5025 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5026 fs->m_u.tcp_ip6_spec.ip6dst, 4);
5028 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5029 rule->tuples_mask.src_port =
5030 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5032 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5033 rule->tuples_mask.dst_port =
5034 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5036 rule->tuples.ether_proto = ETH_P_IPV6;
5037 rule->tuples_mask.ether_proto = 0xFFFF;
5040 case IPV6_USER_FLOW:
5041 be32_to_cpu_array(rule->tuples.src_ip,
5042 fs->h_u.usr_ip6_spec.ip6src, 4);
5043 be32_to_cpu_array(rule->tuples_mask.src_ip,
5044 fs->m_u.usr_ip6_spec.ip6src, 4);
5046 be32_to_cpu_array(rule->tuples.dst_ip,
5047 fs->h_u.usr_ip6_spec.ip6dst, 4);
5048 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5049 fs->m_u.usr_ip6_spec.ip6dst, 4);
5051 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5052 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5054 rule->tuples.ether_proto = ETH_P_IPV6;
5055 rule->tuples_mask.ether_proto = 0xFFFF;
5059 ether_addr_copy(rule->tuples.src_mac,
5060 fs->h_u.ether_spec.h_source);
5061 ether_addr_copy(rule->tuples_mask.src_mac,
5062 fs->m_u.ether_spec.h_source);
5064 ether_addr_copy(rule->tuples.dst_mac,
5065 fs->h_u.ether_spec.h_dest);
5066 ether_addr_copy(rule->tuples_mask.dst_mac,
5067 fs->m_u.ether_spec.h_dest);
5069 rule->tuples.ether_proto =
5070 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5071 rule->tuples_mask.ether_proto =
5072 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5079 switch (flow_type) {
5082 rule->tuples.ip_proto = IPPROTO_SCTP;
5083 rule->tuples_mask.ip_proto = 0xFF;
5087 rule->tuples.ip_proto = IPPROTO_TCP;
5088 rule->tuples_mask.ip_proto = 0xFF;
5092 rule->tuples.ip_proto = IPPROTO_UDP;
5093 rule->tuples_mask.ip_proto = 0xFF;
5099 if (fs->flow_type & FLOW_EXT) {
5100 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5101 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5104 if (fs->flow_type & FLOW_MAC_EXT) {
5105 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5106 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
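/* Note the storage convention used above: all tuple fields are kept
 * in CPU byte order, and an IPv4 address occupies only the last word
 * (index 3) of the four-word src_ip/dst_ip arrays, mirroring how an
 * IPv6 address fills all four words.
 */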
5112 /* the caller must hold hdev->fd_rule_lock */
5113 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5114 struct hclge_fd_rule *rule)
5119 dev_err(&hdev->pdev->dev,
5120 "The flow director rule is NULL\n");
5124 /* it never fails here, so there is no need to check the return value */
5125 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5127 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5131 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5138 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5142 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5143 struct ethtool_rxnfc *cmd)
5145 struct hclge_vport *vport = hclge_get_vport(handle);
5146 struct hclge_dev *hdev = vport->back;
5147 u16 dst_vport_id = 0, q_index = 0;
5148 struct ethtool_rx_flow_spec *fs;
5149 struct hclge_fd_rule *rule;
5154 if (!hnae3_dev_fd_supported(hdev))
5158 dev_warn(&hdev->pdev->dev,
5159 "Please enable flow director first\n");
5163 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5165 ret = hclge_fd_check_spec(hdev, fs, &unused);
5167 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5171 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5172 action = HCLGE_FD_ACTION_DROP_PACKET;
5174 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5175 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5178 if (vf > hdev->num_req_vfs) {
5179 dev_err(&hdev->pdev->dev,
5180 "Error: vf id (%d) > max vf num (%d)\n",
5181 vf, hdev->num_req_vfs);
5185 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5186 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5189 dev_err(&hdev->pdev->dev,
5190 "Error: queue id (%d) > max tqp num (%d)\n",
5195 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5199 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5203 ret = hclge_fd_get_tuple(hdev, fs, rule);
5209 rule->flow_type = fs->flow_type;
5211 rule->location = fs->location;
5212 rule->unused_tuple = unused;
5213 rule->vf_id = dst_vport_id;
5214 rule->queue_id = q_index;
5215 rule->action = action;
5216 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5218 /* to avoid rule conflicts, when the user configures rules via ethtool,
5219 * we need to clear all arfs rules
5221 hclge_clear_arfs_rules(handle);
5223 spin_lock_bh(&hdev->fd_rule_lock);
5224 ret = hclge_fd_config_rule(hdev, rule);
5226 spin_unlock_bh(&hdev->fd_rule_lock);
5231 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5232 struct ethtool_rxnfc *cmd)
5234 struct hclge_vport *vport = hclge_get_vport(handle);
5235 struct hclge_dev *hdev = vport->back;
5236 struct ethtool_rx_flow_spec *fs;
5239 if (!hnae3_dev_fd_supported(hdev))
5242 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5244 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5247 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5248 dev_err(&hdev->pdev->dev,
5249 "Delete fail, rule %d is inexistent\n",
5254 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5255 fs->location, NULL, false);
5259 spin_lock_bh(&hdev->fd_rule_lock);
5260 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5262 spin_unlock_bh(&hdev->fd_rule_lock);
5267 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5270 struct hclge_vport *vport = hclge_get_vport(handle);
5271 struct hclge_dev *hdev = vport->back;
5272 struct hclge_fd_rule *rule;
5273 struct hlist_node *node;
5276 if (!hnae3_dev_fd_supported(hdev))
5279 spin_lock_bh(&hdev->fd_rule_lock);
5280 for_each_set_bit(location, hdev->fd_bmap,
5281 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5282 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5286 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5288 hlist_del(&rule->rule_node);
5291 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5292 hdev->hclge_fd_rule_num = 0;
5293 bitmap_zero(hdev->fd_bmap,
5294 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5297 spin_unlock_bh(&hdev->fd_rule_lock);
5300 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5302 struct hclge_vport *vport = hclge_get_vport(handle);
5303 struct hclge_dev *hdev = vport->back;
5304 struct hclge_fd_rule *rule;
5305 struct hlist_node *node;
5308 /* Return ok here, because reset error handling will check this
5309 * return value. If error is returned here, the reset process will
5312 if (!hnae3_dev_fd_supported(hdev))
5315 /* if fd is disabled, it should not be restored during reset */
5319 spin_lock_bh(&hdev->fd_rule_lock);
5320 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5321 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5323 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5326 dev_warn(&hdev->pdev->dev,
5327 "Restore rule %d failed, remove it\n",
5329 clear_bit(rule->location, hdev->fd_bmap);
5330 hlist_del(&rule->rule_node);
5332 hdev->hclge_fd_rule_num--;
5336 if (hdev->hclge_fd_rule_num)
5337 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5339 spin_unlock_bh(&hdev->fd_rule_lock);
5344 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5345 struct ethtool_rxnfc *cmd)
5347 struct hclge_vport *vport = hclge_get_vport(handle);
5348 struct hclge_dev *hdev = vport->back;
5350 if (!hnae3_dev_fd_supported(hdev))
5353 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5354 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5359 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5360 struct ethtool_rxnfc *cmd)
5362 struct hclge_vport *vport = hclge_get_vport(handle);
5363 struct hclge_fd_rule *rule = NULL;
5364 struct hclge_dev *hdev = vport->back;
5365 struct ethtool_rx_flow_spec *fs;
5366 struct hlist_node *node2;
5368 if (!hnae3_dev_fd_supported(hdev))
5371 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5373 spin_lock_bh(&hdev->fd_rule_lock);
5375 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5376 if (rule->location >= fs->location)
5380 if (!rule || fs->location != rule->location) {
5381 spin_unlock_bh(&hdev->fd_rule_lock);
5386 fs->flow_type = rule->flow_type;
5387 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5391 fs->h_u.tcp_ip4_spec.ip4src =
5392 cpu_to_be32(rule->tuples.src_ip[3]);
5393 fs->m_u.tcp_ip4_spec.ip4src =
5394 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5395 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5397 fs->h_u.tcp_ip4_spec.ip4dst =
5398 cpu_to_be32(rule->tuples.dst_ip[3]);
5399 fs->m_u.tcp_ip4_spec.ip4dst =
5400 rule->unused_tuple & BIT(INNER_DST_IP) ?
5401 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5403 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5404 fs->m_u.tcp_ip4_spec.psrc =
5405 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5406 0 : cpu_to_be16(rule->tuples_mask.src_port);
5408 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5409 fs->m_u.tcp_ip4_spec.pdst =
5410 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5411 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5413 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5414 fs->m_u.tcp_ip4_spec.tos =
5415 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5416 0 : rule->tuples_mask.ip_tos;
5420 fs->h_u.usr_ip4_spec.ip4src =
5421 cpu_to_be32(rule->tuples.src_ip[3]);
5422 fs->m_u.usr_ip4_spec.ip4src =
5423 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5424 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5426 fs->h_u.usr_ip4_spec.ip4dst =
5427 cpu_to_be32(rule->tuples.dst_ip[3]);
5428 fs->m_u.usr_ip4_spec.ip4dst =
5429 rule->unused_tuple & BIT(INNER_DST_IP) ?
5430 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5432 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5433 fs->m_u.usr_ip4_spec.tos =
5434 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5435 0 : rule->tuples_mask.ip_tos;
5437 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5438 fs->m_u.usr_ip4_spec.proto =
5439 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5440 0 : rule->tuples_mask.ip_proto;
5442 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5448 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5449 rule->tuples.src_ip, 4);
5450 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5451 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5453 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5454 rule->tuples_mask.src_ip, 4);
5456 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5457 rule->tuples.dst_ip, 4);
5458 if (rule->unused_tuple & BIT(INNER_DST_IP))
5459 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5461 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5462 rule->tuples_mask.dst_ip, 4);
5464 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5465 fs->m_u.tcp_ip6_spec.psrc =
5466 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5467 0 : cpu_to_be16(rule->tuples_mask.src_port);
5469 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5470 fs->m_u.tcp_ip6_spec.pdst =
5471 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5472 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5475 case IPV6_USER_FLOW:
5476 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5477 rule->tuples.src_ip, 4);
5478 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5479 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5481 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5482 rule->tuples_mask.src_ip, 4);
5484 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5485 rule->tuples.dst_ip, 4);
5486 if (rule->unused_tuple & BIT(INNER_DST_IP))
5487 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5489 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5490 rule->tuples_mask.dst_ip, 4);
5492 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5493 fs->m_u.usr_ip6_spec.l4_proto =
5494 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5495 0 : rule->tuples_mask.ip_proto;
5499 ether_addr_copy(fs->h_u.ether_spec.h_source,
5500 rule->tuples.src_mac);
5501 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5502 eth_zero_addr(fs->m_u.ether_spec.h_source);
5504 ether_addr_copy(fs->m_u.ether_spec.h_source,
5505 rule->tuples_mask.src_mac);
5507 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5508 rule->tuples.dst_mac);
5509 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5510 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5512 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5513 rule->tuples_mask.dst_mac);
5515 fs->h_u.ether_spec.h_proto =
5516 cpu_to_be16(rule->tuples.ether_proto);
5517 fs->m_u.ether_spec.h_proto =
5518 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5519 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5523 spin_unlock_bh(&hdev->fd_rule_lock);
5527 if (fs->flow_type & FLOW_EXT) {
5528 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5529 fs->m_ext.vlan_tci =
5530 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5531 cpu_to_be16(VLAN_VID_MASK) :
5532 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5535 if (fs->flow_type & FLOW_MAC_EXT) {
5536 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5537 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5538 eth_zero_addr(fs->m_ext.h_dest);
5540 ether_addr_copy(fs->m_ext.h_dest,
5541 rule->tuples_mask.dst_mac);
5544 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5545 fs->ring_cookie = RX_CLS_FLOW_DISC;
5549 fs->ring_cookie = rule->queue_id;
5550 vf_id = rule->vf_id;
5551 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5552 fs->ring_cookie |= vf_id;
5555 spin_unlock_bh(&hdev->fd_rule_lock);
5560 static int hclge_get_all_rules(struct hnae3_handle *handle,
5561 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5563 struct hclge_vport *vport = hclge_get_vport(handle);
5564 struct hclge_dev *hdev = vport->back;
5565 struct hclge_fd_rule *rule;
5566 struct hlist_node *node2;
5569 if (!hnae3_dev_fd_supported(hdev))
5572 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5574 spin_lock_bh(&hdev->fd_rule_lock);
5575 hlist_for_each_entry_safe(rule, node2,
5576 &hdev->fd_rule_list, rule_node) {
5577 if (cnt == cmd->rule_cnt) {
5578 spin_unlock_bh(&hdev->fd_rule_lock);
5582 rule_locs[cnt] = rule->location;
5586 spin_unlock_bh(&hdev->fd_rule_lock);
5588 cmd->rule_cnt = cnt;
5593 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5594 struct hclge_fd_rule_tuples *tuples)
5596 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5597 tuples->ip_proto = fkeys->basic.ip_proto;
5598 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5600 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5601 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5602 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5604 memcpy(tuples->src_ip,
5605 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5606 sizeof(tuples->src_ip));
5607 memcpy(tuples->dst_ip,
5608 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5609 sizeof(tuples->dst_ip));
5613 /* traverse all rules, check whether an existing rule has the same tuples */
5614 static struct hclge_fd_rule *
5615 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5616 const struct hclge_fd_rule_tuples *tuples)
5618 struct hclge_fd_rule *rule = NULL;
5619 struct hlist_node *node;
5621 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5622 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5629 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5630 struct hclge_fd_rule *rule)
5632 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5633 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5634 BIT(INNER_SRC_PORT);
5637 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5638 if (tuples->ether_proto == ETH_P_IP) {
5639 if (tuples->ip_proto == IPPROTO_TCP)
5640 rule->flow_type = TCP_V4_FLOW;
5642 rule->flow_type = UDP_V4_FLOW;
5644 if (tuples->ip_proto == IPPROTO_TCP)
5645 rule->flow_type = TCP_V6_FLOW;
5647 rule->flow_type = UDP_V6_FLOW;
5649 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5650 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5653 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5654 u16 flow_id, struct flow_keys *fkeys)
5656 struct hclge_vport *vport = hclge_get_vport(handle);
5657 struct hclge_fd_rule_tuples new_tuples;
5658 struct hclge_dev *hdev = vport->back;
5659 struct hclge_fd_rule *rule;
5664 if (!hnae3_dev_fd_supported(hdev))
5667 memset(&new_tuples, 0, sizeof(new_tuples));
5668 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5670 spin_lock_bh(&hdev->fd_rule_lock);
5672 /* when there is already an fd rule added by the user,
5673 * arfs should not work
5675 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5676 spin_unlock_bh(&hdev->fd_rule_lock);
5681 /* check if there is a flow director filter for this flow;
5682 * if not, create a new filter for it;
5683 * if a filter exists with a different queue id, modify the filter;
5684 * if a filter exists with the same queue id, do nothing
5686 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5688 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5689 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5690 spin_unlock_bh(&hdev->fd_rule_lock);
5695 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
5697 spin_unlock_bh(&hdev->fd_rule_lock);
5702 set_bit(bit_id, hdev->fd_bmap);
5703 rule->location = bit_id;
5704 rule->flow_id = flow_id;
5705 rule->queue_id = queue_id;
5706 hclge_fd_build_arfs_rule(&new_tuples, rule);
5707 ret = hclge_fd_config_rule(hdev, rule);
5709 spin_unlock_bh(&hdev->fd_rule_lock);
5714 return rule->location;
5717 spin_unlock_bh(&hdev->fd_rule_lock);
5719 if (rule->queue_id == queue_id)
5720 return rule->location;
5722 tmp_queue_id = rule->queue_id;
5723 rule->queue_id = queue_id;
5724 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5726 rule->queue_id = tmp_queue_id;
5730 return rule->location;
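/* Summary of the aRFS path above: an unknown flow claims the first
 * free bit in fd_bmap and programs a complete rule; a known flow that
 * moved to another queue only rewrites its action data and keeps the
 * TCAM key untouched; a known flow on the same queue is a no-op.
 */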
5733 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5735 #ifdef CONFIG_RFS_ACCEL
5736 struct hnae3_handle *handle = &hdev->vport[0].nic;
5737 struct hclge_fd_rule *rule;
5738 struct hlist_node *node;
5739 HLIST_HEAD(del_list);
5741 spin_lock_bh(&hdev->fd_rule_lock);
5742 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5743 spin_unlock_bh(&hdev->fd_rule_lock);
5746 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5747 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5748 rule->flow_id, rule->location)) {
5749 hlist_del_init(&rule->rule_node);
5750 hlist_add_head(&rule->rule_node, &del_list);
5751 hdev->hclge_fd_rule_num--;
5752 clear_bit(rule->location, hdev->fd_bmap);
5755 spin_unlock_bh(&hdev->fd_rule_lock);
5757 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5758 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5759 rule->location, NULL, false);
5765 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5767 #ifdef CONFIG_RFS_ACCEL
5768 struct hclge_vport *vport = hclge_get_vport(handle);
5769 struct hclge_dev *hdev = vport->back;
5771 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5772 hclge_del_all_fd_entries(handle, true);
5776 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5778 struct hclge_vport *vport = hclge_get_vport(handle);
5779 struct hclge_dev *hdev = vport->back;
5781 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5782 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5785 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5787 struct hclge_vport *vport = hclge_get_vport(handle);
5788 struct hclge_dev *hdev = vport->back;
5790 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5793 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5795 struct hclge_vport *vport = hclge_get_vport(handle);
5796 struct hclge_dev *hdev = vport->back;
5798 return hdev->rst_stats.hw_reset_done_cnt;
5801 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5803 struct hclge_vport *vport = hclge_get_vport(handle);
5804 struct hclge_dev *hdev = vport->back;
5807 hdev->fd_en = enable;
5808 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5810 hclge_del_all_fd_entries(handle, clear);
5812 hclge_restore_fd_entries(handle);
5815 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5817 struct hclge_desc desc;
5818 struct hclge_config_mac_mode_cmd *req =
5819 (struct hclge_config_mac_mode_cmd *)desc.data;
5823 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5824 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5825 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5826 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5827 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5828 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5829 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5830 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5831 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5832 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5833 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5834 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5835 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5836 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5837 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5838 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5840 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5842 dev_err(&hdev->pdev->dev,
5843 "mac enable fail, ret =%d.\n", ret);
5846 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5848 struct hclge_config_mac_mode_cmd *req;
5849 struct hclge_desc desc;
5853 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5854 /* 1 Read out the MAC mode config at first */
5855 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5856 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5858 dev_err(&hdev->pdev->dev,
5859 "mac loopback get fail, ret =%d.\n", ret);
5863 /* 2 Then setup the loopback flag */
5864 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5865 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5866 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5867 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5869 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5871 /* 3 Config mac work mode with loopback flag
5872 * and its original configure parameters
5874 hclge_cmd_reuse_desc(&desc, false);
5875 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5877 dev_err(&hdev->pdev->dev,
5878 "mac loopback set fail, ret =%d.\n", ret);
5882 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5883 enum hnae3_loop loop_mode)
5885 #define HCLGE_SERDES_RETRY_MS 10
5886 #define HCLGE_SERDES_RETRY_NUM 100
5888 #define HCLGE_MAC_LINK_STATUS_MS 10
5889 #define HCLGE_MAC_LINK_STATUS_NUM 100
5890 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5891 #define HCLGE_MAC_LINK_STATUS_UP 1
5893 struct hclge_serdes_lb_cmd *req;
5894 struct hclge_desc desc;
5895 int mac_link_ret = 0;
5899 req = (struct hclge_serdes_lb_cmd *)desc.data;
5900 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5902 switch (loop_mode) {
5903 case HNAE3_LOOP_SERIAL_SERDES:
5904 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5906 case HNAE3_LOOP_PARALLEL_SERDES:
5907 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5910 dev_err(&hdev->pdev->dev,
5911 "unsupported serdes loopback mode %d\n", loop_mode);
5916 req->enable = loop_mode_b;
5917 req->mask = loop_mode_b;
5918 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5920 req->mask = loop_mode_b;
5921 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5924 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5926 dev_err(&hdev->pdev->dev,
5927 "serdes loopback set fail, ret = %d\n", ret);
5932 msleep(HCLGE_SERDES_RETRY_MS);
5933 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5935 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5937 dev_err(&hdev->pdev->dev,
5938 "serdes loopback get, ret = %d\n", ret);
5941 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5942 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5944 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5945 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5947 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5948 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5952 hclge_cfg_mac_mode(hdev, en);
5956 /* serdes internal loopback, independent of the network cable. */
5957 msleep(HCLGE_MAC_LINK_STATUS_MS);
5958 ret = hclge_get_mac_link_status(hdev);
5959 if (ret == mac_link_ret)
5961 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5963 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
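/* The serdes loopback setup above is a three phase handshake: issue
 * the loopback command, poll the firmware DONE/SUCCESS bits every
 * HCLGE_SERDES_RETRY_MS, then poll the MAC link status until it
 * reaches the state implied by 'en' (up when enabling, down when
 * disabling).
 */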
5968 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5969 int stream_id, bool enable)
5971 struct hclge_desc desc;
5972 struct hclge_cfg_com_tqp_queue_cmd *req =
5973 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5976 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5977 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5978 req->stream_id = cpu_to_le16(stream_id);
5979 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5981 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5983 dev_err(&hdev->pdev->dev,
5984 "Tqp enable fail, status =%d.\n", ret);
5988 static int hclge_set_loopback(struct hnae3_handle *handle,
5989 enum hnae3_loop loop_mode, bool en)
5991 struct hclge_vport *vport = hclge_get_vport(handle);
5992 struct hnae3_knic_private_info *kinfo;
5993 struct hclge_dev *hdev = vport->back;
5996 switch (loop_mode) {
5997 case HNAE3_LOOP_APP:
5998 ret = hclge_set_app_loopback(hdev, en);
6000 case HNAE3_LOOP_SERIAL_SERDES:
6001 case HNAE3_LOOP_PARALLEL_SERDES:
6002 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6006 dev_err(&hdev->pdev->dev,
6007 "loop_mode %d is not supported\n", loop_mode);
6014 kinfo = &vport->nic.kinfo;
6015 for (i = 0; i < kinfo->num_tqps; i++) {
6016 ret = hclge_tqp_enable(hdev, i, 0, en);
6024 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6026 struct hclge_vport *vport = hclge_get_vport(handle);
6027 struct hnae3_knic_private_info *kinfo;
6028 struct hnae3_queue *queue;
6029 struct hclge_tqp *tqp;
6032 kinfo = &vport->nic.kinfo;
6033 for (i = 0; i < kinfo->num_tqps; i++) {
6034 queue = handle->kinfo.tqp[i];
6035 tqp = container_of(queue, struct hclge_tqp, q);
6036 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6040 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6042 struct hclge_vport *vport = hclge_get_vport(handle);
6043 struct hclge_dev *hdev = vport->back;
6046 mod_timer(&hdev->service_timer, jiffies + HZ);
6048 del_timer_sync(&hdev->service_timer);
6049 cancel_work_sync(&hdev->service_task);
6050 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6054 static int hclge_ae_start(struct hnae3_handle *handle)
6056 struct hclge_vport *vport = hclge_get_vport(handle);
6057 struct hclge_dev *hdev = vport->back;
6060 hclge_cfg_mac_mode(hdev, true);
6061 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6062 hdev->hw.mac.link = 0;
6064 /* reset tqp stats */
6065 hclge_reset_tqp_stats(handle);
6067 hclge_mac_start_phy(hdev);
6072 static void hclge_ae_stop(struct hnae3_handle *handle)
6074 struct hclge_vport *vport = hclge_get_vport(handle);
6075 struct hclge_dev *hdev = vport->back;
6078 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6080 hclge_clear_arfs_rules(handle);
6082 /* If it is not a PF reset, the firmware will disable the MAC,
6083 * so it only needs to stop the phy here.
6085 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6086 hdev->reset_type != HNAE3_FUNC_RESET) {
6087 hclge_mac_stop_phy(hdev);
6091 for (i = 0; i < handle->kinfo.num_tqps; i++)
6092 hclge_reset_tqp(handle, i);
6095 hclge_cfg_mac_mode(hdev, false);
6097 hclge_mac_stop_phy(hdev);
6099 /* reset tqp stats */
6100 hclge_reset_tqp_stats(handle);
6101 hclge_update_link_status(hdev);
6104 int hclge_vport_start(struct hclge_vport *vport)
6106 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6107 vport->last_active_jiffies = jiffies;
6111 void hclge_vport_stop(struct hclge_vport *vport)
6113 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6116 static int hclge_client_start(struct hnae3_handle *handle)
6118 struct hclge_vport *vport = hclge_get_vport(handle);
6120 return hclge_vport_start(vport);
6123 static void hclge_client_stop(struct hnae3_handle *handle)
6125 struct hclge_vport *vport = hclge_get_vport(handle);
6127 hclge_vport_stop(vport);
6130 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6131 u16 cmdq_resp, u8 resp_code,
6132 enum hclge_mac_vlan_tbl_opcode op)
6134 struct hclge_dev *hdev = vport->back;
6135 int return_status = -EIO;
6138 dev_err(&hdev->pdev->dev,
6139 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6144 if (op == HCLGE_MAC_VLAN_ADD) {
6145 if (!resp_code || resp_code == 1) {
6147 } else if (resp_code == 2) {
6148 return_status = -ENOSPC;
6149 dev_err(&hdev->pdev->dev,
6150 "add mac addr failed for uc_overflow.\n");
6151 } else if (resp_code == 3) {
6152 return_status = -ENOSPC;
6153 dev_err(&hdev->pdev->dev,
6154 "add mac addr failed for mc_overflow.\n");
6156 dev_err(&hdev->pdev->dev,
6157 "add mac addr failed for undefined, code=%d.\n",
6160 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6163 } else if (resp_code == 1) {
6164 return_status = -ENOENT;
6165 dev_dbg(&hdev->pdev->dev,
6166 "remove mac addr failed for miss.\n");
6168 dev_err(&hdev->pdev->dev,
6169 "remove mac addr failed for undefined, code=%d.\n",
6172 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6175 } else if (resp_code == 1) {
6176 return_status = -ENOENT;
6177 dev_dbg(&hdev->pdev->dev,
6178 "lookup mac addr failed for miss.\n");
6180 dev_err(&hdev->pdev->dev,
6181 "lookup mac addr failed for undefined, code=%d.\n",
6185 return_status = -EINVAL;
6186 dev_err(&hdev->pdev->dev,
6187 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6191 return return_status;
6194 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6199 if (vfid > 255 || vfid < 0)
6202 if (vfid >= 0 && vfid <= 191) {
6203 word_num = vfid / 32;
6204 bit_num = vfid % 32;
6206 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6208 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6210 word_num = (vfid - 192) / 32;
6211 bit_num = vfid % 32;
6213 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6215 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
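/* The vfid bitmap spans two descriptors: desc[1].data holds function
 * ids 0-191 (six 32 bit words) and desc[2].data holds ids 192-255.
 * Worked example: vfid 200 lands in desc[2], word (200 - 192) / 32 = 0,
 * bit 200 % 32 = 8.
 */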
6221 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6223 #define HCLGE_DESC_NUMBER 3
6224 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6227 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6228 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6229 if (desc[i].data[j])
6235 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6236 const u8 *addr, bool is_mc)
6238 const unsigned char *mac_addr = addr;
6239 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6240 (mac_addr[0]) | (mac_addr[1] << 8);
6241 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6243 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6245 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6246 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6249 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6250 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
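/* Worked example of the packing above for address 00:11:22:33:44:55:
 * high_val = 0x00 | (0x11 << 8) | (0x22 << 16) | (0x33 << 24)
 *          = 0x33221100
 * low_val  = 0x44 | (0x55 << 8) = 0x5544
 * i.e. the six bytes are stored little-endian across hi32 and lo16.
 */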
6253 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6254 struct hclge_mac_vlan_tbl_entry_cmd *req)
6256 struct hclge_dev *hdev = vport->back;
6257 struct hclge_desc desc;
6262 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6264 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6266 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6268 dev_err(&hdev->pdev->dev,
6269 "del mac addr failed for cmd_send, ret =%d.\n",
6273 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6274 retval = le16_to_cpu(desc.retval);
6276 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6277 HCLGE_MAC_VLAN_REMOVE);
6280 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6281 struct hclge_mac_vlan_tbl_entry_cmd *req,
6282 struct hclge_desc *desc,
6285 struct hclge_dev *hdev = vport->back;
6290 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6292 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6293 memcpy(desc[0].data,
6295 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6296 hclge_cmd_setup_basic_desc(&desc[1],
6297 HCLGE_OPC_MAC_VLAN_ADD,
6299 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6300 hclge_cmd_setup_basic_desc(&desc[2],
6301 HCLGE_OPC_MAC_VLAN_ADD,
6303 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6305 memcpy(desc[0].data,
6307 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6308 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6311 dev_err(&hdev->pdev->dev,
6312 "lookup mac addr failed for cmd_send, ret =%d.\n",
6316 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6317 retval = le16_to_cpu(desc[0].retval);
6319 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6320 HCLGE_MAC_VLAN_LKUP);
6323 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6324 struct hclge_mac_vlan_tbl_entry_cmd *req,
6325 struct hclge_desc *mc_desc)
6327 struct hclge_dev *hdev = vport->back;
6334 struct hclge_desc desc;
6336 hclge_cmd_setup_basic_desc(&desc,
6337 HCLGE_OPC_MAC_VLAN_ADD,
6339 memcpy(desc.data, req,
6340 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6341 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6342 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6343 retval = le16_to_cpu(desc.retval);
6345 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6347 HCLGE_MAC_VLAN_ADD);
6349 hclge_cmd_reuse_desc(&mc_desc[0], false);
6350 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6351 hclge_cmd_reuse_desc(&mc_desc[1], false);
6352 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6353 hclge_cmd_reuse_desc(&mc_desc[2], false);
6354 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6355 memcpy(mc_desc[0].data, req,
6356 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6357 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6358 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6359 retval = le16_to_cpu(mc_desc[0].retval);
6361 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6363 HCLGE_MAC_VLAN_ADD);
6367 dev_err(&hdev->pdev->dev,
6368 "add mac addr failed for cmd_send, ret =%d.\n",
6376 static int hclge_init_umv_space(struct hclge_dev *hdev)
6378 u16 allocated_size = 0;
6381 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6386 if (allocated_size < hdev->wanted_umv_size)
6387 dev_warn(&hdev->pdev->dev,
6388 "Alloc umv space failed, want %d, get %d\n",
6389 hdev->wanted_umv_size, allocated_size);
6391 mutex_init(&hdev->umv_mutex);
6392 hdev->max_umv_size = allocated_size;
6393 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6394 hdev->share_umv_size = hdev->priv_umv_size +
6395 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6400 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6404 if (hdev->max_umv_size > 0) {
6405 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6409 hdev->max_umv_size = 0;
6411 mutex_destroy(&hdev->umv_mutex);
6416 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6417 u16 *allocated_size, bool is_alloc)
6419 struct hclge_umv_spc_alc_cmd *req;
6420 struct hclge_desc desc;
6423 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6424 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6425 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6426 req->space_size = cpu_to_le32(space_size);
6428 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6430 dev_err(&hdev->pdev->dev,
6431 "%s umv space failed for cmd_send, ret =%d\n",
6432 is_alloc ? "allocate" : "free", ret);
6436 if (is_alloc && allocated_size)
6437 *allocated_size = le32_to_cpu(desc.data[1]);
6442 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6444 struct hclge_vport *vport;
6447 for (i = 0; i < hdev->num_alloc_vport; i++) {
6448 vport = &hdev->vport[i];
6449 vport->used_umv_num = 0;
6452 mutex_lock(&hdev->umv_mutex);
6453 hdev->share_umv_size = hdev->priv_umv_size +
6454 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6455 mutex_unlock(&hdev->umv_mutex);
6458 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6460 struct hclge_dev *hdev = vport->back;
6463 mutex_lock(&hdev->umv_mutex);
6464 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6465 hdev->share_umv_size == 0);
6466 mutex_unlock(&hdev->umv_mutex);
6471 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6473 struct hclge_dev *hdev = vport->back;
6475 mutex_lock(&hdev->umv_mutex);
6477 if (vport->used_umv_num > hdev->priv_umv_size)
6478 hdev->share_umv_size++;
6480 if (vport->used_umv_num > 0)
6481 vport->used_umv_num--;
6483 if (vport->used_umv_num >= hdev->priv_umv_size &&
6484 hdev->share_umv_size > 0)
6485 hdev->share_umv_size--;
6486 vport->used_umv_num++;
6488 mutex_unlock(&hdev->umv_mutex);
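/* UMV (unicast mac vlan) space accounting, sketched from the math in
 * hclge_init_umv_space(): the space is cut into (num_req_vfs + 2)
 * slices; the PF and each VF get one slice as a private quota
 * (priv_umv_size), and the extra slice plus the division remainder
 * form the shared pool (share_umv_size). Example: 256 entries and
 * 6 VFs give a private quota of 256 / 8 = 32 and a 32-entry shared
 * pool. A function dips into the shared pool only after its private
 * quota is exhausted.
 */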
6491 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6492 const unsigned char *addr)
6494 struct hclge_vport *vport = hclge_get_vport(handle);
6496 return hclge_add_uc_addr_common(vport, addr);
6499 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6500 const unsigned char *addr)
6502 struct hclge_dev *hdev = vport->back;
6503 struct hclge_mac_vlan_tbl_entry_cmd req;
6504 struct hclge_desc desc;
6505 u16 egress_port = 0;
6508 /* mac addr check */
6509 if (is_zero_ether_addr(addr) ||
6510 is_broadcast_ether_addr(addr) ||
6511 is_multicast_ether_addr(addr)) {
6512 dev_err(&hdev->pdev->dev,
6513 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6515 is_zero_ether_addr(addr),
6516 is_broadcast_ether_addr(addr),
6517 is_multicast_ether_addr(addr));
6521 memset(&req, 0, sizeof(req));
6523 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6524 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6526 req.egress_port = cpu_to_le16(egress_port);
6528 hclge_prepare_mac_addr(&req, addr, false);
6530 /* Lookup the mac address in the mac_vlan table, and add
6531 * it if the entry does not exist. Duplicate unicast entries
6532 * are not allowed in the mac vlan table.
6534 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6535 if (ret == -ENOENT) {
6536 if (!hclge_is_umv_space_full(vport)) {
6537 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6539 hclge_update_umv_space(vport, false);
6543 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6544 hdev->priv_umv_size);
6549 /* check if we just hit the duplicate */
6551 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6552 vport->vport_id, addr);
6556 dev_err(&hdev->pdev->dev,
6557 "PF failed to add unicast entry(%pM) in the MAC table\n",
6563 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6564 const unsigned char *addr)
6566 struct hclge_vport *vport = hclge_get_vport(handle);
6568 return hclge_rm_uc_addr_common(vport, addr);
6571 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6572 const unsigned char *addr)
6574 struct hclge_dev *hdev = vport->back;
6575 struct hclge_mac_vlan_tbl_entry_cmd req;
6578 /* mac addr check */
6579 if (is_zero_ether_addr(addr) ||
6580 is_broadcast_ether_addr(addr) ||
6581 is_multicast_ether_addr(addr)) {
6582 dev_dbg(&hdev->pdev->dev,
6583 "Remove mac err! invalid mac:%pM.\n",
6588 memset(&req, 0, sizeof(req));
6589 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6590 hclge_prepare_mac_addr(&req, addr, false);
6591 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6593 hclge_update_umv_space(vport, true);
6598 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6599 const unsigned char *addr)
6601 struct hclge_vport *vport = hclge_get_vport(handle);
6603 return hclge_add_mc_addr_common(vport, addr);
6606 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6607 const unsigned char *addr)
6609 struct hclge_dev *hdev = vport->back;
6610 struct hclge_mac_vlan_tbl_entry_cmd req;
6611 struct hclge_desc desc[3];
6614 /* mac addr check */
6615 if (!is_multicast_ether_addr(addr)) {
6616 dev_err(&hdev->pdev->dev,
6617 "Add mc mac err! invalid mac:%pM.\n",
6621 memset(&req, 0, sizeof(req));
6622 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6623 hclge_prepare_mac_addr(&req, addr, true);
6624 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6626 /* This mac addr exists, update VFID for it */
6627 hclge_update_desc_vfid(desc, vport->vport_id, false);
6628 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6630 /* This mac addr does not exist, add a new entry for it */
6631 memset(desc[0].data, 0, sizeof(desc[0].data));
6632 memset(desc[1].data, 0, sizeof(desc[1].data));
6633 memset(desc[2].data, 0, sizeof(desc[2].data));
6634 hclge_update_desc_vfid(desc, vport->vport_id, false);
6635 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6638 if (status == -ENOSPC)
6639 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6644 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6645 const unsigned char *addr)
6647 struct hclge_vport *vport = hclge_get_vport(handle);
6649 return hclge_rm_mc_addr_common(vport, addr);
6652 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6653 const unsigned char *addr)
6655 struct hclge_dev *hdev = vport->back;
6656 struct hclge_mac_vlan_tbl_entry_cmd req;
6657 enum hclge_cmd_status status;
6658 struct hclge_desc desc[3];
6660 /* mac addr check */
6661 if (!is_multicast_ether_addr(addr)) {
6662 dev_dbg(&hdev->pdev->dev,
6663 "Remove mc mac err! invalid mac:%pM.\n",
6668 memset(&req, 0, sizeof(req));
6669 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6670 hclge_prepare_mac_addr(&req, addr, true);
6671 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6673 /* This mac addr exists, remove this handle's VFID for it */
6674 hclge_update_desc_vfid(desc, vport->vport_id, true);
6676 if (hclge_is_all_function_id_zero(desc))
6677 /* All the vfids are zero, so this entry needs to be deleted */
6678 status = hclge_remove_mac_vlan_tbl(vport, &req);
6680 /* Not all the vfids are zero, update the vfid */
6681 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6684 /* Maybe this mac address is in mta table, but it cannot be
6685 * deleted here because an entry of mta represents an address
6686 * range rather than a specific address. The delete action to
6687 * all entries will take effect in update_mta_status called by
6688 * hns3_nic_set_rx_mode.
6696 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6697 enum HCLGE_MAC_ADDR_TYPE mac_type)
6699 struct hclge_vport_mac_addr_cfg *mac_cfg;
6700 struct list_head *list;
6702 if (!vport->vport_id)
6703 return;
6705 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6706 if (!mac_cfg)
6707 return;
6709 mac_cfg->hd_tbl_status = true;
6710 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6712 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6713 &vport->uc_mac_list : &vport->mc_mac_list;
6715 list_add_tail(&mac_cfg->node, list);
6718 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6720 enum HCLGE_MAC_ADDR_TYPE mac_type)
6722 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6723 struct list_head *list;
6724 bool uc_flag, mc_flag;
6726 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6727 &vport->uc_mac_list : &vport->mc_mac_list;
6729 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6730 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6732 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6733 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6734 if (uc_flag && mac_cfg->hd_tbl_status)
6735 hclge_rm_uc_addr_common(vport, mac_addr);
6737 if (mc_flag && mac_cfg->hd_tbl_status)
6738 hclge_rm_mc_addr_common(vport, mac_addr);
6740 list_del(&mac_cfg->node);
6741 kfree(mac_cfg);
6742 break;
6747 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6748 enum HCLGE_MAC_ADDR_TYPE mac_type)
6750 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6751 struct list_head *list;
6753 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6754 &vport->uc_mac_list : &vport->mc_mac_list;
6756 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6757 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6758 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6760 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6761 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6763 mac_cfg->hd_tbl_status = false;
6764 if (is_del_list) {
6765 list_del(&mac_cfg->node);
6766 kfree(mac_cfg);
6771 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6773 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6774 struct hclge_vport *vport;
6777 mutex_lock(&hdev->vport_cfg_mutex);
6778 for (i = 0; i < hdev->num_alloc_vport; i++) {
6779 vport = &hdev->vport[i];
6780 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6781 list_del(&mac->node);
6782 kfree(mac);
6785 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6786 list_del(&mac->node);
6787 kfree(mac);
6790 mutex_unlock(&hdev->vport_cfg_mutex);
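/* Translate the firmware response for a MAC ethertype add into an errno:
 * both SUCCESS_ADD and ALREADY_ADD count as success, while table
 * overflow, key conflict and any undefined code all map to -EIO.
 */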
6793 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6794 u16 cmdq_resp, u8 resp_code)
6796 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6797 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6798 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6799 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6803 if (cmdq_resp) {
6804 dev_err(&hdev->pdev->dev,
6805 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", cmdq_resp);
6807 return -EIO;
6808 }
6810 switch (resp_code) {
6811 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6812 case HCLGE_ETHERTYPE_ALREADY_ADD:
6813 return_status = 0;
6814 break;
6815 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6816 dev_err(&hdev->pdev->dev,
6817 "add mac ethertype failed for manager table overflow.\n");
6818 return_status = -EIO;
6819 break;
6820 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6821 dev_err(&hdev->pdev->dev,
6822 "add mac ethertype failed for key conflict.\n");
6823 return_status = -EIO;
6824 break;
6825 default:
6826 dev_err(&hdev->pdev->dev,
6827 "add mac ethertype failed for undefined, code=%d.\n", resp_code);
6829 return_status = -EIO;
6830 break;
6831 }
6832 return return_status;
6835 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6836 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6838 struct hclge_desc desc;
6843 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6844 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6846 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6847 if (ret) {
6848 dev_err(&hdev->pdev->dev,
6849 "add mac ethertype failed for cmd_send, ret =%d.\n", ret);
6851 return ret;
6852 }
6854 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6855 retval = le16_to_cpu(desc.retval);
6857 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6860 static int init_mgr_tbl(struct hclge_dev *hdev)
6865 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6866 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6867 if (ret) {
6868 dev_err(&hdev->pdev->dev,
6869 "add mac ethertype failed, ret =%d.\n", ret);
6871 return ret;
6878 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6880 struct hclge_vport *vport = hclge_get_vport(handle);
6881 struct hclge_dev *hdev = vport->back;
6883 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6886 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6887 bool is_first)
6889 const unsigned char *new_addr = (const unsigned char *)p;
6890 struct hclge_vport *vport = hclge_get_vport(handle);
6891 struct hclge_dev *hdev = vport->back;
6894 /* mac addr check */
6895 if (is_zero_ether_addr(new_addr) ||
6896 is_broadcast_ether_addr(new_addr) ||
6897 is_multicast_ether_addr(new_addr)) {
6898 dev_err(&hdev->pdev->dev,
6899 "Change uc mac err! invalid mac:%pM.\n", new_addr);
6901 return -EINVAL;
6902 }
6904 if ((!is_first || is_kdump_kernel()) &&
6905 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6906 dev_warn(&hdev->pdev->dev,
6907 "remove old uc mac address fail.\n");
6909 ret = hclge_add_uc_addr(handle, new_addr);
6910 if (ret) {
6911 dev_err(&hdev->pdev->dev,
6912 "add uc mac address fail, ret =%d.\n", ret);
6915 if (!is_first &&
6916 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6917 dev_err(&hdev->pdev->dev,
6918 "restore uc mac address fail.\n");
6920 return -EIO;
6921 }
6923 ret = hclge_pause_addr_cfg(hdev, new_addr);
6924 if (ret) {
6925 dev_err(&hdev->pdev->dev,
6926 "configure mac pause address fail, ret =%d.\n", ret);
6928 return ret;
6929 }
6931 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6936 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6937 int cmd)
6939 struct hclge_vport *vport = hclge_get_vport(handle);
6940 struct hclge_dev *hdev = vport->back;
6942 if (!hdev->hw.mac.phydev)
6943 return -EOPNOTSUPP;
6945 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
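/* vlan_type selects the VF (per-function) or port VLAN filter, and
 * fe_type is a bitmap built from the NIC/RoCE ingress/egress
 * filter-enable bits defined below; filter_en == false clears them all.
 */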
6948 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6949 u8 fe_type, bool filter_en, u8 vf_id)
6951 struct hclge_vlan_filter_ctrl_cmd *req;
6952 struct hclge_desc desc;
6955 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6957 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6958 req->vlan_type = vlan_type;
6959 req->vlan_fe = filter_en ? fe_type : 0;
6960 req->vf_id = vf_id;
6962 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6963 if (ret)
6964 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", ret);
6967 return ret;
6970 #define HCLGE_FILTER_TYPE_VF 0
6971 #define HCLGE_FILTER_TYPE_PORT 1
6972 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6973 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6974 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6975 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6976 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6977 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6978 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6979 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6980 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6982 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6984 struct hclge_vport *vport = hclge_get_vport(handle);
6985 struct hclge_dev *hdev = vport->back;
6987 if (hdev->pdev->revision >= 0x21) {
6988 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6989 HCLGE_FILTER_FE_EGRESS, enable, 0);
6990 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6991 HCLGE_FILTER_FE_INGRESS, enable, 0);
6992 } else {
6993 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6994 HCLGE_FILTER_FE_EGRESS_V1_B, enable, 0);
6996 }
6997 if (enable)
6998 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6999 else
7000 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7003 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7004 bool is_kill, u16 vlan, u8 qos,
7007 #define HCLGE_MAX_VF_BYTES 16
7008 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7009 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7010 struct hclge_desc desc[2];
7015 /* if vf vlan table is full, firmware will close vf vlan filter, so it
7016 * is neither possible nor necessary to add a new vlan id to the vf
7017 * vlan filter.
7018 */
7019 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7020 return 0;
7021 hclge_cmd_setup_basic_desc(&desc[0],
7022 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7023 hclge_cmd_setup_basic_desc(&desc[1],
7024 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7026 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7028 vf_byte_off = vfid / 8;
7029 vf_byte_val = 1 << (vfid % 8);
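/* Worked example: vfid 10 selects vf_byte_off = 10 / 8 = 1 and
 * vf_byte_val = 1 << (10 % 8) = 0x04; bytes 0..15 of the bitmap live in
 * the first descriptor and higher bytes spill into the second one.
 */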
7031 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7032 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7034 req0->vlan_id = cpu_to_le16(vlan);
7035 req0->vlan_cfg = is_kill;
7037 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7038 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7040 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7042 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7043 if (ret) {
7044 dev_err(&hdev->pdev->dev,
7045 "Send vf vlan command fail, ret =%d.\n", ret);
7047 return ret;
7048 }
7050 if (!is_kill) {
7051 #define HCLGE_VF_VLAN_NO_ENTRY 2
7052 if (!req0->resp_code || req0->resp_code == 1)
7053 return 0;
7055 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7056 set_bit(vfid, hdev->vf_vlan_full);
7057 dev_warn(&hdev->pdev->dev,
7058 "vf vlan table is full, vf vlan filter is disabled\n");
7059 return 0;
7060 }
7062 dev_err(&hdev->pdev->dev,
7063 "Add vf vlan filter fail, ret =%d.\n", req0->resp_code);
7065 } else {
7066 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7067 if (!req0->resp_code)
7068 return 0;
7070 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7071 dev_warn(&hdev->pdev->dev,
7072 "vlan %d filter is not in vf vlan table\n", vlan);
7074 return 0;
7075 }
7077 dev_err(&hdev->pdev->dev,
7078 "Kill vf vlan filter fail, ret =%d.\n", req0->resp_code);
7085 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7086 u16 vlan_id, bool is_kill)
7088 struct hclge_vlan_filter_pf_cfg_cmd *req;
7089 struct hclge_desc desc;
7090 u8 vlan_offset_byte_val;
7091 u8 vlan_offset_byte;
7095 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7097 vlan_offset_160 = vlan_id / 160;
7098 vlan_offset_byte = (vlan_id % 160) / 8;
7099 vlan_offset_byte_val = 1 << (vlan_id % 8);
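/* Worked example: vlan_id 338 gives vlan_offset_160 = 338 / 160 = 2,
 * vlan_offset_byte = (338 % 160) / 8 = 2 and a bit value of
 * 1 << (338 % 8) = 0x04, i.e. each descriptor covers a 160-VLAN window.
 */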
7101 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7102 req->vlan_offset = vlan_offset_160;
7103 req->vlan_cfg = is_kill;
7104 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7106 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7108 dev_err(&hdev->pdev->dev,
7109 "port vlan command, send fail, ret =%d.\n", ret);
7111 return ret;
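/* The hardware keeps two VLAN filter levels. The helper below programs
 * the per-VF table first, then uses the hdev->vlan_table bitmaps as a
 * reference count so the shared port filter is only touched when the
 * first vport joins a VLAN or the last one leaves it.
 */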
7113 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7114 u16 vport_id, u16 vlan_id, u8 qos,
7115 bool is_kill)
7117 u16 vport_idx, vport_num = 0;
7120 if (is_kill && !vlan_id)
7121 return 0;
7123 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7124 qos, proto);
7125 if (ret) {
7126 dev_err(&hdev->pdev->dev,
7127 "Set %d vport vlan filter config fail, ret =%d.\n", vport_id, ret);
7129 return ret;
7130 }
7132 /* vlan 0 may be added twice when 8021q module is enabled */
7133 if (!is_kill && !vlan_id &&
7134 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7135 return 0;
7137 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7138 dev_err(&hdev->pdev->dev,
7139 "Add port vlan failed, vport %d is already in vlan %d\n", vport_id, vlan_id);
7141 return -EINVAL;
7142 }
7144 if (is_kill &&
7145 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7146 dev_err(&hdev->pdev->dev,
7147 "Delete port vlan failed, vport %d is not in vlan %d\n", vport_id, vlan_id);
7149 return -EINVAL;
7150 }
7152 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7155 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7156 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, is_kill);
7159 return ret;
7162 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7164 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7165 struct hclge_vport_vtag_tx_cfg_cmd *req;
7166 struct hclge_dev *hdev = vport->back;
7167 struct hclge_desc desc;
7170 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7172 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7173 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7174 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7175 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7176 vcfg->accept_tag1 ? 1 : 0);
7177 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7178 vcfg->accept_untag1 ? 1 : 0);
7179 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7180 vcfg->accept_tag2 ? 1 : 0);
7181 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7182 vcfg->accept_untag2 ? 1 : 0);
7183 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7184 vcfg->insert_tag1_en ? 1 : 0);
7185 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7186 vcfg->insert_tag2_en ? 1 : 0);
7187 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7189 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7190 req->vf_bitmap[req->vf_offset] =
7191 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7193 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7194 if (status)
7195 dev_err(&hdev->pdev->dev,
7196 "Send port txvlan cfg command fail, ret =%d\n", status);
7199 return status;
7202 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7204 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7205 struct hclge_vport_vtag_rx_cfg_cmd *req;
7206 struct hclge_dev *hdev = vport->back;
7207 struct hclge_desc desc;
7210 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7212 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7213 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7214 vcfg->strip_tag1_en ? 1 : 0);
7215 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7216 vcfg->strip_tag2_en ? 1 : 0);
7217 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7218 vcfg->vlan1_vlan_prionly ? 1 : 0);
7219 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7220 vcfg->vlan2_vlan_prionly ? 1 : 0);
7222 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7223 req->vf_bitmap[req->vf_offset] =
7224 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7226 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7227 if (status)
7228 dev_err(&hdev->pdev->dev,
7229 "Send port rxvlan cfg command fail, ret =%d\n", status);
7231 return status;
7235 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7236 u16 port_base_vlan_state,
7237 u16 vlan_tag)
7241 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7242 vport->txvlan_cfg.accept_tag1 = true;
7243 vport->txvlan_cfg.insert_tag1_en = false;
7244 vport->txvlan_cfg.default_tag1 = 0;
7245 } else {
7246 vport->txvlan_cfg.accept_tag1 = false;
7247 vport->txvlan_cfg.insert_tag1_en = true;
7248 vport->txvlan_cfg.default_tag1 = vlan_tag;
7251 vport->txvlan_cfg.accept_untag1 = true;
7253 /* accept_tag2 and accept_untag2 are not supported on
7254 * pdev revision(0x20); newer revisions support them,
7255 * but these two fields cannot be configured by the user.
7256 */
7257 vport->txvlan_cfg.accept_tag2 = true;
7258 vport->txvlan_cfg.accept_untag2 = true;
7259 vport->txvlan_cfg.insert_tag2_en = false;
7260 vport->txvlan_cfg.default_tag2 = 0;
7262 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7263 vport->rxvlan_cfg.strip_tag1_en = false;
7264 vport->rxvlan_cfg.strip_tag2_en =
7265 vport->rxvlan_cfg.rx_vlan_offload_en;
7266 } else {
7267 vport->rxvlan_cfg.strip_tag1_en =
7268 vport->rxvlan_cfg.rx_vlan_offload_en;
7269 vport->rxvlan_cfg.strip_tag2_en = true;
7271 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7272 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7274 ret = hclge_set_vlan_tx_offload_cfg(vport);
7275 if (ret)
7276 return ret;
7278 return hclge_set_vlan_rx_offload_cfg(vport);
7281 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7283 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7284 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7285 struct hclge_desc desc;
7288 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7289 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7290 rx_req->ot_fst_vlan_type =
7291 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7292 rx_req->ot_sec_vlan_type =
7293 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7294 rx_req->in_fst_vlan_type =
7295 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7296 rx_req->in_sec_vlan_type =
7297 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7299 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7300 if (status) {
7301 dev_err(&hdev->pdev->dev,
7302 "Send rxvlan protocol type command fail, ret =%d\n", status);
7304 return status;
7305 }
7307 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7309 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7310 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7311 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7313 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7314 if (status)
7315 dev_err(&hdev->pdev->dev,
7316 "Send txvlan protocol type command fail, ret =%d\n", status);
7319 return status;
7322 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7324 #define HCLGE_DEF_VLAN_TYPE 0x8100
7326 struct hnae3_handle *handle = &hdev->vport[0].nic;
7327 struct hclge_vport *vport;
7331 if (hdev->pdev->revision >= 0x21) {
7332 /* for revision 0x21, vf vlan filter is per function */
7333 for (i = 0; i < hdev->num_alloc_vport; i++) {
7334 vport = &hdev->vport[i];
7335 ret = hclge_set_vlan_filter_ctrl(hdev,
7336 HCLGE_FILTER_TYPE_VF,
7337 HCLGE_FILTER_FE_EGRESS, true, vport->vport_id);
7340 if (ret)
7341 return ret;
7342 }
7344 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7345 HCLGE_FILTER_FE_INGRESS, true, 0);
7350 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7351 HCLGE_FILTER_FE_EGRESS_V1_B, true, 0);
7357 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7359 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7360 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7361 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7362 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7363 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7364 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7366 ret = hclge_set_vlan_protocol_type(hdev);
7370 for (i = 0; i < hdev->num_alloc_vport; i++) {
7373 vport = &hdev->vport[i];
7374 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7376 ret = hclge_vlan_offload_cfg(vport,
7377 vport->port_base_vlan_cfg.state, vlan_tag);
7379 if (ret)
7380 return ret;
7382 }
7383 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7386 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7388 bool writen_to_tbl)
7389 struct hclge_vport_vlan_cfg *vlan;
7391 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7392 if (!vlan)
7393 return;
7395 vlan->hd_tbl_status = writen_to_tbl;
7396 vlan->vlan_id = vlan_id;
7398 list_add_tail(&vlan->node, &vport->vlan_list);
7401 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7403 struct hclge_vport_vlan_cfg *vlan, *tmp;
7404 struct hclge_dev *hdev = vport->back;
7407 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7408 if (!vlan->hd_tbl_status) {
7409 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7410 vport->vport_id,
7411 vlan->vlan_id, 0, false);
7412 if (ret) {
7413 dev_err(&hdev->pdev->dev,
7414 "restore vport vlan list failed, ret=%d\n", ret);
7416 return ret;
7417 }
7419 vlan->hd_tbl_status = true;
7425 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7426 bool is_write_tbl)
7428 struct hclge_vport_vlan_cfg *vlan, *tmp;
7429 struct hclge_dev *hdev = vport->back;
7431 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7432 if (vlan->vlan_id == vlan_id) {
7433 if (is_write_tbl && vlan->hd_tbl_status)
7434 hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), vport->vport_id, vlan_id, 0, true);
7440 list_del(&vlan->node);
7441 kfree(vlan);
7442 return;
7447 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7449 struct hclge_vport_vlan_cfg *vlan, *tmp;
7450 struct hclge_dev *hdev = vport->back;
7452 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7453 if (vlan->hd_tbl_status)
7454 hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), vport->vport_id, vlan->vlan_id, 0, true);
7460 vlan->hd_tbl_status = false;
7461 if (is_del_list) {
7462 list_del(&vlan->node);
7463 kfree(vlan);
7468 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7470 struct hclge_vport_vlan_cfg *vlan, *tmp;
7471 struct hclge_vport *vport;
7474 mutex_lock(&hdev->vport_cfg_mutex);
7475 for (i = 0; i < hdev->num_alloc_vport; i++) {
7476 vport = &hdev->vport[i];
7477 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7478 list_del(&vlan->node);
7479 kfree(vlan);
7482 mutex_unlock(&hdev->vport_cfg_mutex);
7485 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7487 struct hclge_vport *vport = hclge_get_vport(handle);
7488 struct hclge_vport_vlan_cfg *vlan, *tmp;
7489 struct hclge_dev *hdev = vport->back;
7490 u16 vlan_proto, qos;
7494 mutex_lock(&hdev->vport_cfg_mutex);
7495 for (i = 0; i < hdev->num_alloc_vport; i++) {
7496 vport = &hdev->vport[i];
7497 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7498 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7499 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7500 state = vport->port_base_vlan_cfg.state;
7502 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7503 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7504 vport->vport_id, vlan_id, qos,
7505 false);
7509 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7510 if (vlan->hd_tbl_status)
7511 hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), vport->vport_id, vlan->vlan_id, 0, false);
7519 mutex_unlock(&hdev->vport_cfg_mutex);
7522 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7524 struct hclge_vport *vport = hclge_get_vport(handle);
7526 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7527 vport->rxvlan_cfg.strip_tag1_en = false;
7528 vport->rxvlan_cfg.strip_tag2_en = enable;
7529 } else {
7530 vport->rxvlan_cfg.strip_tag1_en = enable;
7531 vport->rxvlan_cfg.strip_tag2_en = true;
7533 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7534 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7535 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7537 return hclge_set_vlan_rx_offload_cfg(vport);
7540 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7541 u16 port_base_vlan_state,
7542 struct hclge_vlan_info *new_info,
7543 struct hclge_vlan_info *old_info)
7545 struct hclge_dev *hdev = vport->back;
7548 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7549 hclge_rm_vport_all_vlan_table(vport, false);
7550 return hclge_set_vlan_filter_hw(hdev,
7551 htons(new_info->vlan_proto),
7552 vport->vport_id,
7553 new_info->vlan_tag,
7554 new_info->qos, false);
7557 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7558 vport->vport_id, old_info->vlan_tag,
7559 old_info->qos, true);
7560 if (ret)
7561 return ret;
7563 return hclge_add_vport_all_vlan_table(vport);
7566 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7567 struct hclge_vlan_info *vlan_info)
7569 struct hnae3_handle *nic = &vport->nic;
7570 struct hclge_vlan_info *old_vlan_info;
7571 struct hclge_dev *hdev = vport->back;
7574 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7576 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7580 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7581 /* add new VLAN tag */
7582 ret = hclge_set_vlan_filter_hw(hdev,
7583 htons(vlan_info->vlan_proto),
7584 vport->vport_id,
7585 vlan_info->vlan_tag,
7586 vlan_info->qos, false);
7587 if (ret)
7588 return ret;
7590 /* remove old VLAN tag */
7591 ret = hclge_set_vlan_filter_hw(hdev,
7592 htons(old_vlan_info->vlan_proto),
7593 vport->vport_id,
7594 old_vlan_info->vlan_tag,
7595 old_vlan_info->qos, true);
7596 if (ret)
7597 return ret;
7602 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, old_vlan_info);
7607 /* update state only when disable/enable port based VLAN */
7608 vport->port_base_vlan_cfg.state = state;
7609 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7610 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7612 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7615 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7616 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7617 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
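/* Map the current port based VLAN state plus a requested vlan id onto
 * the action to take: vlan 0 means "disable", a new non-zero id enables
 * or modifies the config, and re-requesting the current id is a no-op
 * (NOCHANGE).
 */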
7622 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7623 enum hnae3_port_base_vlan_state state,
7626 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7627 if (!vlan)
7628 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7630 return HNAE3_PORT_BASE_VLAN_ENABLE;
7632 if (!vlan)
7633 return HNAE3_PORT_BASE_VLAN_DISABLE;
7634 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7635 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7637 return HNAE3_PORT_BASE_VLAN_MODIFY;
7641 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7642 u16 vlan, u8 qos, __be16 proto)
7644 struct hclge_vport *vport = hclge_get_vport(handle);
7645 struct hclge_dev *hdev = vport->back;
7646 struct hclge_vlan_info vlan_info;
7650 if (hdev->pdev->revision == 0x20)
7651 return -EOPNOTSUPP;
7653 /* qos is a 3-bit value, so it cannot be bigger than 7 */
7654 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7655 return -EINVAL;
7656 if (proto != htons(ETH_P_8021Q))
7657 return -EPROTONOSUPPORT;
7659 vport = &hdev->vport[vfid];
7660 state = hclge_get_port_base_vlan_state(vport,
7661 vport->port_base_vlan_cfg.state,
7663 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7664 return 0;
7666 vlan_info.vlan_tag = vlan;
7667 vlan_info.qos = qos;
7668 vlan_info.vlan_proto = ntohs(proto);
7670 /* update port based VLAN for PF */
7671 if (!vfid) {
7672 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7673 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7674 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7676 return ret;
7677 }
7679 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
7680 return hclge_update_port_base_vlan_cfg(vport, state,
7681 &vlan_info);
7683 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7684 vfid, state, vlan, qos, ntohs(proto));
7691 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7692 u16 vlan_id, bool is_kill)
7694 struct hclge_vport *vport = hclge_get_vport(handle);
7695 struct hclge_dev *hdev = vport->back;
7696 bool writen_to_tbl = false;
7699 /* When port based VLAN is enabled, we use the port based VLAN as the
7700 * VLAN filter entry. In this case, we don't update the VLAN filter
7701 * table when the user adds a new VLAN or removes an existing one;
7702 * we only update the vport VLAN list. The VLAN ids in the list won't
7703 * be written to the VLAN filter table until port based VLAN is disabled.
7704 */
7705 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7706 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7707 vlan_id, 0, is_kill);
7708 writen_to_tbl = true;
7714 if (is_kill)
7715 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7716 else
7717 hclge_add_vport_vlan_table(vport, vlan_id, writen_to_tbl);
7723 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7725 struct hclge_config_max_frm_size_cmd *req;
7726 struct hclge_desc desc;
7728 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7730 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7731 req->max_frm_size = cpu_to_le16(new_mps);
7732 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7734 return hclge_cmd_send(&hdev->hw, &desc, 1);
7737 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7739 struct hclge_vport *vport = hclge_get_vport(handle);
7741 return hclge_set_vport_mtu(vport, new_mtu);
7744 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7746 struct hclge_dev *hdev = vport->back;
7747 int i, max_frm_size, ret = 0;
7749 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
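/* e.g. an MTU of 1500 needs 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
 * 2 * 4 (VLAN_HLEN) = 1526 bytes of frame buffer.
 */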
7750 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7751 max_frm_size > HCLGE_MAC_MAX_FRAME)
7752 return -EINVAL;
7754 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7755 mutex_lock(&hdev->vport_lock);
7756 /* VF's mps must fit within hdev->mps */
7757 if (vport->vport_id && max_frm_size > hdev->mps) {
7758 mutex_unlock(&hdev->vport_lock);
7759 return -EINVAL;
7760 } else if (vport->vport_id) {
7761 vport->mps = max_frm_size;
7762 mutex_unlock(&hdev->vport_lock);
7763 return 0;
7764 }
7766 /* PF's mps must be no smaller than every VF's mps */
7767 for (i = 1; i < hdev->num_alloc_vport; i++)
7768 if (max_frm_size < hdev->vport[i].mps) {
7769 mutex_unlock(&hdev->vport_lock);
7770 return -EINVAL;
7771 }
7773 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7775 ret = hclge_set_mac_mtu(hdev, max_frm_size);
7776 if (ret) {
7777 dev_err(&hdev->pdev->dev,
7778 "Change mtu fail, ret =%d\n", ret);
7779 goto out;
7780 }
7782 hdev->mps = max_frm_size;
7783 vport->mps = max_frm_size;
7785 ret = hclge_buffer_alloc(hdev);
7786 if (ret)
7787 dev_err(&hdev->pdev->dev,
7788 "Allocate buffer fail, ret =%d\n", ret);
7790 out:
7791 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7792 mutex_unlock(&hdev->vport_lock);
7796 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7798 bool enable)
7799 struct hclge_reset_tqp_queue_cmd *req;
7800 struct hclge_desc desc;
7803 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7805 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7806 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7807 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7809 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7811 dev_err(&hdev->pdev->dev,
7812 "Send tqp reset cmd error, status =%d\n", ret);
7819 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7821 struct hclge_reset_tqp_queue_cmd *req;
7822 struct hclge_desc desc;
7825 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7827 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7828 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7830 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7832 dev_err(&hdev->pdev->dev,
7833 "Get reset status error, status =%d\n", ret);
7837 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
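/* A handle's queue ids are local to its vport; convert one into the
 * device-global TQP index that the queue reset commands expect.
 */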
7840 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7842 struct hnae3_queue *queue;
7843 struct hclge_tqp *tqp;
7845 queue = handle->kinfo.tqp[queue_id];
7846 tqp = container_of(queue, struct hclge_tqp, q);
7848 return tqp->index;
7851 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7853 struct hclge_vport *vport = hclge_get_vport(handle);
7854 struct hclge_dev *hdev = vport->back;
7855 int reset_try_times = 0;
7860 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7862 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7864 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7868 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7870 dev_err(&hdev->pdev->dev,
7871 "Send reset tqp cmd fail, ret = %d\n", ret);
7875 reset_try_times = 0;
7876 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7877 /* Wait for tqp hw reset */
7879 reset_status = hclge_get_reset_status(hdev, queue_gid);
7884 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7885 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7889 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7891 dev_err(&hdev->pdev->dev,
7892 "Deassert the soft reset fail, ret = %d\n", ret);
7897 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7899 struct hclge_dev *hdev = vport->back;
7900 int reset_try_times = 0;
7905 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7907 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7909 dev_warn(&hdev->pdev->dev,
7910 "Send reset tqp cmd fail, ret = %d\n", ret);
7914 reset_try_times = 0;
7915 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7916 /* Wait for tqp hw reset */
7918 reset_status = hclge_get_reset_status(hdev, queue_gid);
7923 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7924 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7928 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7930 dev_warn(&hdev->pdev->dev,
7931 "Deassert the soft reset fail, ret = %d\n", ret);
7934 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7936 struct hclge_vport *vport = hclge_get_vport(handle);
7937 struct hclge_dev *hdev = vport->back;
7939 return hdev->fw_version;
7942 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7944 struct phy_device *phydev = hdev->hw.mac.phydev;
7949 phy_set_asym_pause(phydev, rx_en, tx_en);
7952 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7957 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7958 else if (rx_en && !tx_en)
7959 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7960 else if (!rx_en && tx_en)
7961 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7963 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7965 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7968 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7970 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7975 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7980 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7982 struct phy_device *phydev = hdev->hw.mac.phydev;
7983 u16 remote_advertising = 0;
7984 u16 local_advertising = 0;
7985 u32 rx_pause, tx_pause;
7988 if (!phydev->link || !phydev->autoneg)
7989 return 0;
7991 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7993 if (phydev->pause)
7994 remote_advertising = LPA_PAUSE_CAP;
7996 if (phydev->asym_pause)
7997 remote_advertising |= LPA_PAUSE_ASYM;
7999 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8000 remote_advertising);
8001 tx_pause = flowctl & FLOW_CTRL_TX;
8002 rx_pause = flowctl & FLOW_CTRL_RX;
8004 if (phydev->duplex == HCLGE_MAC_HALF) {
8009 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8012 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8013 u32 *rx_en, u32 *tx_en)
8015 struct hclge_vport *vport = hclge_get_vport(handle);
8016 struct hclge_dev *hdev = vport->back;
8018 *auto_neg = hclge_get_autoneg(handle);
8020 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8026 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8029 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8032 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8041 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8042 u32 rx_en, u32 tx_en)
8044 struct hclge_vport *vport = hclge_get_vport(handle);
8045 struct hclge_dev *hdev = vport->back;
8046 struct phy_device *phydev = hdev->hw.mac.phydev;
8049 fc_autoneg = hclge_get_autoneg(handle);
8050 if (auto_neg != fc_autoneg) {
8051 dev_info(&hdev->pdev->dev,
8052 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8056 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8057 dev_info(&hdev->pdev->dev,
8058 "Priority flow control enabled. Cannot set link flow control.\n");
8062 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8064 if (!fc_autoneg)
8065 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8067 if (phydev)
8068 return phy_start_aneg(phydev);
8070 if (hdev->pdev->revision == 0x20)
8071 return -EOPNOTSUPP;
8073 return hclge_restart_autoneg(handle);
8076 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8077 u8 *auto_neg, u32 *speed, u8 *duplex)
8079 struct hclge_vport *vport = hclge_get_vport(handle);
8080 struct hclge_dev *hdev = vport->back;
8082 if (speed)
8083 *speed = hdev->hw.mac.speed;
8084 if (duplex)
8085 *duplex = hdev->hw.mac.duplex;
8086 if (auto_neg)
8087 *auto_neg = hdev->hw.mac.autoneg;
8090 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8092 u8 *module_type)
8093 struct hclge_vport *vport = hclge_get_vport(handle);
8094 struct hclge_dev *hdev = vport->back;
8096 if (media_type)
8097 *media_type = hdev->hw.mac.media_type;
8099 if (module_type)
8100 *module_type = hdev->hw.mac.module_type;
8103 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8104 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8106 struct hclge_vport *vport = hclge_get_vport(handle);
8107 struct hclge_dev *hdev = vport->back;
8108 struct phy_device *phydev = hdev->hw.mac.phydev;
8109 int mdix_ctrl, mdix, retval, is_resolved;
8111 if (!phydev) {
8112 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8113 *tp_mdix = ETH_TP_MDI_INVALID;
8114 return;
8115 }
8117 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8119 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8120 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8121 HCLGE_PHY_MDIX_CTRL_S);
8123 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8124 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8125 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8127 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8129 switch (mdix_ctrl) {
8130 case 0x0: *tp_mdix_ctrl = ETH_TP_MDI; break;
8133 case 0x1: *tp_mdix_ctrl = ETH_TP_MDI_X; break;
8136 case 0x3: *tp_mdix_ctrl = ETH_TP_MDI_AUTO; break;
8139 default: *tp_mdix_ctrl = ETH_TP_MDI_INVALID; break;
8144 if (!is_resolved)
8145 *tp_mdix = ETH_TP_MDI_INVALID;
8146 else if (mdix)
8147 *tp_mdix = ETH_TP_MDI_X;
8148 else
8149 *tp_mdix = ETH_TP_MDI;
8152 static void hclge_info_show(struct hclge_dev *hdev)
8154 struct device *dev = &hdev->pdev->dev;
8156 dev_info(dev, "PF info begin:\n");
8158 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8159 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8160 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8161 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8162 dev_info(dev, "Numbers of vmdq vports: %d\n", hdev->num_vmdq_vport);
8163 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8164 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8165 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8166 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8167 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8168 dev_info(dev, "This is %s PF\n",
8169 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8170 dev_info(dev, "DCB %s\n",
8171 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8172 dev_info(dev, "MQPRIO %s\n",
8173 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8175 dev_info(dev, "PF info end.\n");
8178 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8179 struct hclge_vport *vport)
8181 struct hnae3_client *client = vport->nic.client;
8182 struct hclge_dev *hdev = ae_dev->priv;
8185 ret = client->ops->init_instance(&vport->nic);
8189 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8190 hnae3_set_client_init_flag(client, ae_dev, 1);
8192 /* Enable nic hw error interrupts */
8193 ret = hclge_config_nic_hw_error(hdev, true);
8195 dev_err(&ae_dev->pdev->dev,
8196 "fail(%d) to enable hw error interrupts\n", ret);
8198 if (netif_msg_drv(&hdev->vport->nic))
8199 hclge_info_show(hdev);
8204 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8205 struct hclge_vport *vport)
8207 struct hnae3_client *client = vport->roce.client;
8208 struct hclge_dev *hdev = ae_dev->priv;
8211 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8212 !hdev->nic_client)
8213 return 0;
8215 client = hdev->roce_client;
8216 ret = hclge_init_roce_base_info(vport);
8220 ret = client->ops->init_instance(&vport->roce);
8224 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8225 hnae3_set_client_init_flag(client, ae_dev, 1);
8230 static int hclge_init_client_instance(struct hnae3_client *client,
8231 struct hnae3_ae_dev *ae_dev)
8233 struct hclge_dev *hdev = ae_dev->priv;
8234 struct hclge_vport *vport;
8237 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8238 vport = &hdev->vport[i];
8240 switch (client->type) {
8241 case HNAE3_CLIENT_KNIC:
8243 hdev->nic_client = client;
8244 vport->nic.client = client;
8245 ret = hclge_init_nic_client_instance(ae_dev, vport);
8249 ret = hclge_init_roce_client_instance(ae_dev, vport);
8254 case HNAE3_CLIENT_ROCE:
8255 if (hnae3_dev_roce_supported(hdev)) {
8256 hdev->roce_client = client;
8257 vport->roce.client = client;
8260 ret = hclge_init_roce_client_instance(ae_dev, vport);
8270 /* Enable roce ras interrupts */
8271 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8273 dev_err(&ae_dev->pdev->dev,
8274 "fail(%d) to enable roce ras interrupts\n", ret);
8279 hdev->nic_client = NULL;
8280 vport->nic.client = NULL;
8283 hdev->roce_client = NULL;
8284 vport->roce.client = NULL;
8288 static void hclge_uninit_client_instance(struct hnae3_client *client,
8289 struct hnae3_ae_dev *ae_dev)
8291 struct hclge_dev *hdev = ae_dev->priv;
8292 struct hclge_vport *vport;
8295 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8296 vport = &hdev->vport[i];
8297 if (hdev->roce_client) {
8298 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8299 hdev->roce_client->ops->uninit_instance(&vport->roce,
8301 hdev->roce_client = NULL;
8302 vport->roce.client = NULL;
8304 if (client->type == HNAE3_CLIENT_ROCE)
8305 return;
8306 if (hdev->nic_client && client->ops->uninit_instance) {
8307 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8308 client->ops->uninit_instance(&vport->nic, 0);
8309 hdev->nic_client = NULL;
8310 vport->nic.client = NULL;
8315 static int hclge_pci_init(struct hclge_dev *hdev)
8317 struct pci_dev *pdev = hdev->pdev;
8318 struct hclge_hw *hw;
8321 ret = pci_enable_device(pdev);
8323 dev_err(&pdev->dev, "failed to enable PCI device\n");
8327 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8328 if (ret) {
8329 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8330 if (ret) {
8331 dev_err(&pdev->dev,
8332 "can't set consistent PCI DMA");
8333 goto err_disable_device;
8334 }
8335 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8336 }
8338 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8340 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8341 goto err_disable_device;
8344 pci_set_master(pdev);
8345 hw = &hdev->hw;
8346 hw->io_base = pcim_iomap(pdev, 2, 0);
8347 if (!hw->io_base) {
8348 dev_err(&pdev->dev, "Can't map configuration register space\n");
8349 ret = -ENOMEM;
8350 goto err_clr_master;
8351 }
8353 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8357 pci_clear_master(pdev);
8358 pci_release_regions(pdev);
8360 pci_disable_device(pdev);
8365 static void hclge_pci_uninit(struct hclge_dev *hdev)
8367 struct pci_dev *pdev = hdev->pdev;
8369 pcim_iounmap(pdev, hdev->hw.io_base);
8370 pci_free_irq_vectors(pdev);
8371 pci_clear_master(pdev);
8372 pci_release_mem_regions(pdev);
8373 pci_disable_device(pdev);
8376 static void hclge_state_init(struct hclge_dev *hdev)
8378 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8379 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8380 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8381 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8382 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8383 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8386 static void hclge_state_uninit(struct hclge_dev *hdev)
8388 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8389 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8391 if (hdev->service_timer.function)
8392 del_timer_sync(&hdev->service_timer);
8393 if (hdev->reset_timer.function)
8394 del_timer_sync(&hdev->reset_timer);
8395 if (hdev->service_task.func)
8396 cancel_work_sync(&hdev->service_task);
8397 if (hdev->rst_service_task.func)
8398 cancel_work_sync(&hdev->rst_service_task);
8399 if (hdev->mbx_service_task.func)
8400 cancel_work_sync(&hdev->mbx_service_task);
8403 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8405 #define HCLGE_FLR_WAIT_MS 100
8406 #define HCLGE_FLR_WAIT_CNT 50
8407 struct hclge_dev *hdev = ae_dev->priv;
8410 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8411 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8412 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8413 hclge_reset_event(hdev->pdev, NULL);
8415 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8416 cnt++ < HCLGE_FLR_WAIT_CNT)
8417 msleep(HCLGE_FLR_WAIT_MS);
8419 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8420 dev_err(&hdev->pdev->dev,
8421 "flr wait down timeout: %d\n", cnt);
8424 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8426 struct hclge_dev *hdev = ae_dev->priv;
8428 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8431 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8433 struct pci_dev *pdev = ae_dev->pdev;
8434 struct hclge_dev *hdev;
8437 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8444 hdev->ae_dev = ae_dev;
8445 hdev->reset_type = HNAE3_NONE_RESET;
8446 hdev->reset_level = HNAE3_FUNC_RESET;
8447 ae_dev->priv = hdev;
8448 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8450 mutex_init(&hdev->vport_lock);
8451 mutex_init(&hdev->vport_cfg_mutex);
8452 spin_lock_init(&hdev->fd_rule_lock);
8454 ret = hclge_pci_init(hdev);
8456 dev_err(&pdev->dev, "PCI init failed\n");
8460 /* Firmware command queue initialize */
8461 ret = hclge_cmd_queue_init(hdev);
8463 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8464 goto err_pci_uninit;
8467 /* Firmware command initialize */
8468 ret = hclge_cmd_init(hdev);
8470 goto err_cmd_uninit;
8472 ret = hclge_get_cap(hdev);
8474 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8476 goto err_cmd_uninit;
8479 ret = hclge_configure(hdev);
8481 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8482 goto err_cmd_uninit;
8485 ret = hclge_init_msi(hdev);
8487 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8488 goto err_cmd_uninit;
8491 ret = hclge_misc_irq_init(hdev);
8494 "Misc IRQ(vector0) init error, ret = %d.\n",
8496 goto err_msi_uninit;
8499 ret = hclge_alloc_tqps(hdev);
8501 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8502 goto err_msi_irq_uninit;
8505 ret = hclge_alloc_vport(hdev);
8507 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8508 goto err_msi_irq_uninit;
8511 ret = hclge_map_tqp(hdev);
8513 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8514 goto err_msi_irq_uninit;
8517 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8518 ret = hclge_mac_mdio_config(hdev);
8520 dev_err(&hdev->pdev->dev,
8521 "mdio config fail ret=%d\n", ret);
8522 goto err_msi_irq_uninit;
8526 ret = hclge_init_umv_space(hdev);
8528 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8529 goto err_mdiobus_unreg;
8532 ret = hclge_mac_init(hdev);
8534 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8535 goto err_mdiobus_unreg;
8538 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8540 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8541 goto err_mdiobus_unreg;
8544 ret = hclge_config_gro(hdev, true);
8546 goto err_mdiobus_unreg;
8548 ret = hclge_init_vlan_config(hdev);
8550 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8551 goto err_mdiobus_unreg;
8554 ret = hclge_tm_schd_init(hdev);
8556 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8557 goto err_mdiobus_unreg;
8560 hclge_rss_init_cfg(hdev);
8561 ret = hclge_rss_init_hw(hdev);
8563 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8564 goto err_mdiobus_unreg;
8567 ret = init_mgr_tbl(hdev);
8569 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8570 goto err_mdiobus_unreg;
8573 ret = hclge_init_fd_config(hdev);
8576 "fd table init fail, ret=%d\n", ret);
8577 goto err_mdiobus_unreg;
8580 INIT_KFIFO(hdev->mac_tnl_log);
8582 hclge_dcb_ops_set(hdev);
8584 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8585 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8586 INIT_WORK(&hdev->service_task, hclge_service_task);
8587 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8588 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8590 hclge_clear_all_event_cause(hdev);
8592 /* Enable MISC vector(vector0) */
8593 hclge_enable_vector(&hdev->misc_vector, true);
8595 hclge_state_init(hdev);
8596 hdev->last_reset_time = jiffies;
8598 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8602 if (hdev->hw.mac.phydev)
8603 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8605 hclge_misc_irq_uninit(hdev);
8607 pci_free_irq_vectors(pdev);
8609 hclge_cmd_uninit(hdev);
8611 pcim_iounmap(pdev, hdev->hw.io_base);
8612 pci_clear_master(pdev);
8613 pci_release_regions(pdev);
8614 pci_disable_device(pdev);
8619 static void hclge_stats_clear(struct hclge_dev *hdev)
8621 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8624 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8626 struct hclge_vport *vport = hdev->vport;
8629 for (i = 0; i < hdev->num_alloc_vport; i++) {
8630 hclge_vport_stop(vport);
8631 vport++;
8635 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8637 struct hclge_dev *hdev = ae_dev->priv;
8638 struct pci_dev *pdev = ae_dev->pdev;
8641 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8643 hclge_stats_clear(hdev);
8644 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8645 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8647 ret = hclge_cmd_init(hdev);
8649 dev_err(&pdev->dev, "Cmd queue init failed\n");
8653 ret = hclge_map_tqp(hdev);
8655 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8659 hclge_reset_umv_space(hdev);
8661 ret = hclge_mac_init(hdev);
8663 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8667 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8669 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8673 ret = hclge_config_gro(hdev, true);
8677 ret = hclge_init_vlan_config(hdev);
8679 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8683 ret = hclge_tm_init_hw(hdev, true);
8685 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8689 ret = hclge_rss_init_hw(hdev);
8691 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8695 ret = hclge_init_fd_config(hdev);
8698 "fd table init fail, ret=%d\n", ret);
8702 /* Re-enable the hw error interrupts because
8703 * the interrupts get disabled on global reset.
8705 ret = hclge_config_nic_hw_error(hdev, true);
8708 "fail(%d) to re-enable NIC hw error interrupts\n",
8713 if (hdev->roce_client) {
8714 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8717 "fail(%d) to re-enable roce ras interrupts\n",
8723 hclge_reset_vport_state(hdev);
8725 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8726 HCLGE_DRIVER_NAME);
8731 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8733 struct hclge_dev *hdev = ae_dev->priv;
8734 struct hclge_mac *mac = &hdev->hw.mac;
8736 hclge_state_uninit(hdev);
8738 if (mac->mdio_bus)
8739 mdiobus_unregister(mac->mdio_bus);
8741 hclge_uninit_umv_space(hdev);
8743 /* Disable MISC vector(vector0) */
8744 hclge_enable_vector(&hdev->misc_vector, false);
8745 synchronize_irq(hdev->misc_vector.vector_irq);
8747 /* Disable all hw interrupts */
8748 hclge_config_mac_tnl_int(hdev, false);
8749 hclge_config_nic_hw_error(hdev, false);
8750 hclge_config_rocee_ras_interrupt(hdev, false);
8752 hclge_cmd_uninit(hdev);
8753 hclge_misc_irq_uninit(hdev);
8754 hclge_pci_uninit(hdev);
8755 mutex_destroy(&hdev->vport_lock);
8756 hclge_uninit_vport_mac_table(hdev);
8757 hclge_uninit_vport_vlan_table(hdev);
8758 mutex_destroy(&hdev->vport_cfg_mutex);
8759 ae_dev->priv = NULL;
8762 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8764 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8765 struct hclge_vport *vport = hclge_get_vport(handle);
8766 struct hclge_dev *hdev = vport->back;
8768 return min_t(u32, hdev->rss_size_max,
8769 vport->alloc_tqps / kinfo->num_tc);
8772 static void hclge_get_channels(struct hnae3_handle *handle,
8773 struct ethtool_channels *ch)
8775 ch->max_combined = hclge_get_max_channels(handle);
8776 ch->other_count = 1;
8778 ch->combined_count = handle->kinfo.rss_size;
8781 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8782 u16 *alloc_tqps, u16 *max_rss_size)
8784 struct hclge_vport *vport = hclge_get_vport(handle);
8785 struct hclge_dev *hdev = vport->back;
8787 *alloc_tqps = vport->alloc_tqps;
8788 *max_rss_size = hdev->rss_size_max;
8791 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8792 bool rxfh_configured)
8794 struct hclge_vport *vport = hclge_get_vport(handle);
8795 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8796 struct hclge_dev *hdev = vport->back;
8797 int cur_rss_size = kinfo->rss_size;
8798 int cur_tqps = kinfo->num_tqps;
8799 u16 tc_offset[HCLGE_MAX_TC_NUM];
8800 u16 tc_valid[HCLGE_MAX_TC_NUM];
8801 u16 tc_size[HCLGE_MAX_TC_NUM];
8806 kinfo->req_rss_size = new_tqps_num;
8808 ret = hclge_tm_vport_map_update(hdev);
8810 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8814 roundup_size = roundup_pow_of_two(kinfo->rss_size);
8815 roundup_size = ilog2(roundup_size);
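/* The TC size is programmed as a power-of-two exponent: e.g. an
 * rss_size of 24 rounds up to 32 and is written as ilog2(32) = 5.
 */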
8816 /* Set the RSS TC mode according to the new RSS size */
8817 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8820 if (!(hdev->hw_tc_map & BIT(i)))
8824 tc_size[i] = roundup_size;
8825 tc_offset[i] = kinfo->rss_size * i;
8827 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8831 /* RSS indirection table has been configured by the user */
8832 if (rxfh_configured)
8835 /* Reinitialize the RSS indirection table according to the new RSS size */
8836 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8837 if (!rss_indir)
8838 return -ENOMEM;
8840 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8841 rss_indir[i] = i % kinfo->rss_size;
8843 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8844 if (ret)
8845 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", ret);
8848 kfree(rss_indir);
8852 dev_info(&hdev->pdev->dev,
8853 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
8854 cur_rss_size, kinfo->rss_size,
8855 cur_tqps, kinfo->rss_size * kinfo->num_tc);
8860 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8861 u32 *regs_num_64_bit)
8863 struct hclge_desc desc;
8867 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8868 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8870 dev_err(&hdev->pdev->dev,
8871 "Query register number cmd failed, ret = %d.\n", ret);
8875 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8876 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8878 total_num = *regs_num_32_bit + *regs_num_64_bit;
8885 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8888 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8890 struct hclge_desc *desc;
8891 u32 *reg_val = data;
8900 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
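/* The first descriptor returns two header words alongside the data
 * (hence "regs_num + 2"); e.g. 30 registers need
 * DIV_ROUND_UP(30 + 2, 8) = 4 descriptors.
 */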
8901 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8905 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8906 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8908 dev_err(&hdev->pdev->dev,
8909 "Query 32 bit register cmd failed, ret = %d.\n", ret);
8914 for (i = 0; i < cmd_num; i++) {
8916 desc_data = (__le32 *)(&desc[i].data[0]);
8917 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8919 desc_data = (__le32 *)(&desc[i]);
8920 n = HCLGE_32_BIT_REG_RTN_DATANUM;
8922 for (k = 0; k < n; k++) {
8923 *reg_val++ = le32_to_cpu(*desc_data++);
8935 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8938 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8940 struct hclge_desc *desc;
8941 u64 *reg_val = data;
8950 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8951 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8955 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8956 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8958 dev_err(&hdev->pdev->dev,
8959 "Query 64 bit register cmd failed, ret = %d.\n", ret);
8964 for (i = 0; i < cmd_num; i++) {
8966 desc_data = (__le64 *)(&desc[i].data[0]);
8967 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8969 desc_data = (__le64 *)(&desc[i]);
8970 n = HCLGE_64_BIT_REG_RTN_DATANUM;
8972 for (k = 0; k < n; k++) {
8973 *reg_val++ = le64_to_cpu(*desc_data++);
8985 #define MAX_SEPARATE_NUM 4
8986 #define SEPARATOR_VALUE 0xFFFFFFFF
8987 #define REG_NUM_PER_LINE 4
8988 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
8990 static int hclge_get_regs_len(struct hnae3_handle *handle)
8992 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8993 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8994 struct hclge_vport *vport = hclge_get_vport(handle);
8995 struct hclge_dev *hdev = vport->back;
8996 u32 regs_num_32_bit, regs_num_64_bit;
8999 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9001 dev_err(&hdev->pdev->dev,
9002 "Get register number failed, ret = %d.\n", ret);
9006 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9007 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9008 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9009 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
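/* Each register group is padded out to whole 16-byte lines, and the
 * "+ 1" per group reserves one extra line for the SEPARATOR_VALUE
 * markers that hclge_get_regs() writes between groups.
 */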
9011 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9012 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9013 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9016 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9019 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9020 struct hclge_vport *vport = hclge_get_vport(handle);
9021 struct hclge_dev *hdev = vport->back;
9022 u32 regs_num_32_bit, regs_num_64_bit;
9023 int i, j, reg_um, separator_num;
9027 *version = hdev->fw_version;
9029 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9031 dev_err(&hdev->pdev->dev,
9032 "Get register number failed, ret = %d.\n", ret);
9036 /* fetch per-PF register values from the PF PCIe register space */
9037 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9038 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9039 for (i = 0; i < reg_um; i++)
9040 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9041 for (i = 0; i < separator_num; i++)
9042 *reg++ = SEPARATOR_VALUE;
9044 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9045 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9046 for (i = 0; i < reg_um; i++)
9047 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9048 for (i = 0; i < separator_num; i++)
9049 *reg++ = SEPARATOR_VALUE;
9051 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9052 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9053 for (j = 0; j < kinfo->num_tqps; j++) {
9054 for (i = 0; i < reg_um; i++)
9055 *reg++ = hclge_read_dev(&hdev->hw,
9056 ring_reg_addr_list[i] +
9058 for (i = 0; i < separator_num; i++)
9059 *reg++ = SEPARATOR_VALUE;
9062 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9063 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9064 for (j = 0; j < hdev->num_msi_used - 1; j++) {
9065 for (i = 0; i < reg_um; i++)
9066 *reg++ = hclge_read_dev(&hdev->hw,
9067 tqp_intr_reg_addr_list[i] +
9069 for (i = 0; i < separator_num; i++)
9070 *reg++ = SEPARATOR_VALUE;
9073 /* fetching PF common registers values from firmware */
9074 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9076 dev_err(&hdev->pdev->dev,
9077 "Get 32 bit register failed, ret = %d.\n", ret);
9081 reg += regs_num_32_bit;
9082 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9084 dev_err(&hdev->pdev->dev,
9085 "Get 64 bit register failed, ret = %d.\n", ret);
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
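/* Usage sketch, assuming the standard ethtool identify flow: running
 * "ethtool -p <ifname>" reaches this op through the hns3 .set_phys_id
 * callback, with ETHTOOL_ID_ACTIVE when identification starts and
 * ETHTOOL_ID_INACTIVE when it ends; the ETHTOOL_ID_ON/OFF blink states
 * are left unhandled here and fall through to -EINVAL.
 */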
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	/* copy both link-mode bitmaps word by word from the MAC state */
	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}

module_init(hclge_init);
module_exit(hclge_exit);
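/* Bring-up sketch: hnae3_register_ae_algo() walks the hnae3 framework's
 * known ae_dev instances and, for each device whose PCI ID matches
 * ae_algo_pci_tbl, binds hclge_ops and runs .init_ae_dev;
 * hnae3_unregister_ae_algo() tears the matching devices down
 * symmetrically on module exit.
 */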
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);