// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

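/* MAC statistics can be read from firmware in two ways: the older
 * fixed-size 21-descriptor HCLGE_OPC_STATS_MAC command handled below,
 * or the newer variable-length HCLGE_OPC_STATS_MAC_ALL command handled
 * by hclge_mac_update_stats_complete(). hclge_mac_update_stats() picks
 * between them at runtime based on what the firmware supports.
 */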
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

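/* Given the descriptor layout used above (the first descriptor carries
 * 3 of the 64-bit counters because the command head takes room, each
 * following descriptor carries 4), the descriptor count needed for
 * reg_num counters is 1 + ceil((reg_num - 3) / 4), which is what the
 * shift-and-remainder arithmetic below computes.
 */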
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

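/* Ask firmware how many stats registers it exposes; firmware that does
 * not implement the query opcode returns -EOPNOTSUPP, which selects
 * the legacy fixed-size read path instead.
 */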
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

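/* The caller's data buffer is laid out as all TX ring counters first,
 * then all RX ring counters, matching the string order produced by
 * hclge_tqps_get_strings() below.
 */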
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp exposes one TX and one RX counter */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return data;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
	 * phy: only support when phy device exist on board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

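/* Firmware reports PF state through HCLGE_OPC_QUERY_FUNC_STATUS; the
 * loop below polls briefly (a few attempts, 1-2 ms apart) until the PF
 * reset appears done before the state word is parsed.
 */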
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

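/* Firmware encodes link speed as a small integer: 0 = 1G, 1 = 10G,
 * 2 = 25G, 3 = 40G, 4 = 50G, 5 = 100G, 6 = 10M, 7 = 100M. The same
 * encoding is used in the other direction by
 * hclge_cfg_mac_speed_dup_hw() further down.
 */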
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6: *speed = HCLGE_MAC_SPEED_10M; break;
	case 7: *speed = HCLGE_MAC_SPEED_100M; break;
	case 0: *speed = HCLGE_MAC_SPEED_1G; break;
	case 1: *speed = HCLGE_MAC_SPEED_10G; break;
	case 2: *speed = HCLGE_MAC_SPEED_25G; break;
	case 3: *speed = HCLGE_MAC_SPEED_40G; break;
	case 4: *speed = HCLGE_MAC_SPEED_50G; break;
	case 5: *speed = HCLGE_MAC_SPEED_100G; break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

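/* FEC capability follows the MAC speed: 10G/40G links advertise only
 * BASE-R FEC, 25G/50G report both BASE-R and RS ability, and 100G is
 * RS only; AUTO is reported wherever any FEC mode is available.
 */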
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

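/* The static configuration is returned in two command descriptors:
 * desc[0] carries the vport/TC/queue, PHY, media and MAC-address
 * fields, and desc[1] carries the NUMA node map, speed ability and
 * UMV table size.
 */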
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length is expressed in units of 4 bytes when sent to
		 * hardware
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

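/* Pull the static configuration from flash, copy it into the device
 * struct, and sanitise the values (TC count clamped to a valid range,
 * DCB and kdump constraints applied) before the rest of init runs.
 */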
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently discontinuous TC maps are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret = 0;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps,
				       hdev->num_tx_desc, hdev->num_rx_desc);
		if (ret)
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return ret;
}

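/* One vport is allocated for the PF itself plus one per requested VF
 * and VMDq instance. TQPs are split evenly across the vports, with the
 * main (PF) vport taking any remainder.
 */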
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

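/* Check whether rx_all (the total RX packet buffer minus TX buffers)
 * can hold the per-TC private buffers plus a shared buffer sized for
 * the enabled TCs; if it can, fill in the shared-buffer watermarks and
 * per-TC thresholds as a side effect.
 */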
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
				 + hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
					     HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		if (tc_num)
			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
		else
			hi_thrd = shared_buf - hdev->dv_buf_size;

		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / 2;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : 256;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);

	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

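/* MSI/MSI-X vectors are shared between the NIC and RoCE blocks: NIC
 * vectors come first, and roce_base_msix_offset (reported by firmware
 * in hclge_query_pf_resource()) marks where the RoCE vectors begin.
 */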
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	/* speeds above 100M only support full duplex */
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2235 duplex = hclge_check_speed_dup(duplex, speed);
2236 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2239 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2243 hdev->hw.mac.speed = speed;
2244 hdev->hw.mac.duplex = duplex;
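/* The speed/duplex cached above lets hclge_cfg_mac_speed_dup() skip the
 * firmware command when nothing has changed, so callers such as the
 * periodic port-info update path can invoke it unconditionally.
 */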
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2252 struct hclge_vport *vport = hclge_get_vport(handle);
2253 struct hclge_dev *hdev = vport->back;
2255 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2260 struct hclge_config_auto_neg_cmd *req;
2261 struct hclge_desc desc;
2265 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2267 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2271 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2273 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2281 struct hclge_vport *vport = hclge_get_vport(handle);
2282 struct hclge_dev *hdev = vport->back;
2284 if (!hdev->hw.mac.support_autoneg) {
2286 dev_err(&hdev->pdev->dev,
2287 "autoneg is not supported by current port\n");
2294 return hclge_set_autoneg_en(hdev, enable);
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2299 struct hclge_vport *vport = hclge_get_vport(handle);
2300 struct hclge_dev *hdev = vport->back;
2301 struct phy_device *phydev = hdev->hw.mac.phydev;
2304 return phydev->autoneg;
2306 return hdev->hw.mac.autoneg;
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2311 struct hclge_vport *vport = hclge_get_vport(handle);
2312 struct hclge_dev *hdev = vport->back;
2315 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2317 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2320 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2325 struct hclge_config_fec_cmd *req;
2326 struct hclge_desc desc;
2329 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2331 req = (struct hclge_config_fec_cmd *)desc.data;
2332 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334 if (fec_mode & BIT(HNAE3_FEC_RS))
2335 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337 if (fec_mode & BIT(HNAE3_FEC_BASER))
2338 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2341 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2343 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
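/* fec_mode is a bitmask of HNAE3_FEC_* capabilities: AUTO lets the
 * firmware pick, while RS and BaseR select a specific FEC algorithm via
 * the HCLGE_MAC_CFG_FEC_MODE field, as encoded in hclge_set_fec_hw()
 * above.
 */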
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2350 struct hclge_vport *vport = hclge_get_vport(handle);
2351 struct hclge_dev *hdev = vport->back;
2352 struct hclge_mac *mac = &hdev->hw.mac;
2355 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2360 ret = hclge_set_fec_hw(hdev, fec_mode);
2364 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2371 struct hclge_vport *vport = hclge_get_vport(handle);
2372 struct hclge_dev *hdev = vport->back;
2373 struct hclge_mac *mac = &hdev->hw.mac;
2376 *fec_ability = mac->fec_ability;
2378 *fec_mode = mac->fec_mode;
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2383 struct hclge_mac *mac = &hdev->hw.mac;
2386 hdev->support_sfp_query = true;
2387 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389 hdev->hw.mac.duplex);
2391 dev_err(&hdev->pdev->dev,
2392 "Config mac speed dup fail ret=%d\n", ret);
2398 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2401 dev_err(&hdev->pdev->dev,
2402 "Fec mode init fail, ret = %d\n", ret);
2407 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2409 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2413 ret = hclge_buffer_alloc(hdev);
2415 dev_err(&hdev->pdev->dev,
2416 "allocate buffer fail, ret=%d\n", ret);
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2423 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425 schedule_work(&hdev->mbx_service_task);
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2430 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2431 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2432 schedule_work(&hdev->rst_service_task);
2435 static void hclge_task_schedule(struct hclge_dev *hdev)
2437 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2438 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2439 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2440 (void)schedule_work(&hdev->service_task);
2443 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2445 struct hclge_link_status_cmd *req;
2446 struct hclge_desc desc;
2450 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2451 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2453 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2458 req = (struct hclge_link_status_cmd *)desc.data;
2459 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2461 return !!link_status;
2464 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2469 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2472 mac_state = hclge_get_mac_link_status(hdev);
2474 if (hdev->hw.mac.phydev) {
2475 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2476 link_stat = mac_state &
2477 hdev->hw.mac.phydev->link;
2482 link_stat = mac_state;
2488 static void hclge_update_link_status(struct hclge_dev *hdev)
2490 struct hnae3_client *rclient = hdev->roce_client;
2491 struct hnae3_client *client = hdev->nic_client;
2492 struct hnae3_handle *rhandle;
2493 struct hnae3_handle *handle;
2499 state = hclge_get_mac_phy_link(hdev);
2500 if (state != hdev->hw.mac.link) {
2501 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2502 handle = &hdev->vport[i].nic;
2503 client->ops->link_status_change(handle, state);
2504 hclge_config_mac_tnl_int(hdev, state);
2505 rhandle = &hdev->vport[i].roce;
2506 if (rclient && rclient->ops->link_status_change)
2507 rclient->ops->link_status_change(rhandle,
2510 hdev->hw.mac.link = state;
2514 static void hclge_update_port_capability(struct hclge_mac *mac)
2516 /* update fec ability by speed */
2517 hclge_convert_setting_fec(mac);
2519 /* firmware can not identify the backplane type, so the media type
2520 * read from the configuration is used to handle it
2521 */
2522 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2523 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2524 mac->module_type = HNAE3_MODULE_TYPE_KR;
2525 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2526 mac->module_type = HNAE3_MODULE_TYPE_TP;
2528 if (mac->support_autoneg) {
2529 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2530 linkmode_copy(mac->advertising, mac->supported);
2532 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2534 linkmode_zero(mac->advertising);
2538 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2540 struct hclge_sfp_info_cmd *resp = NULL;
2541 struct hclge_desc desc;
2544 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2545 resp = (struct hclge_sfp_info_cmd *)desc.data;
2546 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2547 if (ret == -EOPNOTSUPP) {
2548 dev_warn(&hdev->pdev->dev,
2549 "IMP do not support get SFP speed %d\n", ret);
2552 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2556 *speed = le32_to_cpu(resp->speed);
2561 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2563 struct hclge_sfp_info_cmd *resp;
2564 struct hclge_desc desc;
2567 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2568 resp = (struct hclge_sfp_info_cmd *)desc.data;
2570 resp->query_type = QUERY_ACTIVE_SPEED;
2572 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2573 if (ret == -EOPNOTSUPP) {
2574 dev_warn(&hdev->pdev->dev,
2575 "IMP does not support get SFP info %d\n", ret);
2578 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2582 mac->speed = le32_to_cpu(resp->speed);
2583 /* if resp->speed_ability is 0, it means it's an old firmware
2584 * version, so do not update these params
2585 */
2586 if (resp->speed_ability) {
2587 mac->module_type = le32_to_cpu(resp->module_type);
2588 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2589 mac->autoneg = resp->autoneg;
2590 mac->support_autoneg = resp->autoneg_ability;
2591 if (!resp->active_fec)
2594 mac->fec_mode = BIT(resp->active_fec);
2596 mac->speed_type = QUERY_SFP_SPEED;
2602 static int hclge_update_port_info(struct hclge_dev *hdev)
2604 struct hclge_mac *mac = &hdev->hw.mac;
2605 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2608 /* get the port info from SFP cmd if not copper port */
2609 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2612 /* if IMP does not support getting SFP/qSFP info, return directly */
2613 if (!hdev->support_sfp_query)
2616 if (hdev->pdev->revision >= 0x21)
2617 ret = hclge_get_sfp_info(hdev, mac);
2619 ret = hclge_get_sfp_speed(hdev, &speed);
2621 if (ret == -EOPNOTSUPP) {
2622 hdev->support_sfp_query = false;
2628 if (hdev->pdev->revision >= 0x21) {
2629 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2630 hclge_update_port_capability(mac);
2633 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2636 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2637 return 0; /* do nothing if no SFP */
2639 /* must config full duplex for SFP */
2640 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2644 static int hclge_get_status(struct hnae3_handle *handle)
2646 struct hclge_vport *vport = hclge_get_vport(handle);
2647 struct hclge_dev *hdev = vport->back;
2649 hclge_update_link_status(hdev);
2651 return hdev->hw.mac.link;
2654 static void hclge_service_timer(struct timer_list *t)
2656 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2658 mod_timer(&hdev->service_timer, jiffies + HZ);
2659 hdev->hw_stats.stats_timer++;
2660 hdev->fd_arfs_expire_timer++;
2661 hclge_task_schedule(hdev);
2664 static void hclge_service_complete(struct hclge_dev *hdev)
2666 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2668 /* Flush memory before next watchdog */
2669 smp_mb__before_atomic();
2670 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2673 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2675 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2677 /* fetch the events from their corresponding regs */
2678 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2679 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2680 msix_src_reg = hclge_read_dev(&hdev->hw,
2681 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2683 /* Assumption: If by any chance reset and mailbox events are reported
2684 * together then we will only process reset event in this go and will
2685 * defer the processing of the mailbox events. Since we would not have
2686 * cleared the RX CMDQ event this time, we would receive another
2687 * interrupt from H/W just for the mailbox.
2688 */
2690 /* check for vector0 reset event sources */
2691 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2692 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2693 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2694 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2695 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2696 hdev->rst_stats.imp_rst_cnt++;
2697 return HCLGE_VECTOR0_EVENT_RST;
2700 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2701 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2702 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2703 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2704 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2705 hdev->rst_stats.global_rst_cnt++;
2706 return HCLGE_VECTOR0_EVENT_RST;
2709 /* check for vector0 msix event source */
2710 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2711 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2713 return HCLGE_VECTOR0_EVENT_ERR;
2716 /* check for vector0 mailbox(=CMDQ RX) event source */
2717 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2718 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2719 *clearval = cmdq_src_reg;
2720 return HCLGE_VECTOR0_EVENT_MBX;
2723 /* print other vector0 event source */
2724 dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2725 cmdq_src_reg, msix_src_reg);
2726 return HCLGE_VECTOR0_EVENT_OTHER;
2729 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2732 switch (event_type) {
2733 case HCLGE_VECTOR0_EVENT_RST:
2734 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2736 case HCLGE_VECTOR0_EVENT_MBX:
2737 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2744 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2746 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2747 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2748 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2749 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2750 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2753 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2755 writel(enable ? 1 : 0, vector->addr);
2758 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2760 struct hclge_dev *hdev = data;
2764 hclge_enable_vector(&hdev->misc_vector, false);
2765 event_cause = hclge_check_event_cause(hdev, &clearval);
2767 /* vector 0 interrupt is shared with reset and mailbox source events. */
2768 switch (event_cause) {
2769 case HCLGE_VECTOR0_EVENT_ERR:
2770 /* we do not know what type of reset is required now. This could
2771 * only be decided after we fetch the type of errors which
2772 * caused this event. Therefore, we will do below for now:
2773 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2774 * have deferred the type of reset to be used.
2775 * 2. Schedule the reset service task.
2776 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2777 * will fetch the correct type of reset. This would be done
2778 * by first decoding the types of errors.
2779 */
2780 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2782 case HCLGE_VECTOR0_EVENT_RST:
2783 hclge_reset_task_schedule(hdev);
2785 case HCLGE_VECTOR0_EVENT_MBX:
2786 /* If we are here then,
2787 * 1. Either we are not handling any mbx task and we are not
2788 * scheduled as well
2789 * OR
2790 * 2. We could be handling a mbx task but nothing more is
2791 * scheduled.
2792 * In both cases, we should schedule the mbx task as there are more
2793 * mbx messages reported by this interrupt.
2794 */
2795 hclge_mbx_task_schedule(hdev);
2798 dev_warn(&hdev->pdev->dev,
2799 "received unknown or unhandled event of vector0\n");
2803 /* clear the source of interrupt if it is not caused by reset */
2804 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2805 hclge_clear_event_cause(hdev, event_cause, clearval);
2806 hclge_enable_vector(&hdev->misc_vector, true);
2812 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2814 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2815 dev_warn(&hdev->pdev->dev,
2816 "vector(vector_id %d) has been freed.\n", vector_id);
2820 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2821 hdev->num_msi_left += 1;
2822 hdev->num_msi_used -= 1;
2825 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2827 struct hclge_misc_vector *vector = &hdev->misc_vector;
2829 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2831 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2832 hdev->vector_status[0] = 0;
2834 hdev->num_msi_left -= 1;
2835 hdev->num_msi_used += 1;
2838 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2842 hclge_get_misc_vector(hdev);
2844 /* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
2845 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2846 0, "hclge_misc", hdev);
2848 hclge_free_vector(hdev, 0);
2849 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2850 hdev->misc_vector.vector_irq);
2856 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2858 free_irq(hdev->misc_vector.vector_irq, hdev);
2859 hclge_free_vector(hdev, 0);
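/* Misc (vector 0) lifecycle: hclge_get_misc_vector() reserves IRQ 0 and
 * accounts for it in num_msi_used/num_msi_left, hclge_misc_irq_init()
 * requests the handler, and hclge_misc_irq_uninit() above releases both.
 */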
2862 int hclge_notify_client(struct hclge_dev *hdev,
2863 enum hnae3_reset_notify_type type)
2865 struct hnae3_client *client = hdev->nic_client;
2868 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
2872 if (!client->ops->reset_notify)
2875 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2876 struct hnae3_handle *handle = &hdev->vport[i].nic;
2879 ret = client->ops->reset_notify(handle, type);
2881 dev_err(&hdev->pdev->dev,
2882 "notify nic client failed %d(%d)\n", type, ret);
2890 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2891 enum hnae3_reset_notify_type type)
2893 struct hnae3_client *client = hdev->roce_client;
2897 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
2901 if (!client->ops->reset_notify)
2904 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2905 struct hnae3_handle *handle = &hdev->vport[i].roce;
2907 ret = client->ops->reset_notify(handle, type);
2909 dev_err(&hdev->pdev->dev,
2910 "notify roce client failed %d(%d)",
2919 static int hclge_reset_wait(struct hclge_dev *hdev)
2921 #define HCLGE_RESET_WAIT_MS 100
2922 #define HCLGE_RESET_WAIT_CNT 200
2923 u32 val, reg, reg_bit;
2926 switch (hdev->reset_type) {
2927 case HNAE3_IMP_RESET:
2928 reg = HCLGE_GLOBAL_RESET_REG;
2929 reg_bit = HCLGE_IMP_RESET_BIT;
2931 case HNAE3_GLOBAL_RESET:
2932 reg = HCLGE_GLOBAL_RESET_REG;
2933 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2935 case HNAE3_FUNC_RESET:
2936 reg = HCLGE_FUN_RST_ING;
2937 reg_bit = HCLGE_FUN_RST_ING_B;
2939 case HNAE3_FLR_RESET:
2942 dev_err(&hdev->pdev->dev,
2943 "Wait for unsupported reset type: %d\n",
2948 if (hdev->reset_type == HNAE3_FLR_RESET) {
2949 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2950 cnt++ < HCLGE_RESET_WAIT_CNT)
2951 msleep(HCLGE_RESET_WAIT_MS);
2953 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2954 dev_err(&hdev->pdev->dev,
2955 "flr wait timeout: %d\n", cnt);
2962 val = hclge_read_dev(&hdev->hw, reg);
2963 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2964 msleep(HCLGE_RESET_WAIT_MS);
2965 val = hclge_read_dev(&hdev->hw, reg);
2969 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2970 dev_warn(&hdev->pdev->dev,
2971 "Wait for reset timeout: %d\n", hdev->reset_type);
2978 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2980 struct hclge_vf_rst_cmd *req;
2981 struct hclge_desc desc;
2983 req = (struct hclge_vf_rst_cmd *)desc.data;
2984 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2985 req->dest_vfid = func_id;
2990 return hclge_cmd_send(&hdev->hw, &desc, 1);
2993 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2997 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2998 struct hclge_vport *vport = &hdev->vport[i];
3001 /* Send cmd to set/clear VF's FUNC_RST_ING */
3002 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3004 dev_err(&hdev->pdev->dev,
3005 "set vf(%d) rst failed %d!\n",
3006 vport->vport_id, ret);
3010 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3013 /* Inform VF to process the reset.
3014 * hclge_inform_reset_assert_to_vf may fail if VF
3015 * driver is not loaded.
3017 ret = hclge_inform_reset_assert_to_vf(vport);
3019 dev_warn(&hdev->pdev->dev,
3020 "inform reset to vf(%d) failed %d!\n",
3021 vport->vport_id, ret);
3027 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3029 struct hclge_desc desc;
3030 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3033 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3034 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3035 req->fun_reset_vfid = func_id;
3037 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3039 dev_err(&hdev->pdev->dev,
3040 "send function reset cmd fail, status =%d\n", ret);
3045 static void hclge_do_reset(struct hclge_dev *hdev)
3047 struct hnae3_handle *handle = &hdev->vport[0].nic;
3048 struct pci_dev *pdev = hdev->pdev;
3051 if (hclge_get_hw_reset_stat(handle)) {
3052 dev_info(&pdev->dev, "Hardware reset not finish\n");
3053 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3054 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3055 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3059 switch (hdev->reset_type) {
3060 case HNAE3_GLOBAL_RESET:
3061 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3062 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3063 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3064 dev_info(&pdev->dev, "Global Reset requested\n");
3066 case HNAE3_FUNC_RESET:
3067 dev_info(&pdev->dev, "PF Reset requested\n");
3068 /* schedule again to check later */
3069 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3070 hclge_reset_task_schedule(hdev);
3072 case HNAE3_FLR_RESET:
3073 dev_info(&pdev->dev, "FLR requested\n");
3074 /* schedule again to check later */
3075 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3076 hclge_reset_task_schedule(hdev);
3079 dev_warn(&pdev->dev,
3080 "Unsupported reset type: %d\n", hdev->reset_type);
3085 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3086 unsigned long *addr)
3088 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3090 /* first, resolve any unknown reset type to the known type(s) */
3091 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3092 /* we will intentionally ignore any errors from this function
3093 * as we will end up in *some* reset request in any case
3095 hclge_handle_hw_msix_error(hdev, addr);
3096 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3097 /* We deferred the clearing of the error event which caused the
3098 * interrupt, since it was not possible to do that in
3099 * interrupt context (and this is the reason we introduced the
3100 * new UNKNOWN reset type). Now that the errors have been
3101 * handled and cleared in hardware, we can safely enable
3102 * interrupts. This is an exception to the norm.
3103 */
3104 hclge_enable_vector(&hdev->misc_vector, true);
3107 /* return the highest priority reset level amongst all */
3108 if (test_bit(HNAE3_IMP_RESET, addr)) {
3109 rst_level = HNAE3_IMP_RESET;
3110 clear_bit(HNAE3_IMP_RESET, addr);
3111 clear_bit(HNAE3_GLOBAL_RESET, addr);
3112 clear_bit(HNAE3_FUNC_RESET, addr);
3113 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3114 rst_level = HNAE3_GLOBAL_RESET;
3115 clear_bit(HNAE3_GLOBAL_RESET, addr);
3116 clear_bit(HNAE3_FUNC_RESET, addr);
3117 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3118 rst_level = HNAE3_FUNC_RESET;
3119 clear_bit(HNAE3_FUNC_RESET, addr);
3120 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3121 rst_level = HNAE3_FLR_RESET;
3122 clear_bit(HNAE3_FLR_RESET, addr);
3125 if (hdev->reset_type != HNAE3_NONE_RESET &&
3126 rst_level < hdev->reset_type)
3127 return HNAE3_NONE_RESET;
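/* Example of the priority resolution above: if both HNAE3_GLOBAL_RESET
 * and HNAE3_FUNC_RESET are pending, the global reset is returned and the
 * function reset bit is cleared as well, since a global reset implicitly
 * covers the function level. A request of lower priority than the reset
 * already in progress is reported as HNAE3_NONE_RESET.
 */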
3132 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3136 switch (hdev->reset_type) {
3137 case HNAE3_IMP_RESET:
3138 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3140 case HNAE3_GLOBAL_RESET:
3141 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3150 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3151 hclge_enable_vector(&hdev->misc_vector, true);
3154 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3158 switch (hdev->reset_type) {
3159 case HNAE3_FUNC_RESET:
3161 case HNAE3_FLR_RESET:
3162 ret = hclge_set_all_vf_rst(hdev, true);
3171 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3173 #define HCLGE_RESET_SYNC_TIME 100
3178 switch (hdev->reset_type) {
3179 case HNAE3_FUNC_RESET:
3180 /* For now there is no mechanism for the PF to know if the VF has
3181 * stopped IO, so just wait 100 ms for the VF to stop IO
3182 */
3183 msleep(HCLGE_RESET_SYNC_TIME);
3184 ret = hclge_func_reset_cmd(hdev, 0);
3186 dev_err(&hdev->pdev->dev,
3187 "asserting function reset fail %d!\n", ret);
3191 /* After performing PF reset, it is not necessary to do the
3192 * mailbox handling or send any command to firmware, because
3193 * any mailbox handling or command to firmware is only valid
3194 * after hclge_cmd_init is called.
3196 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3197 hdev->rst_stats.pf_rst_cnt++;
3199 case HNAE3_FLR_RESET:
3200 /* For now there is no mechanism for the PF to know if the VF has
3201 * stopped IO, so just wait 100 ms for the VF to stop IO
3202 */
3203 msleep(HCLGE_RESET_SYNC_TIME);
3204 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3205 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3206 hdev->rst_stats.flr_rst_cnt++;
3208 case HNAE3_IMP_RESET:
3209 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3210 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3211 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3217 /* inform hardware that preparatory work is done */
3218 msleep(HCLGE_RESET_SYNC_TIME);
3219 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3220 HCLGE_NIC_CMQ_ENABLE);
3221 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3226 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3228 #define MAX_RESET_FAIL_CNT 5
3229 #define RESET_UPGRADE_DELAY_SEC 10
3231 if (hdev->reset_pending) {
3232 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3233 hdev->reset_pending);
3235 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3236 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3237 BIT(HCLGE_IMP_RESET_BIT))) {
3238 dev_info(&hdev->pdev->dev,
3239 "reset failed because IMP Reset is pending\n");
3240 hclge_clear_reset_cause(hdev);
3242 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3243 hdev->reset_fail_cnt++;
3245 set_bit(hdev->reset_type, &hdev->reset_pending);
3246 dev_info(&hdev->pdev->dev,
3247 "re-schedule to wait for hw reset done\n");
3251 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3252 hclge_clear_reset_cause(hdev);
3253 mod_timer(&hdev->reset_timer,
3254 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3259 hclge_clear_reset_cause(hdev);
3260 dev_err(&hdev->pdev->dev, "Reset fail!\n");
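/* Failure handling strategy, as implemented above: the first
 * MAX_RESET_FAIL_CNT failures re-pend the same reset level and wait for
 * the hardware again; after that the level is upgraded by arming
 * reset_timer, which triggers a global reset RESET_UPGRADE_DELAY_SEC
 * seconds later (see hclge_reset_timer()).
 */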
3264 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3268 switch (hdev->reset_type) {
3269 case HNAE3_FUNC_RESET:
3271 case HNAE3_FLR_RESET:
3272 ret = hclge_set_all_vf_rst(hdev, false);
3281 static void hclge_reset(struct hclge_dev *hdev)
3283 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3284 bool is_timeout = false;
3287 /* Initialize ae_dev reset status as well, in case enet layer wants to
3288 * know if device is undergoing reset
3290 ae_dev->reset_type = hdev->reset_type;
3291 hdev->rst_stats.reset_cnt++;
3292 /* perform reset of the stack & ae device for a client */
3293 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3297 ret = hclge_reset_prepare_down(hdev);
3302 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3304 goto err_reset_lock;
3308 ret = hclge_reset_prepare_wait(hdev);
3312 if (hclge_reset_wait(hdev)) {
3317 hdev->rst_stats.hw_reset_done_cnt++;
3319 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3324 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3326 goto err_reset_lock;
3328 ret = hclge_reset_ae_dev(hdev->ae_dev);
3330 goto err_reset_lock;
3332 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3334 goto err_reset_lock;
3336 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3338 goto err_reset_lock;
3340 hclge_clear_reset_cause(hdev);
3342 ret = hclge_reset_prepare_up(hdev);
3344 goto err_reset_lock;
3346 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3348 goto err_reset_lock;
3352 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3356 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3360 hdev->last_reset_time = jiffies;
3361 hdev->reset_fail_cnt = 0;
3362 hdev->rst_stats.reset_done_cnt++;
3363 ae_dev->reset_type = HNAE3_NONE_RESET;
3364 del_timer(&hdev->reset_timer);
3371 if (hclge_reset_err_handle(hdev, is_timeout))
3372 hclge_reset_task_schedule(hdev);
3375 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3377 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3378 struct hclge_dev *hdev = ae_dev->priv;
3380 /* We might end up getting called broadly because of the 2 cases below:
3381 * 1. A recoverable error was conveyed through APEI and the only way
3382 * to bring back normalcy is to reset.
3383 * 2. A new reset request from the stack due to timeout
3385 * For the first case, the error event might not have an ae handle
3386 * available. Check if this is a new reset request and we are not here
3387 * just because the last reset attempt did not succeed and the watchdog
3388 * hit us again. We will know this if the last reset request did not
3389 * occur very recently (watchdog timer = 5*HZ, so check after a
3390 * sufficiently large time, say 4*5*HZ). In case of a new request we
3391 * reset the "reset level" to PF reset. And if it is a repeat reset
3392 * request of the most recent one then we want to make sure we throttle
3393 * the reset request. Therefore, we will not allow it again before 3*HZ.
3394 */
3396 handle = &hdev->vport[0].nic;
3398 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3400 else if (hdev->default_reset_request)
3402 hclge_get_reset_level(hdev,
3403 &hdev->default_reset_request);
3404 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3405 hdev->reset_level = HNAE3_FUNC_RESET;
3407 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
3410 /* request reset & schedule reset task */
3411 set_bit(hdev->reset_level, &hdev->reset_request);
3412 hclge_reset_task_schedule(hdev);
3414 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3415 hdev->reset_level++;
3418 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3419 enum hnae3_reset_type rst_type)
3421 struct hclge_dev *hdev = ae_dev->priv;
3423 set_bit(rst_type, &hdev->default_reset_request);
3426 static void hclge_reset_timer(struct timer_list *t)
3428 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3430 dev_info(&hdev->pdev->dev,
3431 "triggering global reset in reset timer\n");
3432 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3433 hclge_reset_event(hdev->pdev, NULL);
3436 static void hclge_reset_subtask(struct hclge_dev *hdev)
3438 /* check if there is any ongoing reset in the hardware. This status can
3439 * be checked from reset_pending. If there is then, we need to wait for
3440 * hardware to complete reset.
3441 * a. If we are able to figure out in reasonable time that hardware
3442 * has fully reset, then we can proceed with driver and client
3443 * reset.
3444 * b. else, we can come back later to check this status, so re-sched
3445 * now.
3446 */
3447 hdev->last_reset_time = jiffies;
3448 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3449 if (hdev->reset_type != HNAE3_NONE_RESET)
3452 /* check if we got any *new* reset requests to be honored */
3453 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3454 if (hdev->reset_type != HNAE3_NONE_RESET)
3455 hclge_do_reset(hdev);
3457 hdev->reset_type = HNAE3_NONE_RESET;
3460 static void hclge_reset_service_task(struct work_struct *work)
3462 struct hclge_dev *hdev =
3463 container_of(work, struct hclge_dev, rst_service_task);
3465 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3468 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3470 hclge_reset_subtask(hdev);
3472 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3475 static void hclge_mailbox_service_task(struct work_struct *work)
3477 struct hclge_dev *hdev =
3478 container_of(work, struct hclge_dev, mbx_service_task);
3480 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3483 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3485 hclge_mbx_handler(hdev);
3487 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3490 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3494 /* start from vport 1, since the PF (vport 0) is always alive */
3495 for (i = 1; i < hdev->num_alloc_vport; i++) {
3496 struct hclge_vport *vport = &hdev->vport[i];
3498 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3499 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3501 /* If vf is not alive, set to default value */
3502 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3503 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3507 static void hclge_service_task(struct work_struct *work)
3509 struct hclge_dev *hdev =
3510 container_of(work, struct hclge_dev, service_task);
3512 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3513 hclge_update_stats_for_all(hdev);
3514 hdev->hw_stats.stats_timer = 0;
3517 hclge_update_port_info(hdev);
3518 hclge_update_link_status(hdev);
3519 hclge_update_vport_alive(hdev);
3520 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3521 hclge_rfs_filter_expire(hdev);
3522 hdev->fd_arfs_expire_timer = 0;
3524 hclge_service_complete(hdev);
3527 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3529 /* VF handle has no client */
3530 if (!handle->client)
3531 return container_of(handle, struct hclge_vport, nic);
3532 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3533 return container_of(handle, struct hclge_vport, roce);
3535 return container_of(handle, struct hclge_vport, nic);
3538 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3539 struct hnae3_vector_info *vector_info)
3541 struct hclge_vport *vport = hclge_get_vport(handle);
3542 struct hnae3_vector_info *vector = vector_info;
3543 struct hclge_dev *hdev = vport->back;
3547 vector_num = min(hdev->num_msi_left, vector_num);
3549 for (j = 0; j < vector_num; j++) {
3550 for (i = 1; i < hdev->num_msi; i++) {
3551 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3552 vector->vector = pci_irq_vector(hdev->pdev, i);
3553 vector->io_addr = hdev->hw.io_base +
3554 HCLGE_VECTOR_REG_BASE +
3555 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3557 HCLGE_VECTOR_VF_OFFSET;
3558 hdev->vector_status[i] = vport->vport_id;
3559 hdev->vector_irq[i] = vector->vector;
3568 hdev->num_msi_left -= alloc;
3569 hdev->num_msi_used += alloc;
3574 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3578 for (i = 0; i < hdev->num_msi; i++)
3579 if (vector == hdev->vector_irq[i])
3585 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3587 struct hclge_vport *vport = hclge_get_vport(handle);
3588 struct hclge_dev *hdev = vport->back;
3591 vector_id = hclge_get_vector_index(hdev, vector);
3592 if (vector_id < 0) {
3593 dev_err(&hdev->pdev->dev,
3594 "Get vector index fail. vector_id =%d\n", vector_id);
3598 hclge_free_vector(hdev, vector_id);
3603 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3605 return HCLGE_RSS_KEY_SIZE;
3608 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3610 return HCLGE_RSS_IND_TBL_SIZE;
3613 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3614 const u8 hfunc, const u8 *key)
3616 struct hclge_rss_config_cmd *req;
3617 struct hclge_desc desc;
3622 req = (struct hclge_rss_config_cmd *)desc.data;
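/* The RSS key does not fit in one descriptor, so it is written in three
 * chunks of HCLGE_RSS_HASH_KEY_NUM bytes each, with the last chunk
 * carrying whatever remains of HCLGE_RSS_KEY_SIZE (e.g. with a 40-byte
 * key and 16-byte chunks, the third descriptor carries the final
 * 8 bytes).
 */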
3624 for (key_offset = 0; key_offset < 3; key_offset++) {
3625 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3628 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3629 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3631 if (key_offset == 2)
3633 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3635 key_size = HCLGE_RSS_HASH_KEY_NUM;
3637 memcpy(req->hash_key,
3638 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3640 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3642 dev_err(&hdev->pdev->dev,
3643 "Configure RSS config fail, status = %d\n",
3651 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3653 struct hclge_rss_indirection_table_cmd *req;
3654 struct hclge_desc desc;
3658 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3660 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3661 hclge_cmd_setup_basic_desc
3662 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3664 req->start_table_index =
3665 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3666 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3668 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3669 req->rss_result[j] =
3670 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3672 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3674 dev_err(&hdev->pdev->dev,
3675 "Configure rss indir table fail,status = %d\n",
3683 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3684 u16 *tc_size, u16 *tc_offset)
3686 struct hclge_rss_tc_mode_cmd *req;
3687 struct hclge_desc desc;
3691 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3692 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3694 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3697 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3698 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3699 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3700 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3701 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3703 req->rss_tc_mode[i] = cpu_to_le16(mode);
3706 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3708 dev_err(&hdev->pdev->dev,
3709 "Configure rss tc mode fail, status = %d\n", ret);
3714 static void hclge_get_rss_type(struct hclge_vport *vport)
3716 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3717 vport->rss_tuple_sets.ipv4_udp_en ||
3718 vport->rss_tuple_sets.ipv4_sctp_en ||
3719 vport->rss_tuple_sets.ipv6_tcp_en ||
3720 vport->rss_tuple_sets.ipv6_udp_en ||
3721 vport->rss_tuple_sets.ipv6_sctp_en)
3722 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3723 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3724 vport->rss_tuple_sets.ipv6_fragment_en)
3725 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3727 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3730 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3732 struct hclge_rss_input_tuple_cmd *req;
3733 struct hclge_desc desc;
3736 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3738 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3740 /* Get the tuple cfg from pf */
3741 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3742 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3743 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3744 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3745 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3746 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3747 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3748 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3749 hclge_get_rss_type(&hdev->vport[0]);
3750 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3752 dev_err(&hdev->pdev->dev,
3753 "Configure rss input fail, status = %d\n", ret);
3757 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3760 struct hclge_vport *vport = hclge_get_vport(handle);
3763 /* Get hash algorithm */
3765 switch (vport->rss_algo) {
3766 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3767 *hfunc = ETH_RSS_HASH_TOP;
3769 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3770 *hfunc = ETH_RSS_HASH_XOR;
3773 *hfunc = ETH_RSS_HASH_UNKNOWN;
3778 /* Get the RSS Key required by the user */
3780 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3782 /* Get indirect table */
3784 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3785 indir[i] = vport->rss_indirection_tbl[i];
3790 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3791 const u8 *key, const u8 hfunc)
3793 struct hclge_vport *vport = hclge_get_vport(handle);
3794 struct hclge_dev *hdev = vport->back;
3798 /* Set the RSS hash key if specified by the user */
3801 case ETH_RSS_HASH_TOP:
3802 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3804 case ETH_RSS_HASH_XOR:
3805 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3807 case ETH_RSS_HASH_NO_CHANGE:
3808 hash_algo = vport->rss_algo;
3814 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3818 /* Update the shadow RSS key with the user specified key */
3819 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3820 vport->rss_algo = hash_algo;
3823 /* Update the shadow RSS table with user specified qids */
3824 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3825 vport->rss_indirection_tbl[i] = indir[i];
3827 /* Update the hardware */
3828 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3831 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3833 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3835 if (nfc->data & RXH_L4_B_2_3)
3836 hash_sets |= HCLGE_D_PORT_BIT;
3838 hash_sets &= ~HCLGE_D_PORT_BIT;
3840 if (nfc->data & RXH_IP_SRC)
3841 hash_sets |= HCLGE_S_IP_BIT;
3843 hash_sets &= ~HCLGE_S_IP_BIT;
3845 if (nfc->data & RXH_IP_DST)
3846 hash_sets |= HCLGE_D_IP_BIT;
3848 hash_sets &= ~HCLGE_D_IP_BIT;
3850 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3851 hash_sets |= HCLGE_V_TAG_BIT;
3856 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3857 struct ethtool_rxnfc *nfc)
3859 struct hclge_vport *vport = hclge_get_vport(handle);
3860 struct hclge_dev *hdev = vport->back;
3861 struct hclge_rss_input_tuple_cmd *req;
3862 struct hclge_desc desc;
3866 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3867 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3870 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3871 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3873 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3874 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3875 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3876 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3877 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3878 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3879 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3880 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3882 tuple_sets = hclge_get_rss_hash_bits(nfc);
3883 switch (nfc->flow_type) {
3885 req->ipv4_tcp_en = tuple_sets;
3888 req->ipv6_tcp_en = tuple_sets;
3891 req->ipv4_udp_en = tuple_sets;
3894 req->ipv6_udp_en = tuple_sets;
3897 req->ipv4_sctp_en = tuple_sets;
3900 if ((nfc->data & RXH_L4_B_0_1) ||
3901 (nfc->data & RXH_L4_B_2_3))
3904 req->ipv6_sctp_en = tuple_sets;
3907 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3910 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3916 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3918 dev_err(&hdev->pdev->dev,
3919 "Set rss tuple fail, status = %d\n", ret);
3923 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3924 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3925 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3926 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3927 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3928 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3929 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3930 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3931 hclge_get_rss_type(vport);
3935 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3936 struct ethtool_rxnfc *nfc)
3938 struct hclge_vport *vport = hclge_get_vport(handle);
3943 switch (nfc->flow_type) {
3945 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3948 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3951 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3954 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3957 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3960 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3964 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3973 if (tuple_sets & HCLGE_D_PORT_BIT)
3974 nfc->data |= RXH_L4_B_2_3;
3975 if (tuple_sets & HCLGE_S_PORT_BIT)
3976 nfc->data |= RXH_L4_B_0_1;
3977 if (tuple_sets & HCLGE_D_IP_BIT)
3978 nfc->data |= RXH_IP_DST;
3979 if (tuple_sets & HCLGE_S_IP_BIT)
3980 nfc->data |= RXH_IP_SRC;
3985 static int hclge_get_tc_size(struct hnae3_handle *handle)
3987 struct hclge_vport *vport = hclge_get_vport(handle);
3988 struct hclge_dev *hdev = vport->back;
3990 return hdev->rss_size_max;
3993 int hclge_rss_init_hw(struct hclge_dev *hdev)
3995 struct hclge_vport *vport = hdev->vport;
3996 u8 *rss_indir = vport[0].rss_indirection_tbl;
3997 u16 rss_size = vport[0].alloc_rss_size;
3998 u8 *key = vport[0].rss_hash_key;
3999 u8 hfunc = vport[0].rss_algo;
4000 u16 tc_offset[HCLGE_MAX_TC_NUM];
4001 u16 tc_valid[HCLGE_MAX_TC_NUM];
4002 u16 tc_size[HCLGE_MAX_TC_NUM];
4006 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4010 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4014 ret = hclge_set_rss_input_tuple(hdev);
4018 /* Each TC has the same queue size, and the tc_size set to hardware is
4019 * the log2 of rss_size rounded up to a power of two; the actual queue
4020 * size is limited by the indirection table.
4021 */
4022 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4023 dev_err(&hdev->pdev->dev,
4024 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4029 roundup_size = roundup_pow_of_two(rss_size);
4030 roundup_size = ilog2(roundup_size);
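/* e.g. rss_size = 24 gives roundup_pow_of_two(24) = 32, so tc_size = 5,
 * and each enabled TC gets an offset of rss_size * i into the queue
 * range; the indirection table then limits use to the real rss_size.
 */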
4032 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4035 if (!(hdev->hw_tc_map & BIT(i)))
4039 tc_size[i] = roundup_size;
4040 tc_offset[i] = rss_size * i;
4043 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4046 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4048 struct hclge_vport *vport = hdev->vport;
4051 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4052 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4053 vport[j].rss_indirection_tbl[i] =
4054 i % vport[j].alloc_rss_size;
4058 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4060 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4061 struct hclge_vport *vport = hdev->vport;
4063 if (hdev->pdev->revision >= 0x21)
4064 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4066 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4067 vport[i].rss_tuple_sets.ipv4_tcp_en =
4068 HCLGE_RSS_INPUT_TUPLE_OTHER;
4069 vport[i].rss_tuple_sets.ipv4_udp_en =
4070 HCLGE_RSS_INPUT_TUPLE_OTHER;
4071 vport[i].rss_tuple_sets.ipv4_sctp_en =
4072 HCLGE_RSS_INPUT_TUPLE_SCTP;
4073 vport[i].rss_tuple_sets.ipv4_fragment_en =
4074 HCLGE_RSS_INPUT_TUPLE_OTHER;
4075 vport[i].rss_tuple_sets.ipv6_tcp_en =
4076 HCLGE_RSS_INPUT_TUPLE_OTHER;
4077 vport[i].rss_tuple_sets.ipv6_udp_en =
4078 HCLGE_RSS_INPUT_TUPLE_OTHER;
4079 vport[i].rss_tuple_sets.ipv6_sctp_en =
4080 HCLGE_RSS_INPUT_TUPLE_SCTP;
4081 vport[i].rss_tuple_sets.ipv6_fragment_en =
4082 HCLGE_RSS_INPUT_TUPLE_OTHER;
4084 vport[i].rss_algo = rss_algo;
4086 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4087 HCLGE_RSS_KEY_SIZE);
4090 hclge_rss_indir_init_cfg(hdev);
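/* Binds (or unbinds, for en == false) a chain of TQP rings to an
 * interrupt vector. Each command descriptor carries at most
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so a long ring chain is
 * split across multiple HCLGE_OPC_ADD/DEL_RING_TO_VECTOR commands.
 */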
4093 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4094 int vector_id, bool en,
4095 struct hnae3_ring_chain_node *ring_chain)
4097 struct hclge_dev *hdev = vport->back;
4098 struct hnae3_ring_chain_node *node;
4099 struct hclge_desc desc;
4100 struct hclge_ctrl_vector_chain_cmd *req
4101 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4102 enum hclge_cmd_status status;
4103 enum hclge_opcode_type op;
4104 u16 tqp_type_and_id;
4107 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4108 hclge_cmd_setup_basic_desc(&desc, op, false);
4109 req->int_vector_id = vector_id;
4112 for (node = ring_chain; node; node = node->next) {
4113 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4114 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4116 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4117 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4118 HCLGE_TQP_ID_S, node->tqp_index);
4119 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4121 hnae3_get_field(node->int_gl_idx,
4122 HNAE3_RING_GL_IDX_M,
4123 HNAE3_RING_GL_IDX_S));
4124 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4125 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4126 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4127 req->vfid = vport->vport_id;
4129 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4131 dev_err(&hdev->pdev->dev,
4132 "Map TQP fail, status is %d.\n",
4138 hclge_cmd_setup_basic_desc(&desc,
4141 req->int_vector_id = vector_id;
4146 req->int_cause_num = i;
4147 req->vfid = vport->vport_id;
4148 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4150 dev_err(&hdev->pdev->dev,
4151 "Map TQP fail, status is %d.\n", status);
4159 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4161 struct hnae3_ring_chain_node *ring_chain)
4163 struct hclge_vport *vport = hclge_get_vport(handle);
4164 struct hclge_dev *hdev = vport->back;
4167 vector_id = hclge_get_vector_index(hdev, vector);
4168 if (vector_id < 0) {
4169 dev_err(&hdev->pdev->dev,
4170 "Get vector index fail. vector_id =%d\n", vector_id);
4174 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4177 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4179 struct hnae3_ring_chain_node *ring_chain)
4181 struct hclge_vport *vport = hclge_get_vport(handle);
4182 struct hclge_dev *hdev = vport->back;
4185 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4188 vector_id = hclge_get_vector_index(hdev, vector);
4189 if (vector_id < 0) {
4190 dev_err(&handle->pdev->dev,
4191 "Get vector index fail. ret =%d\n", vector_id);
4195 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4197 dev_err(&handle->pdev->dev,
4198 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4205 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4206 struct hclge_promisc_param *param)
4208 struct hclge_promisc_cfg_cmd *req;
4209 struct hclge_desc desc;
4212 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4214 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4215 req->vf_id = param->vf_id;
4217 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4218 * pdev revision(0x20); newer revisions support them. Setting
4219 * these two fields will not return an error when the driver
4220 * sends the command to firmware in revision(0x20).
4221 */
4222 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4223 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4225 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4227 dev_err(&hdev->pdev->dev,
4228 "Set promisc mode fail, status is %d.\n", ret);
4233 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4234 bool en_mc, bool en_bc, int vport_id)
4239 memset(param, 0, sizeof(struct hclge_promisc_param));
4241 param->enable = HCLGE_PROMISC_EN_UC;
4243 param->enable |= HCLGE_PROMISC_EN_MC;
4245 param->enable |= HCLGE_PROMISC_EN_BC;
4246 param->vf_id = vport_id;
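/* e.g. hclge_promisc_param_init(&param, true, false, true, 0) yields
 * param.enable = HCLGE_PROMISC_EN_UC | HCLGE_PROMISC_EN_BC for vport 0,
 * which hclge_cmd_set_promisc_mode() then shifts into req->flag.
 */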
4249 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4252 struct hclge_vport *vport = hclge_get_vport(handle);
4253 struct hclge_dev *hdev = vport->back;
4254 struct hclge_promisc_param param;
4255 bool en_bc_pmc = true;
4257 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4258 * is always bypassed. So broadcast promisc should be disabled until
4259 * the user enables promisc mode
4260 */
4261 if (handle->pdev->revision == 0x20)
4262 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4264 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4266 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4269 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4271 struct hclge_get_fd_mode_cmd *req;
4272 struct hclge_desc desc;
4275 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4277 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4279 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4281 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4285 *fd_mode = req->mode;
4290 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4291 u32 *stage1_entry_num,
4292 u32 *stage2_entry_num,
4293 u16 *stage1_counter_num,
4294 u16 *stage2_counter_num)
4296 struct hclge_get_fd_allocation_cmd *req;
4297 struct hclge_desc desc;
4300 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4302 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4304 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4306 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4311 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4312 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4313 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4314 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4319 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4321 struct hclge_set_fd_key_config_cmd *req;
4322 struct hclge_fd_key_cfg *stage;
4323 struct hclge_desc desc;
4326 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4328 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4329 stage = &hdev->fd_cfg.key_cfg[stage_num];
4330 req->stage = stage_num;
4331 req->key_select = stage->key_sel;
4332 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4333 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4334 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4335 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4336 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4337 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4339 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4341 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4346 static int hclge_init_fd_config(struct hclge_dev *hdev)
4348 #define LOW_2_WORDS 0x03
4349 struct hclge_fd_key_cfg *key_cfg;
4352 if (!hnae3_dev_fd_supported(hdev))
4355 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4359 switch (hdev->fd_cfg.fd_mode) {
4360 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4361 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4363 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4364 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4367 dev_err(&hdev->pdev->dev,
4368 "Unsupported flow director mode %d\n",
4369 hdev->fd_cfg.fd_mode);
4373 hdev->fd_cfg.proto_support =
4374 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4375 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4376 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4377 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4378 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4379 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4380 key_cfg->outer_sipv6_word_en = 0;
4381 key_cfg->outer_dipv6_word_en = 0;
4383 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4384 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4385 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4386 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4388 /* If the max 400-bit key is used, we can also support tuples for ether type */
4389 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4390 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4391 key_cfg->tuple_active |=
4392 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4395 /* roce_type is used to filter roce frames
4396 * dst_vport is used to specify the rule
4397 */
4398 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4400 ret = hclge_get_fd_allocation(hdev,
4401 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4402 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4403 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4404 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4408 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4411 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4412 int loc, u8 *key, bool is_add)
4414 struct hclge_fd_tcam_config_1_cmd *req1;
4415 struct hclge_fd_tcam_config_2_cmd *req2;
4416 struct hclge_fd_tcam_config_3_cmd *req3;
4417 struct hclge_desc desc[3];
4420 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4421 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4422 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4423 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4424 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4426 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4427 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4428 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4430 req1->stage = stage;
4431 req1->xy_sel = sel_x ? 1 : 0;
4432 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4433 req1->index = cpu_to_le32(loc);
4434 req1->entry_vld = sel_x ? is_add : 0;
4437 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4438 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4439 sizeof(req2->tcam_data));
4440 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4441 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4444 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4446 dev_err(&hdev->pdev->dev,
4447 "config tcam key fail, ret=%d\n",
4453 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4454 struct hclge_fd_ad_data *action)
4456 struct hclge_fd_ad_config_cmd *req;
4457 struct hclge_desc desc;
4461 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4463 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4464 req->index = cpu_to_le32(loc);
4467 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4468 action->write_rule_id_to_bd);
4469 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4472 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4473 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4474 action->forward_to_direct_queue);
4475 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4477 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4478 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4479 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4480 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4481 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4482 action->next_input_key);
4484 req->ad_data = cpu_to_le64(ad_data);
4485 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4487 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4492 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4493 struct hclge_fd_rule *rule)
4495 u16 tmp_x_s, tmp_y_s;
4496 u32 tmp_x_l, tmp_y_l;
4499 if (rule->unused_tuple & tuple_bit)
return true;
4502 switch (tuple_bit) {
4505 case BIT(INNER_DST_MAC):
4506 for (i = 0; i < 6; i++) {
4507 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4508 rule->tuples_mask.dst_mac[i]);
4509 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4510 rule->tuples_mask.dst_mac[i]);
4514 case BIT(INNER_SRC_MAC):
4515 for (i = 0; i < 6; i++) {
4516 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4517 rule->tuples_mask.src_mac[i]);
4518 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4519 rule->tuples_mask.src_mac[i]);
4523 case BIT(INNER_VLAN_TAG_FST):
4524 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4525 rule->tuples_mask.vlan_tag1);
4526 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4527 rule->tuples_mask.vlan_tag1);
4528 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4529 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4532 case BIT(INNER_ETH_TYPE):
4533 calc_x(tmp_x_s, rule->tuples.ether_proto,
4534 rule->tuples_mask.ether_proto);
4535 calc_y(tmp_y_s, rule->tuples.ether_proto,
4536 rule->tuples_mask.ether_proto);
4537 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4538 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4541 case BIT(INNER_IP_TOS):
4542 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4543 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4546 case BIT(INNER_IP_PROTO):
4547 calc_x(*key_x, rule->tuples.ip_proto,
4548 rule->tuples_mask.ip_proto);
4549 calc_y(*key_y, rule->tuples.ip_proto,
4550 rule->tuples_mask.ip_proto);
4553 case BIT(INNER_SRC_IP):
4554 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4555 rule->tuples_mask.src_ip[3]);
4556 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4557 rule->tuples_mask.src_ip[3]);
4558 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4559 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4562 case BIT(INNER_DST_IP):
4563 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4564 rule->tuples_mask.dst_ip[3]);
4565 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4566 rule->tuples_mask.dst_ip[3]);
4567 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4568 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4571 case BIT(INNER_SRC_PORT):
4572 calc_x(tmp_x_s, rule->tuples.src_port,
4573 rule->tuples_mask.src_port);
4574 calc_y(tmp_y_s, rule->tuples.src_port,
4575 rule->tuples_mask.src_port);
4576 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4577 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4580 case BIT(INNER_DST_PORT):
4581 calc_x(tmp_x_s, rule->tuples.dst_port,
4582 rule->tuples_mask.dst_port);
4583 calc_y(tmp_y_s, rule->tuples.dst_port,
4584 rule->tuples_mask.dst_port);
4585 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4586 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
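/* Note: each tuple above is written as an x/y pair produced by the
 * calc_x()/calc_y() helpers defined earlier in this file. In the usual
 * TCAM x/y convention, bits cleared in the mask become "don't care"
 * positions in hardware; the authoritative definition is the macros
 * themselves. MAC addresses are stored byte-reversed (key_x[5 - i]),
 * apparently to match the byte order the hardware expects for the key.
 */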
4594 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4595 u8 vf_id, u8 network_port_id)
4597 u32 port_number = 0;
4599 if (port_type == HOST_PORT) {
4600 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4602 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4604 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4606 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4607 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4608 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4614 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4615 __le32 *key_x, __le32 *key_y,
4616 struct hclge_fd_rule *rule)
4618 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4619 u8 cur_pos = 0, tuple_size, shift_bits;
4622 for (i = 0; i < MAX_META_DATA; i++) {
4623 tuple_size = meta_data_key_info[i].key_length;
4624 tuple_bit = key_cfg->meta_data_active & BIT(i);
4626 switch (tuple_bit) {
4627 case BIT(ROCE_TYPE):
4628 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4629 cur_pos += tuple_size;
4631 case BIT(DST_VPORT):
4632 port_number = hclge_get_port_number(HOST_PORT, 0,
4634 hnae3_set_field(meta_data,
4635 GENMASK(cur_pos + tuple_size - 1, cur_pos),
4636 cur_pos, port_number);
4637 cur_pos += tuple_size;
4644 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4645 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4646 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4648 *key_x = cpu_to_le32(tmp_x << shift_bits);
4649 *key_y = cpu_to_le32(tmp_y << shift_bits);
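/* Illustrative example (the per-field widths come from
 * meta_data_key_info[] and are assumptions here): if ROCE_TYPE takes
 * 1 bit and DST_VPORT takes n bits, the loop packs them from bit 0
 * upward and leaves cur_pos = n + 1; the final shift by (32 - cur_pos)
 * bits then left-justifies the packed fields so the meta data sits at
 * the MSB end of its 32-bit key word.
 */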
4652 /* A complete key is the combination of a meta data key and a tuple key.
4653 * The meta data key is stored in the MSB region, the tuple key in the
4654 * LSB region, and unused bits are filled with zeros.
*/
4656 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4657 struct hclge_fd_rule *rule)
4659 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4660 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4661 u8 *cur_key_x, *cur_key_y;
4662 int i, ret, tuple_size;
4663 u8 meta_data_region;
4665 memset(key_x, 0, sizeof(key_x));
4666 memset(key_y, 0, sizeof(key_y));
4670 for (i = 0; i < MAX_TUPLE; i++) {
4674 tuple_size = tuple_key_info[i].key_length / 8;
4675 check_tuple = key_cfg->tuple_active & BIT(i);
4677 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4680 cur_key_x += tuple_size;
4681 cur_key_y += tuple_size;
4685 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4686 MAX_META_DATA_LENGTH / 8;
4688 hclge_fd_convert_meta_data(key_cfg,
4689 (__le32 *)(key_x + meta_data_region),
4690 (__le32 *)(key_y + meta_data_region),
4693 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4696 dev_err(&hdev->pdev->dev,
4697 "fd key_y config fail, loc=%d, ret=%d\n",
4698 rule->queue_id, ret);
4702 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4705 dev_err(&hdev->pdev->dev,
4706 "fd key_x config fail, loc=%d, ret=%d\n",
4707 rule->queue_id, ret);
4711 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4712 struct hclge_fd_rule *rule)
4714 struct hclge_fd_ad_data ad_data;
4716 ad_data.ad_id = rule->location;
4718 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4719 ad_data.drop_packet = true;
4720 ad_data.forward_to_direct_queue = false;
4721 ad_data.queue_id = 0;
4723 ad_data.drop_packet = false;
4724 ad_data.forward_to_direct_queue = true;
4725 ad_data.queue_id = rule->queue_id;
4728 ad_data.use_counter = false;
4729 ad_data.counter_id = 0;
4731 ad_data.use_next_stage = false;
4732 ad_data.next_input_key = 0;
4734 ad_data.write_rule_id_to_bd = true;
4735 ad_data.rule_id = rule->location;
4737 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
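/* Worked example: a drop rule at location 5 programs the action word
 * with drop_packet = true, forward_to_direct_queue = false,
 * queue_id = 0 and rule_id = ad_id = 5; write_rule_id_to_bd = true asks
 * the hardware to copy the rule id into the receive descriptor so the
 * matching rule can be identified later.
 */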
4740 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4741 struct ethtool_rx_flow_spec *fs, u32 *unused)
4743 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4744 struct ethtool_usrip4_spec *usr_ip4_spec;
4745 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4746 struct ethtool_usrip6_spec *usr_ip6_spec;
4747 struct ethhdr *ether_spec;
4749 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4752 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4755 if ((fs->flow_type & FLOW_EXT) &&
4756 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4757 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4761 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4765 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4766 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4768 if (!tcp_ip4_spec->ip4src)
4769 *unused |= BIT(INNER_SRC_IP);
4771 if (!tcp_ip4_spec->ip4dst)
4772 *unused |= BIT(INNER_DST_IP);
4774 if (!tcp_ip4_spec->psrc)
4775 *unused |= BIT(INNER_SRC_PORT);
4777 if (!tcp_ip4_spec->pdst)
4778 *unused |= BIT(INNER_DST_PORT);
4780 if (!tcp_ip4_spec->tos)
4781 *unused |= BIT(INNER_IP_TOS);
4785 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4786 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4787 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4789 if (!usr_ip4_spec->ip4src)
4790 *unused |= BIT(INNER_SRC_IP);
4792 if (!usr_ip4_spec->ip4dst)
4793 *unused |= BIT(INNER_DST_IP);
4795 if (!usr_ip4_spec->tos)
4796 *unused |= BIT(INNER_IP_TOS);
4798 if (!usr_ip4_spec->proto)
4799 *unused |= BIT(INNER_IP_PROTO);
4801 if (usr_ip4_spec->l4_4_bytes)
4804 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4811 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4812 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4815 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4816 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4817 *unused |= BIT(INNER_SRC_IP);
4819 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4820 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4821 *unused |= BIT(INNER_DST_IP);
4823 if (!tcp_ip6_spec->psrc)
4824 *unused |= BIT(INNER_SRC_PORT);
4826 if (!tcp_ip6_spec->pdst)
4827 *unused |= BIT(INNER_DST_PORT);
4829 if (tcp_ip6_spec->tclass)
4833 case IPV6_USER_FLOW:
4834 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4835 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4836 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4837 BIT(INNER_DST_PORT);
4839 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4840 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4841 *unused |= BIT(INNER_SRC_IP);
4843 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4844 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4845 *unused |= BIT(INNER_DST_IP);
4847 if (!usr_ip6_spec->l4_proto)
4848 *unused |= BIT(INNER_IP_PROTO);
4850 if (usr_ip6_spec->tclass)
4853 if (usr_ip6_spec->l4_4_bytes)
4858 ether_spec = &fs->h_u.ether_spec;
4859 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4860 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4861 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4863 if (is_zero_ether_addr(ether_spec->h_source))
4864 *unused |= BIT(INNER_SRC_MAC);
4866 if (is_zero_ether_addr(ether_spec->h_dest))
4867 *unused |= BIT(INNER_DST_MAC);
4869 if (!ether_spec->h_proto)
4870 *unused |= BIT(INNER_ETH_TYPE);
4877 if ((fs->flow_type & FLOW_EXT)) {
4878 if (fs->h_ext.vlan_etype)
4880 if (!fs->h_ext.vlan_tci)
4881 *unused |= BIT(INNER_VLAN_TAG_FST);
4883 if (fs->m_ext.vlan_tci) {
4884 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4888 *unused |= BIT(INNER_VLAN_TAG_FST);
4891 if (fs->flow_type & FLOW_MAC_EXT) {
4892 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4895 if (is_zero_ether_addr(fs->h_ext.h_dest))
4896 *unused |= BIT(INNER_DST_MAC);
4898 *unused &= ~(BIT(INNER_DST_MAC));
4904 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4906 struct hclge_fd_rule *rule = NULL;
4907 struct hlist_node *node2;
4909 spin_lock_bh(&hdev->fd_rule_lock);
4910 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4911 if (rule->location >= location)
4915 spin_unlock_bh(&hdev->fd_rule_lock);
4917 return rule && rule->location == location;
4920 /* make sure being called after lock up with fd_rule_lock */
4921 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4922 struct hclge_fd_rule *new_rule,
4926 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4927 struct hlist_node *node2;
4929 if (is_add && !new_rule)
4932 hlist_for_each_entry_safe(rule, node2,
4933 &hdev->fd_rule_list, rule_node) {
4934 if (rule->location >= location)
break;
parent = rule;
}
4939 if (rule && rule->location == location) {
4940 hlist_del(&rule->rule_node);
4942 hdev->hclge_fd_rule_num--;
4945 if (!hdev->hclge_fd_rule_num)
4946 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4947 clear_bit(location, hdev->fd_bmap);
4951 } else if (!is_add) {
4952 dev_err(&hdev->pdev->dev,
4953 "delete fail, rule %d is inexistent\n",
4958 INIT_HLIST_NODE(&new_rule->rule_node);
4961 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4963 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4965 set_bit(location, hdev->fd_bmap);
4966 hdev->hclge_fd_rule_num++;
4967 hdev->fd_active_type = new_rule->rule_type;
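/* The list walk above keeps hdev->fd_rule_list sorted by ascending
 * location: parent ends up as the last node with a smaller location, so
 * the new rule is linked behind it, or at the head when no such node
 * exists.
 */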
4972 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4973 struct ethtool_rx_flow_spec *fs,
4974 struct hclge_fd_rule *rule)
4976 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4978 switch (flow_type) {
4982 rule->tuples.src_ip[3] =
4983 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4984 rule->tuples_mask.src_ip[3] =
4985 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4987 rule->tuples.dst_ip[3] =
4988 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4989 rule->tuples_mask.dst_ip[3] =
4990 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4992 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4993 rule->tuples_mask.src_port =
4994 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4996 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4997 rule->tuples_mask.dst_port =
4998 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5000 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5001 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5003 rule->tuples.ether_proto = ETH_P_IP;
5004 rule->tuples_mask.ether_proto = 0xFFFF;
5008 rule->tuples.src_ip[3] =
5009 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5010 rule->tuples_mask.src_ip[3] =
5011 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5013 rule->tuples.dst_ip[3] =
5014 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5015 rule->tuples_mask.dst_ip[3] =
5016 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5018 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5019 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5021 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5022 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5024 rule->tuples.ether_proto = ETH_P_IP;
5025 rule->tuples_mask.ether_proto = 0xFFFF;
5031 be32_to_cpu_array(rule->tuples.src_ip,
5032 fs->h_u.tcp_ip6_spec.ip6src, 4);
5033 be32_to_cpu_array(rule->tuples_mask.src_ip,
5034 fs->m_u.tcp_ip6_spec.ip6src, 4);
5036 be32_to_cpu_array(rule->tuples.dst_ip,
5037 fs->h_u.tcp_ip6_spec.ip6dst, 4);
5038 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5039 fs->m_u.tcp_ip6_spec.ip6dst, 4);
5041 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5042 rule->tuples_mask.src_port =
5043 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5045 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5046 rule->tuples_mask.dst_port =
5047 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5049 rule->tuples.ether_proto = ETH_P_IPV6;
5050 rule->tuples_mask.ether_proto = 0xFFFF;
5053 case IPV6_USER_FLOW:
5054 be32_to_cpu_array(rule->tuples.src_ip,
5055 fs->h_u.usr_ip6_spec.ip6src, 4);
5056 be32_to_cpu_array(rule->tuples_mask.src_ip,
5057 fs->m_u.usr_ip6_spec.ip6src, 4);
5059 be32_to_cpu_array(rule->tuples.dst_ip,
5060 fs->h_u.usr_ip6_spec.ip6dst, 4);
5061 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5062 fs->m_u.usr_ip6_spec.ip6dst, 4);
5064 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5065 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5067 rule->tuples.ether_proto = ETH_P_IPV6;
5068 rule->tuples_mask.ether_proto = 0xFFFF;
5072 ether_addr_copy(rule->tuples.src_mac,
5073 fs->h_u.ether_spec.h_source);
5074 ether_addr_copy(rule->tuples_mask.src_mac,
5075 fs->m_u.ether_spec.h_source);
5077 ether_addr_copy(rule->tuples.dst_mac,
5078 fs->h_u.ether_spec.h_dest);
5079 ether_addr_copy(rule->tuples_mask.dst_mac,
5080 fs->m_u.ether_spec.h_dest);
5082 rule->tuples.ether_proto =
5083 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5084 rule->tuples_mask.ether_proto =
5085 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5092 switch (flow_type) {
5095 rule->tuples.ip_proto = IPPROTO_SCTP;
5096 rule->tuples_mask.ip_proto = 0xFF;
5100 rule->tuples.ip_proto = IPPROTO_TCP;
5101 rule->tuples_mask.ip_proto = 0xFF;
5105 rule->tuples.ip_proto = IPPROTO_UDP;
5106 rule->tuples_mask.ip_proto = 0xFF;
5112 if ((fs->flow_type & FLOW_EXT)) {
5113 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5114 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5117 if (fs->flow_type & FLOW_MAC_EXT) {
5118 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5119 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5125 /* make sure being called after lock up with fd_rule_lock */
5126 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5127 struct hclge_fd_rule *rule)
5132 dev_err(&hdev->pdev->dev,
5133 "The flow director rule is NULL\n");
5137 /* it never fails here, so there is no need to check the return value */
5138 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5140 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5144 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5151 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5155 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5156 struct ethtool_rxnfc *cmd)
5158 struct hclge_vport *vport = hclge_get_vport(handle);
5159 struct hclge_dev *hdev = vport->back;
5160 u16 dst_vport_id = 0, q_index = 0;
5161 struct ethtool_rx_flow_spec *fs;
5162 struct hclge_fd_rule *rule;
5167 if (!hnae3_dev_fd_supported(hdev))
5171 dev_warn(&hdev->pdev->dev,
5172 "Please enable flow director first\n");
5176 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5178 ret = hclge_fd_check_spec(hdev, fs, &unused);
5180 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5184 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5185 action = HCLGE_FD_ACTION_DROP_PACKET;
5187 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5188 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5191 if (vf > hdev->num_req_vfs) {
5192 dev_err(&hdev->pdev->dev,
5193 "Error: vf id (%d) > max vf num (%d)\n",
5194 vf, hdev->num_req_vfs);
5198 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5199 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5202 dev_err(&hdev->pdev->dev,
5203 "Error: queue id (%d) > max tqp num (%d)\n",
5208 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5212 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5216 ret = hclge_fd_get_tuple(hdev, fs, rule);
5222 rule->flow_type = fs->flow_type;
5224 rule->location = fs->location;
5225 rule->unused_tuple = unused;
5226 rule->vf_id = dst_vport_id;
5227 rule->queue_id = q_index;
5228 rule->action = action;
5229 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5231 /* to avoid rule conflicts, we need to clear all arfs rules when the
5232 * user configures a rule via ethtool
*/
5234 hclge_clear_arfs_rules(handle);
5236 spin_lock_bh(&hdev->fd_rule_lock);
5237 ret = hclge_fd_config_rule(hdev, rule);
5239 spin_unlock_bh(&hdev->fd_rule_lock);
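/* This path is reached through ethtool's network flow classifier
 * interface; an illustrative invocation (hypothetical device name):
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.1 dst-port 80 action 3
 * hclge_fd_check_spec() validates the spec and hclge_fd_get_tuple()
 * converts it into the rule tuples configured above.
 */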
5244 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5245 struct ethtool_rxnfc *cmd)
5247 struct hclge_vport *vport = hclge_get_vport(handle);
5248 struct hclge_dev *hdev = vport->back;
5249 struct ethtool_rx_flow_spec *fs;
5252 if (!hnae3_dev_fd_supported(hdev))
5255 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5257 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5260 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5261 dev_err(&hdev->pdev->dev,
5262 "Delete fail, rule %d is inexistent\n",
5267 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5268 fs->location, NULL, false);
5272 spin_lock_bh(&hdev->fd_rule_lock);
5273 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5275 spin_unlock_bh(&hdev->fd_rule_lock);
5280 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5283 struct hclge_vport *vport = hclge_get_vport(handle);
5284 struct hclge_dev *hdev = vport->back;
5285 struct hclge_fd_rule *rule;
5286 struct hlist_node *node;
5289 if (!hnae3_dev_fd_supported(hdev))
5292 spin_lock_bh(&hdev->fd_rule_lock);
5293 for_each_set_bit(location, hdev->fd_bmap,
5294 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5295 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5299 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5301 hlist_del(&rule->rule_node);
5304 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5305 hdev->hclge_fd_rule_num = 0;
5306 bitmap_zero(hdev->fd_bmap,
5307 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5310 spin_unlock_bh(&hdev->fd_rule_lock);
5313 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5315 struct hclge_vport *vport = hclge_get_vport(handle);
5316 struct hclge_dev *hdev = vport->back;
5317 struct hclge_fd_rule *rule;
5318 struct hlist_node *node;
5321 /* Return ok here, because reset error handling will check this
5322 * return value. If error is returned here, the reset process will
* fail.
*/
5325 if (!hnae3_dev_fd_supported(hdev))
5328 /* if fd is disabled, it should not be restored during reset */
5332 spin_lock_bh(&hdev->fd_rule_lock);
5333 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5334 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5336 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5339 dev_warn(&hdev->pdev->dev,
5340 "Restore rule %d failed, remove it\n",
5342 clear_bit(rule->location, hdev->fd_bmap);
5343 hlist_del(&rule->rule_node);
5345 hdev->hclge_fd_rule_num--;
5349 if (hdev->hclge_fd_rule_num)
5350 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5352 spin_unlock_bh(&hdev->fd_rule_lock);
5357 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5358 struct ethtool_rxnfc *cmd)
5360 struct hclge_vport *vport = hclge_get_vport(handle);
5361 struct hclge_dev *hdev = vport->back;
5363 if (!hnae3_dev_fd_supported(hdev))
5366 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5367 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5372 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5373 struct ethtool_rxnfc *cmd)
5375 struct hclge_vport *vport = hclge_get_vport(handle);
5376 struct hclge_fd_rule *rule = NULL;
5377 struct hclge_dev *hdev = vport->back;
5378 struct ethtool_rx_flow_spec *fs;
5379 struct hlist_node *node2;
5381 if (!hnae3_dev_fd_supported(hdev))
5384 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5386 spin_lock_bh(&hdev->fd_rule_lock);
5388 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5389 if (rule->location >= fs->location)
5393 if (!rule || fs->location != rule->location) {
5394 spin_unlock_bh(&hdev->fd_rule_lock);
5399 fs->flow_type = rule->flow_type;
5400 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5404 fs->h_u.tcp_ip4_spec.ip4src =
5405 cpu_to_be32(rule->tuples.src_ip[3]);
5406 fs->m_u.tcp_ip4_spec.ip4src =
5407 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5408 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5410 fs->h_u.tcp_ip4_spec.ip4dst =
5411 cpu_to_be32(rule->tuples.dst_ip[3]);
5412 fs->m_u.tcp_ip4_spec.ip4dst =
5413 rule->unused_tuple & BIT(INNER_DST_IP) ?
5414 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5416 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5417 fs->m_u.tcp_ip4_spec.psrc =
5418 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5419 0 : cpu_to_be16(rule->tuples_mask.src_port);
5421 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5422 fs->m_u.tcp_ip4_spec.pdst =
5423 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5424 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5426 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5427 fs->m_u.tcp_ip4_spec.tos =
5428 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5429 0 : rule->tuples_mask.ip_tos;
5433 fs->h_u.usr_ip4_spec.ip4src =
5434 cpu_to_be32(rule->tuples.src_ip[3]);
5435 fs->m_u.usr_ip4_spec.ip4src =
5436 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5437 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5439 fs->h_u.usr_ip4_spec.ip4dst =
5440 cpu_to_be32(rule->tuples.dst_ip[3]);
5441 fs->m_u.usr_ip4_spec.ip4dst =
5442 rule->unused_tuple & BIT(INNER_DST_IP) ?
5443 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5445 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5446 fs->m_u.usr_ip4_spec.tos =
5447 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5448 0 : rule->tuples_mask.ip_tos;
5450 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5451 fs->m_u.usr_ip4_spec.proto =
5452 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5453 0 : rule->tuples_mask.ip_proto;
5455 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5461 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5462 rule->tuples.src_ip, 4);
5463 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5464 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5466 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5467 rule->tuples_mask.src_ip, 4);
5469 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5470 rule->tuples.dst_ip, 4);
5471 if (rule->unused_tuple & BIT(INNER_DST_IP))
5472 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5474 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5475 rule->tuples_mask.dst_ip, 4);
5477 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5478 fs->m_u.tcp_ip6_spec.psrc =
5479 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5480 0 : cpu_to_be16(rule->tuples_mask.src_port);
5482 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5483 fs->m_u.tcp_ip6_spec.pdst =
5484 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5485 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5488 case IPV6_USER_FLOW:
5489 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5490 rule->tuples.src_ip, 4);
5491 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5492 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5494 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5495 rule->tuples_mask.src_ip, 4);
5497 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5498 rule->tuples.dst_ip, 4);
5499 if (rule->unused_tuple & BIT(INNER_DST_IP))
5500 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5502 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5503 rule->tuples_mask.dst_ip, 4);
5505 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5506 fs->m_u.usr_ip6_spec.l4_proto =
5507 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5508 0 : rule->tuples_mask.ip_proto;
5512 ether_addr_copy(fs->h_u.ether_spec.h_source,
5513 rule->tuples.src_mac);
5514 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5515 eth_zero_addr(fs->m_u.ether_spec.h_source);
5517 ether_addr_copy(fs->m_u.ether_spec.h_source,
5518 rule->tuples_mask.src_mac);
5520 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5521 rule->tuples.dst_mac);
5522 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5523 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5525 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5526 rule->tuples_mask.dst_mac);
5528 fs->h_u.ether_spec.h_proto =
5529 cpu_to_be16(rule->tuples.ether_proto);
5530 fs->m_u.ether_spec.h_proto =
5531 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5532 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5536 spin_unlock_bh(&hdev->fd_rule_lock);
5540 if (fs->flow_type & FLOW_EXT) {
5541 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5542 fs->m_ext.vlan_tci =
5543 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5544 cpu_to_be16(VLAN_VID_MASK) :
5545 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5548 if (fs->flow_type & FLOW_MAC_EXT) {
5549 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5550 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5551 eth_zero_addr(fs->m_ext.h_dest);
5553 ether_addr_copy(fs->m_ext.h_dest,
5554 rule->tuples_mask.dst_mac);
5557 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5558 fs->ring_cookie = RX_CLS_FLOW_DISC;
5562 fs->ring_cookie = rule->queue_id;
5563 vf_id = rule->vf_id;
5564 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5565 fs->ring_cookie |= vf_id;
5568 spin_unlock_bh(&hdev->fd_rule_lock);
5573 static int hclge_get_all_rules(struct hnae3_handle *handle,
5574 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5576 struct hclge_vport *vport = hclge_get_vport(handle);
5577 struct hclge_dev *hdev = vport->back;
5578 struct hclge_fd_rule *rule;
5579 struct hlist_node *node2;
5582 if (!hnae3_dev_fd_supported(hdev))
5585 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5587 spin_lock_bh(&hdev->fd_rule_lock);
5588 hlist_for_each_entry_safe(rule, node2,
5589 &hdev->fd_rule_list, rule_node) {
5590 if (cnt == cmd->rule_cnt) {
5591 spin_unlock_bh(&hdev->fd_rule_lock);
5595 rule_locs[cnt] = rule->location;
5599 spin_unlock_bh(&hdev->fd_rule_lock);
5601 cmd->rule_cnt = cnt;
5606 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5607 struct hclge_fd_rule_tuples *tuples)
5609 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5610 tuples->ip_proto = fkeys->basic.ip_proto;
5611 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5613 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5614 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5615 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5617 memcpy(tuples->src_ip,
5618 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5619 sizeof(tuples->src_ip));
5620 memcpy(tuples->dst_ip,
5621 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5622 sizeof(tuples->dst_ip));
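/* Note: only the destination port is taken from the flow keys; the
 * source port stays zero and is marked unused when the aRFS rule is
 * built below, so aRFS steering matches on protocol, addresses and
 * destination port only.
 */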
5626 /* traverse all rules, check whether an existing rule has the same tuples */
5627 static struct hclge_fd_rule *
5628 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5629 const struct hclge_fd_rule_tuples *tuples)
5631 struct hclge_fd_rule *rule = NULL;
5632 struct hlist_node *node;
5634 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5635 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5642 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5643 struct hclge_fd_rule *rule)
5645 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5646 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5647 BIT(INNER_SRC_PORT);
5650 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5651 if (tuples->ether_proto == ETH_P_IP) {
5652 if (tuples->ip_proto == IPPROTO_TCP)
5653 rule->flow_type = TCP_V4_FLOW;
5655 rule->flow_type = UDP_V4_FLOW;
5657 if (tuples->ip_proto == IPPROTO_TCP)
5658 rule->flow_type = TCP_V6_FLOW;
5660 rule->flow_type = UDP_V6_FLOW;
5662 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5663 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5666 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5667 u16 flow_id, struct flow_keys *fkeys)
5669 struct hclge_vport *vport = hclge_get_vport(handle);
5670 struct hclge_fd_rule_tuples new_tuples;
5671 struct hclge_dev *hdev = vport->back;
5672 struct hclge_fd_rule *rule;
5677 if (!hnae3_dev_fd_supported(hdev))
5680 memset(&new_tuples, 0, sizeof(new_tuples));
5681 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5683 spin_lock_bh(&hdev->fd_rule_lock);
5685 /* when fd rules configured by the user already exist,
5686 * arfs should not work
*/
5688 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5689 spin_unlock_bh(&hdev->fd_rule_lock);
5694 /* check whether a flow director filter exists for this flow.
5695 * If not, create a new filter for it;
5696 * if a filter exists with a different queue id, modify the filter;
5697 * if a filter exists with the same queue id, do nothing.
*/
5699 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5701 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5702 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5703 spin_unlock_bh(&hdev->fd_rule_lock);
5708 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5710 spin_unlock_bh(&hdev->fd_rule_lock);
5715 set_bit(bit_id, hdev->fd_bmap);
5716 rule->location = bit_id;
5717 rule->flow_id = flow_id;
5718 rule->queue_id = queue_id;
5719 hclge_fd_build_arfs_rule(&new_tuples, rule);
5720 ret = hclge_fd_config_rule(hdev, rule);
5722 spin_unlock_bh(&hdev->fd_rule_lock);
5727 return rule->location;
5730 spin_unlock_bh(&hdev->fd_rule_lock);
5732 if (rule->queue_id == queue_id)
5733 return rule->location;
5735 tmp_queue_id = rule->queue_id;
5736 rule->queue_id = queue_id;
5737 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5739 rule->queue_id = tmp_queue_id;
5743 return rule->location;
5746 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5748 #ifdef CONFIG_RFS_ACCEL
5749 struct hnae3_handle *handle = &hdev->vport[0].nic;
5750 struct hclge_fd_rule *rule;
5751 struct hlist_node *node;
5752 HLIST_HEAD(del_list);
5754 spin_lock_bh(&hdev->fd_rule_lock);
5755 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5756 spin_unlock_bh(&hdev->fd_rule_lock);
5759 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5760 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5761 rule->flow_id, rule->location)) {
5762 hlist_del_init(&rule->rule_node);
5763 hlist_add_head(&rule->rule_node, &del_list);
5764 hdev->hclge_fd_rule_num--;
5765 clear_bit(rule->location, hdev->fd_bmap);
5768 spin_unlock_bh(&hdev->fd_rule_lock);
5770 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5771 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5772 rule->location, NULL, false);
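/* Expired rules are unlinked under fd_rule_lock and collected on a local
 * del_list first; the TCAM disable commands above then run with the lock
 * released, so the BH spinlock is not held across the slow firmware
 * command turnaround.
 */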
5778 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5780 #ifdef CONFIG_RFS_ACCEL
5781 struct hclge_vport *vport = hclge_get_vport(handle);
5782 struct hclge_dev *hdev = vport->back;
5784 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5785 hclge_del_all_fd_entries(handle, true);
5789 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5791 struct hclge_vport *vport = hclge_get_vport(handle);
5792 struct hclge_dev *hdev = vport->back;
5794 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5795 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5798 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5800 struct hclge_vport *vport = hclge_get_vport(handle);
5801 struct hclge_dev *hdev = vport->back;
5803 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5806 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5808 struct hclge_vport *vport = hclge_get_vport(handle);
5809 struct hclge_dev *hdev = vport->back;
5811 return hdev->rst_stats.hw_reset_done_cnt;
5814 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5816 struct hclge_vport *vport = hclge_get_vport(handle);
5817 struct hclge_dev *hdev = vport->back;
5820 hdev->fd_en = enable;
5821 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5823 hclge_del_all_fd_entries(handle, clear);
5825 hclge_restore_fd_entries(handle);
5828 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5830 struct hclge_desc desc;
5831 struct hclge_config_mac_mode_cmd *req =
5832 (struct hclge_config_mac_mode_cmd *)desc.data;
5836 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5837 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5838 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5839 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5840 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5841 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5842 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5843 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5844 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5845 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5846 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5847 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5848 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5849 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5850 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5851 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5853 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5855 dev_err(&hdev->pdev->dev,
5856 "mac enable fail, ret =%d.\n", ret);
5859 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5861 struct hclge_config_mac_mode_cmd *req;
5862 struct hclge_desc desc;
5866 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5867 /* 1 Read out the MAC mode config at first */
5868 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5869 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5871 dev_err(&hdev->pdev->dev,
5872 "mac loopback get fail, ret =%d.\n", ret);
5876 /* 2 Then setup the loopback flag */
5877 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5878 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5879 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5880 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5882 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5884 /* 3 Config mac work mode with loopback flag
5885 * and its original configure parameters
5887 hclge_cmd_reuse_desc(&desc, false);
5888 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5890 dev_err(&hdev->pdev->dev,
5891 "mac loopback set fail, ret =%d.\n", ret);
5895 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5896 enum hnae3_loop loop_mode)
5898 #define HCLGE_SERDES_RETRY_MS 10
5899 #define HCLGE_SERDES_RETRY_NUM 100
5901 #define HCLGE_MAC_LINK_STATUS_MS 10
5902 #define HCLGE_MAC_LINK_STATUS_NUM 100
5903 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5904 #define HCLGE_MAC_LINK_STATUS_UP 1
5906 struct hclge_serdes_lb_cmd *req;
5907 struct hclge_desc desc;
5908 int mac_link_ret = 0;
5912 req = (struct hclge_serdes_lb_cmd *)desc.data;
5913 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5915 switch (loop_mode) {
5916 case HNAE3_LOOP_SERIAL_SERDES:
5917 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5919 case HNAE3_LOOP_PARALLEL_SERDES:
5920 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5923 dev_err(&hdev->pdev->dev,
5924 "unsupported serdes loopback mode %d\n", loop_mode);
5929 req->enable = loop_mode_b;
5930 req->mask = loop_mode_b;
5931 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5933 req->mask = loop_mode_b;
5934 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5937 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5939 dev_err(&hdev->pdev->dev,
5940 "serdes loopback set fail, ret = %d\n", ret);
5945 msleep(HCLGE_SERDES_RETRY_MS);
5946 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5948 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5950 dev_err(&hdev->pdev->dev,
5951 "serdes loopback get, ret = %d\n", ret);
5954 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5955 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5957 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5958 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5960 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5961 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5965 hclge_cfg_mac_mode(hdev, en);
5969 /* serdes internal loopback, independent of the network cable. */
5970 msleep(HCLGE_MAC_LINK_STATUS_MS);
5971 ret = hclge_get_mac_link_status(hdev);
5972 if (ret == mac_link_ret)
5974 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5976 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
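/* With the constants above, the loop polls the MAC link state every
 * HCLGE_MAC_LINK_STATUS_MS (10 ms) up to HCLGE_MAC_LINK_STATUS_NUM (100)
 * times, i.e. the loopback link change is given roughly one second to
 * settle before the timeout above is reported.
 */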
5981 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5982 int stream_id, bool enable)
5984 struct hclge_desc desc;
5985 struct hclge_cfg_com_tqp_queue_cmd *req =
5986 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5989 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5990 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5991 req->stream_id = cpu_to_le16(stream_id);
5992 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5994 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5996 dev_err(&hdev->pdev->dev,
5997 "Tqp enable fail, status =%d.\n", ret);
6001 static int hclge_set_loopback(struct hnae3_handle *handle,
6002 enum hnae3_loop loop_mode, bool en)
6004 struct hclge_vport *vport = hclge_get_vport(handle);
6005 struct hnae3_knic_private_info *kinfo;
6006 struct hclge_dev *hdev = vport->back;
6009 switch (loop_mode) {
6010 case HNAE3_LOOP_APP:
6011 ret = hclge_set_app_loopback(hdev, en);
6013 case HNAE3_LOOP_SERIAL_SERDES:
6014 case HNAE3_LOOP_PARALLEL_SERDES:
6015 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6019 dev_err(&hdev->pdev->dev,
6020 "loop_mode %d is not supported\n", loop_mode);
6027 kinfo = &vport->nic.kinfo;
6028 for (i = 0; i < kinfo->num_tqps; i++) {
6029 ret = hclge_tqp_enable(hdev, i, 0, en);
6037 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6039 struct hclge_vport *vport = hclge_get_vport(handle);
6040 struct hnae3_knic_private_info *kinfo;
6041 struct hnae3_queue *queue;
6042 struct hclge_tqp *tqp;
6045 kinfo = &vport->nic.kinfo;
6046 for (i = 0; i < kinfo->num_tqps; i++) {
6047 queue = handle->kinfo.tqp[i];
6048 tqp = container_of(queue, struct hclge_tqp, q);
6049 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6053 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6055 struct hclge_vport *vport = hclge_get_vport(handle);
6056 struct hclge_dev *hdev = vport->back;
6059 mod_timer(&hdev->service_timer, jiffies + HZ);
6061 del_timer_sync(&hdev->service_timer);
6062 cancel_work_sync(&hdev->service_task);
6063 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6067 static int hclge_ae_start(struct hnae3_handle *handle)
6069 struct hclge_vport *vport = hclge_get_vport(handle);
6070 struct hclge_dev *hdev = vport->back;
6073 hclge_cfg_mac_mode(hdev, true);
6074 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6075 hdev->hw.mac.link = 0;
6077 /* reset tqp stats */
6078 hclge_reset_tqp_stats(handle);
6080 hclge_mac_start_phy(hdev);
6085 static void hclge_ae_stop(struct hnae3_handle *handle)
6087 struct hclge_vport *vport = hclge_get_vport(handle);
6088 struct hclge_dev *hdev = vport->back;
6091 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6093 hclge_clear_arfs_rules(handle);
6095 /* If it is not a PF reset, the firmware will disable the MAC,
6096 * so it only needs to stop the phy here.
*/
6098 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6099 hdev->reset_type != HNAE3_FUNC_RESET) {
6100 hclge_mac_stop_phy(hdev);
6104 for (i = 0; i < handle->kinfo.num_tqps; i++)
6105 hclge_reset_tqp(handle, i);
6108 hclge_cfg_mac_mode(hdev, false);
6110 hclge_mac_stop_phy(hdev);
6112 /* reset tqp stats */
6113 hclge_reset_tqp_stats(handle);
6114 hclge_update_link_status(hdev);
6117 int hclge_vport_start(struct hclge_vport *vport)
6119 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6120 vport->last_active_jiffies = jiffies;
6124 void hclge_vport_stop(struct hclge_vport *vport)
6126 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6129 static int hclge_client_start(struct hnae3_handle *handle)
6131 struct hclge_vport *vport = hclge_get_vport(handle);
6133 return hclge_vport_start(vport);
6136 static void hclge_client_stop(struct hnae3_handle *handle)
6138 struct hclge_vport *vport = hclge_get_vport(handle);
6140 hclge_vport_stop(vport);
6143 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6144 u16 cmdq_resp, u8 resp_code,
6145 enum hclge_mac_vlan_tbl_opcode op)
6147 struct hclge_dev *hdev = vport->back;
6148 int return_status = -EIO;
6151 dev_err(&hdev->pdev->dev,
6152 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6157 if (op == HCLGE_MAC_VLAN_ADD) {
6158 if ((!resp_code) || (resp_code == 1)) {
6160 } else if (resp_code == 2) {
6161 return_status = -ENOSPC;
6162 dev_err(&hdev->pdev->dev,
6163 "add mac addr failed for uc_overflow.\n");
6164 } else if (resp_code == 3) {
6165 return_status = -ENOSPC;
6166 dev_err(&hdev->pdev->dev,
6167 "add mac addr failed for mc_overflow.\n");
6169 dev_err(&hdev->pdev->dev,
6170 "add mac addr failed for undefined, code=%d.\n",
6173 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6176 } else if (resp_code == 1) {
6177 return_status = -ENOENT;
6178 dev_dbg(&hdev->pdev->dev,
6179 "remove mac addr failed for miss.\n");
6181 dev_err(&hdev->pdev->dev,
6182 "remove mac addr failed for undefined, code=%d.\n",
6185 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6188 } else if (resp_code == 1) {
6189 return_status = -ENOENT;
6190 dev_dbg(&hdev->pdev->dev,
6191 "lookup mac addr failed for miss.\n");
6193 dev_err(&hdev->pdev->dev,
6194 "lookup mac addr failed for undefined, code=%d.\n",
6198 return_status = -EINVAL;
6199 dev_err(&hdev->pdev->dev,
6200 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6204 return return_status;
6207 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6212 if (vfid > 255 || vfid < 0)
6215 if (vfid >= 0 && vfid <= 191) {
6216 word_num = vfid / 32;
6217 bit_num = vfid % 32;
6219 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6221 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6223 word_num = (vfid - 192) / 32;
6224 bit_num = vfid % 32;
6226 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6228 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
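/* Layout sketch: function ids 0..191 map onto the six 32-bit words of
 * desc[1].data (vfid / 32 selects the word, vfid % 32 the bit) and ids
 * 192..255 onto the first two words of desc[2].data, giving one valid
 * bit per function across the multi-descriptor table entry.
 */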
6234 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6236 #define HCLGE_DESC_NUMBER 3
6237 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6240 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6241 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6242 if (desc[i].data[j])
6248 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6249 const u8 *addr, bool is_mc)
6251 const unsigned char *mac_addr = addr;
6252 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6253 (mac_addr[0]) | (mac_addr[1] << 8);
6254 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6256 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6258 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6259 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6262 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6263 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
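/* Worked example: for the address 00:11:22:33:44:55 the bytes pack
 * little-endian into the table words, i.e. high_val = 0x33221100
 * (bytes 0..3) and low_val = 0x5544 (bytes 4..5).
 */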
6266 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6267 struct hclge_mac_vlan_tbl_entry_cmd *req)
6269 struct hclge_dev *hdev = vport->back;
6270 struct hclge_desc desc;
6275 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6277 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6279 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6281 dev_err(&hdev->pdev->dev,
6282 "del mac addr failed for cmd_send, ret =%d.\n",
6286 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6287 retval = le16_to_cpu(desc.retval);
6289 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6290 HCLGE_MAC_VLAN_REMOVE);
6293 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6294 struct hclge_mac_vlan_tbl_entry_cmd *req,
6295 struct hclge_desc *desc,
6298 struct hclge_dev *hdev = vport->back;
6303 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6305 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6306 memcpy(desc[0].data,
6308 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6309 hclge_cmd_setup_basic_desc(&desc[1],
6310 HCLGE_OPC_MAC_VLAN_ADD,
6312 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6313 hclge_cmd_setup_basic_desc(&desc[2],
6314 HCLGE_OPC_MAC_VLAN_ADD,
6316 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6318 memcpy(desc[0].data,
6320 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6321 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6324 dev_err(&hdev->pdev->dev,
6325 "lookup mac addr failed for cmd_send, ret =%d.\n",
6329 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6330 retval = le16_to_cpu(desc[0].retval);
6332 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6333 HCLGE_MAC_VLAN_LKUP);
6336 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6337 struct hclge_mac_vlan_tbl_entry_cmd *req,
6338 struct hclge_desc *mc_desc)
6340 struct hclge_dev *hdev = vport->back;
6347 struct hclge_desc desc;
6349 hclge_cmd_setup_basic_desc(&desc,
6350 HCLGE_OPC_MAC_VLAN_ADD,
6352 memcpy(desc.data, req,
6353 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6354 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6355 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6356 retval = le16_to_cpu(desc.retval);
6358 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6360 HCLGE_MAC_VLAN_ADD);
6362 hclge_cmd_reuse_desc(&mc_desc[0], false);
6363 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6364 hclge_cmd_reuse_desc(&mc_desc[1], false);
6365 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6366 hclge_cmd_reuse_desc(&mc_desc[2], false);
6367 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6368 memcpy(mc_desc[0].data, req,
6369 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6370 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6371 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6372 retval = le16_to_cpu(mc_desc[0].retval);
6374 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6376 HCLGE_MAC_VLAN_ADD);
6380 dev_err(&hdev->pdev->dev,
6381 "add mac addr failed for cmd_send, ret =%d.\n",
6389 static int hclge_init_umv_space(struct hclge_dev *hdev)
6391 u16 allocated_size = 0;
6394 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6399 if (allocated_size < hdev->wanted_umv_size)
6400 dev_warn(&hdev->pdev->dev,
6401 "Alloc umv space failed, want %d, get %d\n",
6402 hdev->wanted_umv_size, allocated_size);
6404 mutex_init(&hdev->umv_mutex);
6405 hdev->max_umv_size = allocated_size;
6406 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6407 hdev->share_umv_size = hdev->priv_umv_size +
6408 hdev->max_umv_size % (hdev->num_req_vfs + 2);
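/* Worked example for the split above: an allocated size of 256 entries
 * with 2 requested VFs gives a divisor of num_req_vfs + 2 = 4, so each
 * function gets priv_umv_size = 64 private entries and the shared pool
 * is share_umv_size = 64 + 0 (one divisor share plus the division
 * remainder).
 */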
6413 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6417 if (hdev->max_umv_size > 0) {
6418 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6422 hdev->max_umv_size = 0;
6424 mutex_destroy(&hdev->umv_mutex);
6429 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6430 u16 *allocated_size, bool is_alloc)
6432 struct hclge_umv_spc_alc_cmd *req;
6433 struct hclge_desc desc;
6436 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6437 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6438 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6439 req->space_size = cpu_to_le32(space_size);
6441 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6443 dev_err(&hdev->pdev->dev,
6444 "%s umv space failed for cmd_send, ret =%d\n",
6445 is_alloc ? "allocate" : "free", ret);
6449 if (is_alloc && allocated_size)
6450 *allocated_size = le32_to_cpu(desc.data[1]);
6455 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6457 struct hclge_vport *vport;
6460 for (i = 0; i < hdev->num_alloc_vport; i++) {
6461 vport = &hdev->vport[i];
6462 vport->used_umv_num = 0;
6465 mutex_lock(&hdev->umv_mutex);
6466 hdev->share_umv_size = hdev->priv_umv_size +
6467 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6468 mutex_unlock(&hdev->umv_mutex);
6471 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6473 struct hclge_dev *hdev = vport->back;
6476 mutex_lock(&hdev->umv_mutex);
6477 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6478 hdev->share_umv_size == 0);
6479 mutex_unlock(&hdev->umv_mutex);
6484 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6486 struct hclge_dev *hdev = vport->back;
6488 mutex_lock(&hdev->umv_mutex);
6490 if (vport->used_umv_num > hdev->priv_umv_size)
6491 hdev->share_umv_size++;
6493 if (vport->used_umv_num > 0)
6494 vport->used_umv_num--;
6496 if (vport->used_umv_num >= hdev->priv_umv_size &&
6497 hdev->share_umv_size > 0)
6498 hdev->share_umv_size--;
6499 vport->used_umv_num++;
6501 mutex_unlock(&hdev->umv_mutex);
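/* Accounting sketch: on allocation a vport consumes its private quota
 * first and only dips into the shared pool once used_umv_num reaches
 * priv_umv_size; on free, the shared pool is refilled first while the
 * vport is still above its private quota. umv_mutex serializes all
 * updates.
 */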
6504 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6505 const unsigned char *addr)
6507 struct hclge_vport *vport = hclge_get_vport(handle);
6509 return hclge_add_uc_addr_common(vport, addr);
6512 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6513 const unsigned char *addr)
6515 struct hclge_dev *hdev = vport->back;
6516 struct hclge_mac_vlan_tbl_entry_cmd req;
6517 struct hclge_desc desc;
6518 u16 egress_port = 0;
6521 /* mac addr check */
6522 if (is_zero_ether_addr(addr) ||
6523 is_broadcast_ether_addr(addr) ||
6524 is_multicast_ether_addr(addr)) {
6525 dev_err(&hdev->pdev->dev,
6526 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6528 is_zero_ether_addr(addr),
6529 is_broadcast_ether_addr(addr),
6530 is_multicast_ether_addr(addr));
6534 memset(&req, 0, sizeof(req));
6536 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6537 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6539 req.egress_port = cpu_to_le16(egress_port);
6541 hclge_prepare_mac_addr(&req, addr, false);
6543 /* Lookup the mac address in the mac_vlan table, and add
6544 * it if the entry does not exist. Duplicate unicast entries
6545 * are not allowed in the mac vlan table.
*/
6547 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6548 if (ret == -ENOENT) {
6549 if (!hclge_is_umv_space_full(vport)) {
6550 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6552 hclge_update_umv_space(vport, false);
6556 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6557 hdev->priv_umv_size);
6562 /* check if we just hit the duplicate */
6564 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6565 vport->vport_id, addr);
6569 dev_err(&hdev->pdev->dev,
6570 "PF failed to add unicast entry(%pM) in the MAC table\n",
6576 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6577 const unsigned char *addr)
6579 struct hclge_vport *vport = hclge_get_vport(handle);
6581 return hclge_rm_uc_addr_common(vport, addr);
6584 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6585 const unsigned char *addr)
6587 struct hclge_dev *hdev = vport->back;
6588 struct hclge_mac_vlan_tbl_entry_cmd req;
6591 /* mac addr check */
6592 if (is_zero_ether_addr(addr) ||
6593 is_broadcast_ether_addr(addr) ||
6594 is_multicast_ether_addr(addr)) {
6595 dev_dbg(&hdev->pdev->dev,
6596 "Remove mac err! invalid mac:%pM.\n",
6601 memset(&req, 0, sizeof(req));
6602 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6603 hclge_prepare_mac_addr(&req, addr, false);
6604 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6606 hclge_update_umv_space(vport, true);
6611 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6612 const unsigned char *addr)
6614 struct hclge_vport *vport = hclge_get_vport(handle);
6616 return hclge_add_mc_addr_common(vport, addr);
6619 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6620 const unsigned char *addr)
6622 struct hclge_dev *hdev = vport->back;
6623 struct hclge_mac_vlan_tbl_entry_cmd req;
6624 struct hclge_desc desc[3];
6627 /* mac addr check */
6628 if (!is_multicast_ether_addr(addr)) {
6629 dev_err(&hdev->pdev->dev,
6630 "Add mc mac err! invalid mac:%pM.\n",
6634 memset(&req, 0, sizeof(req));
6635 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6636 hclge_prepare_mac_addr(&req, addr, true);
6637 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6639 /* This mac addr exists, update VFID for it */
6640 hclge_update_desc_vfid(desc, vport->vport_id, false);
6641 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6643 /* This mac addr does not exist, add a new entry for it */
6644 memset(desc[0].data, 0, sizeof(desc[0].data));
6645 memset(desc[1].data, 0, sizeof(desc[1].data));
6646 memset(desc[2].data, 0, sizeof(desc[2].data));
6647 hclge_update_desc_vfid(desc, vport->vport_id, false);
6648 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6651 if (status == -ENOSPC)
6652 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6657 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6658 const unsigned char *addr)
6660 struct hclge_vport *vport = hclge_get_vport(handle);
6662 return hclge_rm_mc_addr_common(vport, addr);
6665 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6666 const unsigned char *addr)
6668 struct hclge_dev *hdev = vport->back;
6669 struct hclge_mac_vlan_tbl_entry_cmd req;
6670 enum hclge_cmd_status status;
6671 struct hclge_desc desc[3];
6673 /* mac addr check */
6674 if (!is_multicast_ether_addr(addr)) {
6675 dev_dbg(&hdev->pdev->dev,
6676 "Remove mc mac err! invalid mac:%pM.\n",
6681 memset(&req, 0, sizeof(req));
6682 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6683 hclge_prepare_mac_addr(&req, addr, true);
6684 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6686 /* This mac addr exists, remove this handle's VFID for it */
6687 hclge_update_desc_vfid(desc, vport->vport_id, true);
6689 if (hclge_is_all_function_id_zero(desc))
6690 /* All the vfid is zero, so need to delete this entry */
6691 status = hclge_remove_mac_vlan_tbl(vport, &req);
6693 /* Not all the vfid is zero, update the vfid */
6694 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6697 /* Maybe this mac address is in the mta table, but it cannot be
6698 * deleted here because an mta entry represents an address range
6699 * rather than a specific address. The delete action for all
6700 * entries will take effect in update_mta_status, called by
6701 * hns3_nic_set_rx_mode.
*/
6709 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6710 enum HCLGE_MAC_ADDR_TYPE mac_type)
6712 struct hclge_vport_mac_addr_cfg *mac_cfg;
6713 struct list_head *list;
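/* vport 0 is the PF itself; only VF vports keep these per-vport
 * address lists
 */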
6715 if (!vport->vport_id)
6718 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6722 mac_cfg->hd_tbl_status = true;
6723 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6725 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6726 &vport->uc_mac_list : &vport->mc_mac_list;
6728 list_add_tail(&mac_cfg->node, list);
6731 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6733 enum HCLGE_MAC_ADDR_TYPE mac_type)
6735 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6736 struct list_head *list;
6737 bool uc_flag, mc_flag;
6739 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6740 &vport->uc_mac_list : &vport->mc_mac_list;
6742 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6743 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6745 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
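/* raw 6-byte compare of the stored address; ether_addr_equal()
 * would be the idiomatic helper here
 */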
6746 if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
6747 if (uc_flag && mac_cfg->hd_tbl_status)
6748 hclge_rm_uc_addr_common(vport, mac_addr);
6750 if (mc_flag && mac_cfg->hd_tbl_status)
6751 hclge_rm_mc_addr_common(vport, mac_addr);
6753 list_del(&mac_cfg->node);
6760 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6761 enum HCLGE_MAC_ADDR_TYPE mac_type)
6763 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6764 struct list_head *list;
6766 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6767 &vport->uc_mac_list : &vport->mc_mac_list;
6769 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6770 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6771 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6773 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6774 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6776 mac_cfg->hd_tbl_status = false;
6778 list_del(&mac_cfg->node);
6784 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6786 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6787 struct hclge_vport *vport;
6790 mutex_lock(&hdev->vport_cfg_mutex);
6791 for (i = 0; i < hdev->num_alloc_vport; i++) {
6792 vport = &hdev->vport[i];
6793 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6794 list_del(&mac->node);
6798 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6799 list_del(&mac->node);
6803 mutex_unlock(&hdev->vport_cfg_mutex);
6806 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6807 u16 cmdq_resp, u8 resp_code)
6809 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6810 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6811 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6812 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6817 dev_err(&hdev->pdev->dev,
6818 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6823 switch (resp_code) {
6824 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6825 case HCLGE_ETHERTYPE_ALREADY_ADD:
6828 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6829 dev_err(&hdev->pdev->dev,
6830 "add mac ethertype failed for manager table overflow.\n");
6831 return_status = -EIO;
6833 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6834 dev_err(&hdev->pdev->dev,
6835 "add mac ethertype failed for key conflict.\n");
6836 return_status = -EIO;
6839 dev_err(&hdev->pdev->dev,
6840 "add mac ethertype failed for undefined, code=%d.\n",
6842 return_status = -EIO;
6845 return return_status;
6848 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6849 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6851 struct hclge_desc desc;
6856 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6857 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6859 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6861 dev_err(&hdev->pdev->dev,
6862 "add mac ethertype failed for cmd_send, ret =%d.\n",
6867 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
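/* the firmware puts the response code in byte 1 of the first data
 * word (masked out above); desc.retval carries the command status
 */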
6868 retval = le16_to_cpu(desc.retval);
6870 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6873 static int init_mgr_tbl(struct hclge_dev *hdev)
6878 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6879 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6881 dev_err(&hdev->pdev->dev,
6882 "add mac ethertype failed, ret =%d.\n",
6891 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6893 struct hclge_vport *vport = hclge_get_vport(handle);
6894 struct hclge_dev *hdev = vport->back;
6896 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6899 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6902 const unsigned char *new_addr = (const unsigned char *)p;
6903 struct hclge_vport *vport = hclge_get_vport(handle);
6904 struct hclge_dev *hdev = vport->back;
6907 /* mac addr check */
6908 if (is_zero_ether_addr(new_addr) ||
6909 is_broadcast_ether_addr(new_addr) ||
6910 is_multicast_ether_addr(new_addr)) {
6911 dev_err(&hdev->pdev->dev,
6912 "Change uc mac err! invalid mac:%p.\n",
6917 if ((!is_first || is_kdump_kernel()) &&
6918 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6919 dev_warn(&hdev->pdev->dev,
6920 "remove old uc mac address fail.\n");
6922 ret = hclge_add_uc_addr(handle, new_addr);
6924 dev_err(&hdev->pdev->dev,
6925 "add uc mac address fail, ret =%d.\n",
6929 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6930 dev_err(&hdev->pdev->dev,
6931 "restore uc mac address fail.\n");
6936 ret = hclge_pause_addr_cfg(hdev, new_addr);
6938 dev_err(&hdev->pdev->dev,
6939 "configure mac pause address fail, ret =%d.\n",
6944 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6949 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6952 struct hclge_vport *vport = hclge_get_vport(handle);
6953 struct hclge_dev *hdev = vport->back;
6955 if (!hdev->hw.mac.phydev)
6958 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6961 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6962 u8 fe_type, bool filter_en, u8 vf_id)
6964 struct hclge_vlan_filter_ctrl_cmd *req;
6965 struct hclge_desc desc;
6968 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6970 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6971 req->vlan_type = vlan_type;
6972 req->vlan_fe = filter_en ? fe_type : 0;
6975 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6977 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6983 #define HCLGE_FILTER_TYPE_VF 0
6984 #define HCLGE_FILTER_TYPE_PORT 1
6985 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6986 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6987 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6988 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6989 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6990 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6991 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6992 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6993 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6995 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6997 struct hclge_vport *vport = hclge_get_vport(handle);
6998 struct hclge_dev *hdev = vport->back;
7000 if (hdev->pdev->revision >= 0x21) {
7001 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7002 HCLGE_FILTER_FE_EGRESS, enable, 0);
7003 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7004 HCLGE_FILTER_FE_INGRESS, enable, 0);
7006 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7007 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7011 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7013 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7016 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7017 bool is_kill, u16 vlan, u8 qos,
7020 #define HCLGE_MAX_VF_BYTES 16
7021 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7022 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7023 struct hclge_desc desc[2];
7028 /* if the vf vlan table is full, firmware will close the vf vlan filter; it
7029 * is then impossible and unnecessary to add a new vlan id to the filter
7031 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7034 hclge_cmd_setup_basic_desc(&desc[0],
7035 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7036 hclge_cmd_setup_basic_desc(&desc[1],
7037 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7039 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7041 vf_byte_off = vfid / 8;
7042 vf_byte_val = 1 << (vfid % 8);
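/* one bit per VF, e.g. vfid 21 -> byte 2, bit 5 (0x20); bytes 0-15
 * of the bitmap are carried in desc[0], the remainder in desc[1]
 */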
7044 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7045 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7047 req0->vlan_id = cpu_to_le16(vlan);
7048 req0->vlan_cfg = is_kill;
7050 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7051 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7053 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7055 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7057 dev_err(&hdev->pdev->dev,
7058 "Send vf vlan command fail, ret =%d.\n",
7064 #define HCLGE_VF_VLAN_NO_ENTRY 2
7065 if (!req0->resp_code || req0->resp_code == 1)
7068 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7069 set_bit(vfid, hdev->vf_vlan_full);
7070 dev_warn(&hdev->pdev->dev,
7071 "vf vlan table is full, vf vlan filter is disabled\n");
7075 dev_err(&hdev->pdev->dev,
7076 "Add vf vlan filter fail, ret =%d.\n",
7079 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7080 if (!req0->resp_code)
7083 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7084 dev_warn(&hdev->pdev->dev,
7085 "vlan %d filter is not in vf vlan table\n",
7090 dev_err(&hdev->pdev->dev,
7091 "Kill vf vlan filter fail, ret =%d.\n",
7098 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7099 u16 vlan_id, bool is_kill)
7101 struct hclge_vlan_filter_pf_cfg_cmd *req;
7102 struct hclge_desc desc;
7103 u8 vlan_offset_byte_val;
7104 u8 vlan_offset_byte;
7108 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7110 vlan_offset_160 = vlan_id / 160;
7111 vlan_offset_byte = (vlan_id % 160) / 8;
7112 vlan_offset_byte_val = 1 << (vlan_id % 8);
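/* the 4K vlan id space is addressed in 160-vlan windows,
 * e.g. vlan_id 4000 -> window 25, byte 0, bit 0
 */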
7114 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7115 req->vlan_offset = vlan_offset_160;
7116 req->vlan_cfg = is_kill;
7117 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7119 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7121 dev_err(&hdev->pdev->dev,
7122 "port vlan command, send fail, ret =%d.\n", ret);
7126 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7127 u16 vport_id, u16 vlan_id, u8 qos,
7130 u16 vport_idx, vport_num = 0;
7133 if (is_kill && !vlan_id)
7136 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7139 dev_err(&hdev->pdev->dev,
7140 "Set %d vport vlan filter config fail, ret =%d.\n",
7145 /* vlan 0 may be added twice when the 8021q module is enabled */
7146 if (!is_kill && !vlan_id &&
7147 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7150 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7151 dev_err(&hdev->pdev->dev,
7152 "Add port vlan failed, vport %d is already in vlan %d\n",
7158 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7159 dev_err(&hdev->pdev->dev,
7160 "Delete port vlan failed, vport %d is not in vlan %d\n",
7165 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7168 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7169 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7175 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7177 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7178 struct hclge_vport_vtag_tx_cfg_cmd *req;
7179 struct hclge_dev *hdev = vport->back;
7180 struct hclge_desc desc;
7183 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7185 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7186 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7187 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7188 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7189 vcfg->accept_tag1 ? 1 : 0);
7190 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7191 vcfg->accept_untag1 ? 1 : 0);
7192 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7193 vcfg->accept_tag2 ? 1 : 0);
7194 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7195 vcfg->accept_untag2 ? 1 : 0);
7196 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7197 vcfg->insert_tag1_en ? 1 : 0);
7198 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7199 vcfg->insert_tag2_en ? 1 : 0);
7200 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7202 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7203 req->vf_bitmap[req->vf_offset] =
7204 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7206 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7208 dev_err(&hdev->pdev->dev,
7209 "Send port txvlan cfg command fail, ret =%d\n",
7215 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7217 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7218 struct hclge_vport_vtag_rx_cfg_cmd *req;
7219 struct hclge_dev *hdev = vport->back;
7220 struct hclge_desc desc;
7223 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7225 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7226 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7227 vcfg->strip_tag1_en ? 1 : 0);
7228 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7229 vcfg->strip_tag2_en ? 1 : 0);
7230 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7231 vcfg->vlan1_vlan_prionly ? 1 : 0);
7232 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7233 vcfg->vlan2_vlan_prionly ? 1 : 0);
7235 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7236 req->vf_bitmap[req->vf_offset] =
7237 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7239 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7241 dev_err(&hdev->pdev->dev,
7242 "Send port rxvlan cfg command fail, ret =%d\n",
7248 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7249 u16 port_base_vlan_state,
7254 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7255 vport->txvlan_cfg.accept_tag1 = true;
7256 vport->txvlan_cfg.insert_tag1_en = false;
7257 vport->txvlan_cfg.default_tag1 = 0;
7259 vport->txvlan_cfg.accept_tag1 = false;
7260 vport->txvlan_cfg.insert_tag1_en = true;
7261 vport->txvlan_cfg.default_tag1 = vlan_tag;
7264 vport->txvlan_cfg.accept_untag1 = true;
7266 /* accept_tag2 and accept_untag2 are not supported on
7267 * pdev revision 0x20; newer revisions support them, but
7268 * these two fields cannot be configured by the user.
7270 vport->txvlan_cfg.accept_tag2 = true;
7271 vport->txvlan_cfg.accept_untag2 = true;
7272 vport->txvlan_cfg.insert_tag2_en = false;
7273 vport->txvlan_cfg.default_tag2 = 0;
7275 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7276 vport->rxvlan_cfg.strip_tag1_en = false;
7277 vport->rxvlan_cfg.strip_tag2_en =
7278 vport->rxvlan_cfg.rx_vlan_offload_en;
7280 vport->rxvlan_cfg.strip_tag1_en =
7281 vport->rxvlan_cfg.rx_vlan_offload_en;
7282 vport->rxvlan_cfg.strip_tag2_en = true;
7284 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7285 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7287 ret = hclge_set_vlan_tx_offload_cfg(vport);
7291 return hclge_set_vlan_rx_offload_cfg(vport);
7294 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7296 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7297 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7298 struct hclge_desc desc;
7301 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7302 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7303 rx_req->ot_fst_vlan_type =
7304 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7305 rx_req->ot_sec_vlan_type =
7306 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7307 rx_req->in_fst_vlan_type =
7308 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7309 rx_req->in_sec_vlan_type =
7310 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7312 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7314 dev_err(&hdev->pdev->dev,
7315 "Send rxvlan protocol type command fail, ret =%d\n",
7320 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7322 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7323 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7324 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7326 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7328 dev_err(&hdev->pdev->dev,
7329 "Send txvlan protocol type command fail, ret =%d\n",
7335 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7337 #define HCLGE_DEF_VLAN_TYPE 0x8100
7339 struct hnae3_handle *handle = &hdev->vport[0].nic;
7340 struct hclge_vport *vport;
7344 if (hdev->pdev->revision >= 0x21) {
7345 /* for revision 0x21, vf vlan filter is per function */
7346 for (i = 0; i < hdev->num_alloc_vport; i++) {
7347 vport = &hdev->vport[i];
7348 ret = hclge_set_vlan_filter_ctrl(hdev,
7349 HCLGE_FILTER_TYPE_VF,
7350 HCLGE_FILTER_FE_EGRESS,
7357 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7358 HCLGE_FILTER_FE_INGRESS, true,
7363 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7364 HCLGE_FILTER_FE_EGRESS_V1_B,
7370 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7372 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7373 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7374 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7375 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7376 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7377 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7379 ret = hclge_set_vlan_protocol_type(hdev);
7383 for (i = 0; i < hdev->num_alloc_vport; i++) {
7386 vport = &hdev->vport[i];
7387 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7389 ret = hclge_vlan_offload_cfg(vport,
7390 vport->port_base_vlan_cfg.state,
7396 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7399 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7402 struct hclge_vport_vlan_cfg *vlan;
7404 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7408 vlan->hd_tbl_status = writen_to_tbl;
7409 vlan->vlan_id = vlan_id;
7411 list_add_tail(&vlan->node, &vport->vlan_list);
7414 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7416 struct hclge_vport_vlan_cfg *vlan, *tmp;
7417 struct hclge_dev *hdev = vport->back;
7420 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7421 if (!vlan->hd_tbl_status) {
7422 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7424 vlan->vlan_id, 0, false);
7426 dev_err(&hdev->pdev->dev,
7427 "restore vport vlan list failed, ret=%d\n",
7432 vlan->hd_tbl_status = true;
7438 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7441 struct hclge_vport_vlan_cfg *vlan, *tmp;
7442 struct hclge_dev *hdev = vport->back;
7444 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7445 if (vlan->vlan_id == vlan_id) {
7446 if (is_write_tbl && vlan->hd_tbl_status)
7447 hclge_set_vlan_filter_hw(hdev,
7453 list_del(&vlan->node);
7460 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7462 struct hclge_vport_vlan_cfg *vlan, *tmp;
7463 struct hclge_dev *hdev = vport->back;
7465 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7466 if (vlan->hd_tbl_status)
7467 hclge_set_vlan_filter_hw(hdev,
7473 vlan->hd_tbl_status = false;
7475 list_del(&vlan->node);
7481 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7483 struct hclge_vport_vlan_cfg *vlan, *tmp;
7484 struct hclge_vport *vport;
7487 mutex_lock(&hdev->vport_cfg_mutex);
7488 for (i = 0; i < hdev->num_alloc_vport; i++) {
7489 vport = &hdev->vport[i];
7490 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7491 list_del(&vlan->node);
7495 mutex_unlock(&hdev->vport_cfg_mutex);
7498 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7500 struct hclge_vport *vport = hclge_get_vport(handle);
7501 struct hclge_vport_vlan_cfg *vlan, *tmp;
7502 struct hclge_dev *hdev = vport->back;
7503 u16 vlan_proto, qos;
7507 mutex_lock(&hdev->vport_cfg_mutex);
7508 for (i = 0; i < hdev->num_alloc_vport; i++) {
7509 vport = &hdev->vport[i];
7510 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7511 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7512 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7513 state = vport->port_base_vlan_cfg.state;
7515 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7516 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7517 vport->vport_id, vlan_id, qos,
7522 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7523 if (vlan->hd_tbl_status)
7524 hclge_set_vlan_filter_hw(hdev,
7532 mutex_unlock(&hdev->vport_cfg_mutex);
7535 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7537 struct hclge_vport *vport = hclge_get_vport(handle);
7539 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7540 vport->rxvlan_cfg.strip_tag1_en = false;
7541 vport->rxvlan_cfg.strip_tag2_en = enable;
7543 vport->rxvlan_cfg.strip_tag1_en = enable;
7544 vport->rxvlan_cfg.strip_tag2_en = true;
7546 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7547 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7548 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7550 return hclge_set_vlan_rx_offload_cfg(vport);
7553 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7554 u16 port_base_vlan_state,
7555 struct hclge_vlan_info *new_info,
7556 struct hclge_vlan_info *old_info)
7558 struct hclge_dev *hdev = vport->back;
7561 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7562 hclge_rm_vport_all_vlan_table(vport, false);
7563 return hclge_set_vlan_filter_hw(hdev,
7564 htons(new_info->vlan_proto),
7567 new_info->qos, false);
7570 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7571 vport->vport_id, old_info->vlan_tag,
7572 old_info->qos, true);
7576 return hclge_add_vport_all_vlan_table(vport);
7579 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7580 struct hclge_vlan_info *vlan_info)
7582 struct hnae3_handle *nic = &vport->nic;
7583 struct hclge_vlan_info *old_vlan_info;
7584 struct hclge_dev *hdev = vport->back;
7587 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7589 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7593 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7594 /* add new VLAN tag */
7595 ret = hclge_set_vlan_filter_hw(hdev,
7596 htons(vlan_info->vlan_proto),
7598 vlan_info->vlan_tag,
7599 vlan_info->qos, false);
7603 /* remove old VLAN tag */
7604 ret = hclge_set_vlan_filter_hw(hdev,
7605 htons(old_vlan_info->vlan_proto),
7607 old_vlan_info->vlan_tag,
7608 old_vlan_info->qos, true);
7615 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7620 /* update state only when disabling or enabling port based VLAN */
7621 vport->port_base_vlan_cfg.state = state;
7622 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7623 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7625 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7628 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7629 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7630 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
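/* decide how the port based VLAN state changes for the requested vlan:
 * currently DISABLE: no vlan -> NOCHANGE, vlan set -> ENABLE;
 * currently ENABLE: vlan 0 -> DISABLE, same vlan -> NOCHANGE,
 * otherwise MODIFY
 */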
7635 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7636 enum hnae3_port_base_vlan_state state,
7639 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7641 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7643 return HNAE3_PORT_BASE_VLAN_ENABLE;
7646 return HNAE3_PORT_BASE_VLAN_DISABLE;
7647 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7648 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7650 return HNAE3_PORT_BASE_VLAN_MODIFY;
7654 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7655 u16 vlan, u8 qos, __be16 proto)
7657 struct hclge_vport *vport = hclge_get_vport(handle);
7658 struct hclge_dev *hdev = vport->back;
7659 struct hclge_vlan_info vlan_info;
7663 if (hdev->pdev->revision == 0x20)
7666 /* qos is a 3-bit value, so it cannot be bigger than 7 */
7667 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7669 if (proto != htons(ETH_P_8021Q))
7670 return -EPROTONOSUPPORT;
7672 vport = &hdev->vport[vfid];
7673 state = hclge_get_port_base_vlan_state(vport,
7674 vport->port_base_vlan_cfg.state,
7676 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7679 vlan_info.vlan_tag = vlan;
7680 vlan_info.qos = qos;
7681 vlan_info.vlan_proto = ntohs(proto);
7683 /* update port based VLAN for PF */
7685 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7686 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7687 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7692 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7693 return hclge_update_port_base_vlan_cfg(vport, state,
7696 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7704 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7705 u16 vlan_id, bool is_kill)
7707 struct hclge_vport *vport = hclge_get_vport(handle);
7708 struct hclge_dev *hdev = vport->back;
7709 bool writen_to_tbl = false;
7712 /* when port based VLAN is enabled, we use the port based VLAN as the
7713 * VLAN filter entry. In this case, we don't update the VLAN filter table
7714 * when the user adds a new VLAN or removes an existing one; we just
7715 * update the vport VLAN list. The VLAN ids in the list won't be written
7716 * to the VLAN filter table until port based VLAN is disabled
7718 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7719 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7720 vlan_id, 0, is_kill);
7721 writen_to_tbl = true;
7728 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7730 hclge_add_vport_vlan_table(vport, vlan_id,
7736 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7738 struct hclge_config_max_frm_size_cmd *req;
7739 struct hclge_desc desc;
7741 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7743 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7744 req->max_frm_size = cpu_to_le16(new_mps);
7745 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7747 return hclge_cmd_send(&hdev->hw, &desc, 1);
7750 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7752 struct hclge_vport *vport = hclge_get_vport(handle);
7754 return hclge_set_vport_mtu(vport, new_mtu);
7757 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7759 struct hclge_dev *hdev = vport->back;
7760 int i, max_frm_size, ret = 0;
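/* max frame size on the wire: the MTU plus L2 header, FCS and room
 * for two VLAN tags
 */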
7762 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7763 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7764 max_frm_size > HCLGE_MAC_MAX_FRAME)
7767 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7768 mutex_lock(&hdev->vport_lock);
7769 /* VF's mps must fit within hdev->mps */
7770 if (vport->vport_id && max_frm_size > hdev->mps) {
7771 mutex_unlock(&hdev->vport_lock);
7773 } else if (vport->vport_id) {
7774 vport->mps = max_frm_size;
7775 mutex_unlock(&hdev->vport_lock);
7779 /* PF's mps must be no smaller than any VF's mps */
7780 for (i = 1; i < hdev->num_alloc_vport; i++)
7781 if (max_frm_size < hdev->vport[i].mps) {
7782 mutex_unlock(&hdev->vport_lock);
7786 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7788 ret = hclge_set_mac_mtu(hdev, max_frm_size);
7790 dev_err(&hdev->pdev->dev,
7791 "Change mtu fail, ret =%d\n", ret);
7795 hdev->mps = max_frm_size;
7796 vport->mps = max_frm_size;
7798 ret = hclge_buffer_alloc(hdev);
7800 dev_err(&hdev->pdev->dev,
7801 "Allocate buffer fail, ret =%d\n", ret);
7804 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7805 mutex_unlock(&hdev->vport_lock);
7809 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7812 struct hclge_reset_tqp_queue_cmd *req;
7813 struct hclge_desc desc;
7816 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7818 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7819 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7820 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7822 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7824 dev_err(&hdev->pdev->dev,
7825 "Send tqp reset cmd error, status =%d\n", ret);
7832 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7834 struct hclge_reset_tqp_queue_cmd *req;
7835 struct hclge_desc desc;
7838 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7840 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7841 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7843 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7845 dev_err(&hdev->pdev->dev,
7846 "Get reset status error, status =%d\n", ret);
7850 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7853 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7855 struct hnae3_queue *queue;
7856 struct hclge_tqp *tqp;
7858 queue = handle->kinfo.tqp[queue_id];
7859 tqp = container_of(queue, struct hclge_tqp, q);
7864 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7866 struct hclge_vport *vport = hclge_get_vport(handle);
7867 struct hclge_dev *hdev = vport->back;
7868 int reset_try_times = 0;
7873 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7875 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7877 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7881 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7883 dev_err(&hdev->pdev->dev,
7884 "Send reset tqp cmd fail, ret = %d\n", ret);
7888 reset_try_times = 0;
7889 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7890 /* Wait for tqp hw reset */
7892 reset_status = hclge_get_reset_status(hdev, queue_gid);
7897 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7898 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7902 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7904 dev_err(&hdev->pdev->dev,
7905 "Deassert the soft reset fail, ret = %d\n", ret);
7910 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7912 struct hclge_dev *hdev = vport->back;
7913 int reset_try_times = 0;
7918 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7920 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7922 dev_warn(&hdev->pdev->dev,
7923 "Send reset tqp cmd fail, ret = %d\n", ret);
7927 reset_try_times = 0;
7928 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7929 /* Wait for tqp hw reset */
7931 reset_status = hclge_get_reset_status(hdev, queue_gid);
7936 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7937 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7941 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7943 dev_warn(&hdev->pdev->dev,
7944 "Deassert the soft reset fail, ret = %d\n", ret);
7947 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7949 struct hclge_vport *vport = hclge_get_vport(handle);
7950 struct hclge_dev *hdev = vport->back;
7952 return hdev->fw_version;
7955 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7957 struct phy_device *phydev = hdev->hw.mac.phydev;
7962 phy_set_asym_pause(phydev, rx_en, tx_en);
7965 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7970 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7971 else if (rx_en && !tx_en)
7972 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7973 else if (!rx_en && tx_en)
7974 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7976 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7978 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7981 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7983 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7988 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7993 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7995 struct phy_device *phydev = hdev->hw.mac.phydev;
7996 u16 remote_advertising = 0;
7997 u16 local_advertising = 0;
7998 u32 rx_pause, tx_pause;
8001 if (!phydev->link || !phydev->autoneg)
8004 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8007 remote_advertising = LPA_PAUSE_CAP;
8009 if (phydev->asym_pause)
8010 remote_advertising |= LPA_PAUSE_ASYM;
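/* resolve tx/rx pause from the local and link-partner
 * advertisements
 */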
8012 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8013 remote_advertising);
8014 tx_pause = flowctl & FLOW_CTRL_TX;
8015 rx_pause = flowctl & FLOW_CTRL_RX;
8017 if (phydev->duplex == HCLGE_MAC_HALF) {
8022 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8025 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8026 u32 *rx_en, u32 *tx_en)
8028 struct hclge_vport *vport = hclge_get_vport(handle);
8029 struct hclge_dev *hdev = vport->back;
8031 *auto_neg = hclge_get_autoneg(handle);
8033 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8039 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8042 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8045 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8054 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8055 u32 rx_en, u32 tx_en)
8057 struct hclge_vport *vport = hclge_get_vport(handle);
8058 struct hclge_dev *hdev = vport->back;
8059 struct phy_device *phydev = hdev->hw.mac.phydev;
8062 fc_autoneg = hclge_get_autoneg(handle);
8063 if (auto_neg != fc_autoneg) {
8064 dev_info(&hdev->pdev->dev,
8065 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8069 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8070 dev_info(&hdev->pdev->dev,
8071 "Priority flow control enabled. Cannot set link flow control.\n");
8075 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8078 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8081 return phy_start_aneg(phydev);
8083 if (hdev->pdev->revision == 0x20)
8086 return hclge_restart_autoneg(handle);
8089 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8090 u8 *auto_neg, u32 *speed, u8 *duplex)
8092 struct hclge_vport *vport = hclge_get_vport(handle);
8093 struct hclge_dev *hdev = vport->back;
8096 *speed = hdev->hw.mac.speed;
8098 *duplex = hdev->hw.mac.duplex;
8100 *auto_neg = hdev->hw.mac.autoneg;
8103 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8106 struct hclge_vport *vport = hclge_get_vport(handle);
8107 struct hclge_dev *hdev = vport->back;
8110 *media_type = hdev->hw.mac.media_type;
8113 *module_type = hdev->hw.mac.module_type;
8116 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8117 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8119 struct hclge_vport *vport = hclge_get_vport(handle);
8120 struct hclge_dev *hdev = vport->back;
8121 struct phy_device *phydev = hdev->hw.mac.phydev;
8122 int mdix_ctrl, mdix, retval, is_resolved;
8125 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8126 *tp_mdix = ETH_TP_MDI_INVALID;
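/* the MDI/X bits live in a separate PHY register page: select it,
 * read the control and status registers, then restore the copper page
 */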
8130 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8132 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8133 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8134 HCLGE_PHY_MDIX_CTRL_S);
8136 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8137 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8138 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8140 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8142 switch (mdix_ctrl) {
8144 *tp_mdix_ctrl = ETH_TP_MDI;
8147 *tp_mdix_ctrl = ETH_TP_MDI_X;
8150 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8153 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8158 *tp_mdix = ETH_TP_MDI_INVALID;
8160 *tp_mdix = ETH_TP_MDI_X;
8162 *tp_mdix = ETH_TP_MDI;
8165 static void hclge_info_show(struct hclge_dev *hdev)
8167 struct device *dev = &hdev->pdev->dev;
8169 dev_info(dev, "PF info begin:\n");
8171 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8172 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8173 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8174 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8175 dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8176 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8177 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8178 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8179 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8180 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8181 dev_info(dev, "This is %s PF\n",
8182 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8183 dev_info(dev, "DCB %s\n",
8184 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8185 dev_info(dev, "MQPRIO %s\n",
8186 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8188 dev_info(dev, "PF info end.\n");
8191 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8192 struct hclge_vport *vport)
8194 struct hnae3_client *client = vport->nic.client;
8195 struct hclge_dev *hdev = ae_dev->priv;
8198 ret = client->ops->init_instance(&vport->nic);
8202 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8203 hnae3_set_client_init_flag(client, ae_dev, 1);
8205 /* Enable nic hw error interrupts */
8206 ret = hclge_config_nic_hw_error(hdev, true);
8208 dev_err(&ae_dev->pdev->dev,
8209 "fail(%d) to enable hw error interrupts\n", ret);
8211 if (netif_msg_drv(&hdev->vport->nic))
8212 hclge_info_show(hdev);
8217 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8218 struct hclge_vport *vport)
8220 struct hnae3_client *client = vport->roce.client;
8221 struct hclge_dev *hdev = ae_dev->priv;
8224 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8228 client = hdev->roce_client;
8229 ret = hclge_init_roce_base_info(vport);
8233 ret = client->ops->init_instance(&vport->roce);
8237 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8238 hnae3_set_client_init_flag(client, ae_dev, 1);
8243 static int hclge_init_client_instance(struct hnae3_client *client,
8244 struct hnae3_ae_dev *ae_dev)
8246 struct hclge_dev *hdev = ae_dev->priv;
8247 struct hclge_vport *vport;
8250 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8251 vport = &hdev->vport[i];
8253 switch (client->type) {
8254 case HNAE3_CLIENT_KNIC:
8256 hdev->nic_client = client;
8257 vport->nic.client = client;
8258 ret = hclge_init_nic_client_instance(ae_dev, vport);
8262 ret = hclge_init_roce_client_instance(ae_dev, vport);
8267 case HNAE3_CLIENT_UNIC:
8268 hdev->nic_client = client;
8269 vport->nic.client = client;
8271 ret = client->ops->init_instance(&vport->nic);
8275 hnae3_set_client_init_flag(client, ae_dev, 1);
8278 case HNAE3_CLIENT_ROCE:
8279 if (hnae3_dev_roce_supported(hdev)) {
8280 hdev->roce_client = client;
8281 vport->roce.client = client;
8284 ret = hclge_init_roce_client_instance(ae_dev, vport);
8294 /* Enable roce ras interrupts */
8295 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8297 dev_err(&ae_dev->pdev->dev,
8298 "fail(%d) to enable roce ras interrupts\n", ret);
8303 hdev->nic_client = NULL;
8304 vport->nic.client = NULL;
8307 hdev->roce_client = NULL;
8308 vport->roce.client = NULL;
8312 static void hclge_uninit_client_instance(struct hnae3_client *client,
8313 struct hnae3_ae_dev *ae_dev)
8315 struct hclge_dev *hdev = ae_dev->priv;
8316 struct hclge_vport *vport;
8319 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8320 vport = &hdev->vport[i];
8321 if (hdev->roce_client) {
8322 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8323 hdev->roce_client->ops->uninit_instance(&vport->roce,
8325 hdev->roce_client = NULL;
8326 vport->roce.client = NULL;
8328 if (client->type == HNAE3_CLIENT_ROCE)
8330 if (hdev->nic_client && client->ops->uninit_instance) {
8331 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8332 client->ops->uninit_instance(&vport->nic, 0);
8333 hdev->nic_client = NULL;
8334 vport->nic.client = NULL;
8339 static int hclge_pci_init(struct hclge_dev *hdev)
8341 struct pci_dev *pdev = hdev->pdev;
8342 struct hclge_hw *hw;
8345 ret = pci_enable_device(pdev);
8347 dev_err(&pdev->dev, "failed to enable PCI device\n");
8351 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8353 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8356 "can't set consistent PCI DMA");
8357 goto err_disable_device;
8359 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8362 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8364 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8365 goto err_disable_device;
8368 pci_set_master(pdev);
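/* map BAR 2, which exposes the device register space (hw->io_base) */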
8370 hw->io_base = pcim_iomap(pdev, 2, 0);
8372 dev_err(&pdev->dev, "Can't map configuration register space\n");
8374 goto err_clr_master;
8377 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8381 pci_clear_master(pdev);
8382 pci_release_regions(pdev);
8384 pci_disable_device(pdev);
8389 static void hclge_pci_uninit(struct hclge_dev *hdev)
8391 struct pci_dev *pdev = hdev->pdev;
8393 pcim_iounmap(pdev, hdev->hw.io_base);
8394 pci_free_irq_vectors(pdev);
8395 pci_clear_master(pdev);
8396 pci_release_mem_regions(pdev);
8397 pci_disable_device(pdev);
8400 static void hclge_state_init(struct hclge_dev *hdev)
8402 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8403 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8404 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8405 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8406 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8407 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8410 static void hclge_state_uninit(struct hclge_dev *hdev)
8412 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8413 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8415 if (hdev->service_timer.function)
8416 del_timer_sync(&hdev->service_timer);
8417 if (hdev->reset_timer.function)
8418 del_timer_sync(&hdev->reset_timer);
8419 if (hdev->service_task.func)
8420 cancel_work_sync(&hdev->service_task);
8421 if (hdev->rst_service_task.func)
8422 cancel_work_sync(&hdev->rst_service_task);
8423 if (hdev->mbx_service_task.func)
8424 cancel_work_sync(&hdev->mbx_service_task);
8427 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8429 #define HCLGE_FLR_WAIT_MS 100
8430 #define HCLGE_FLR_WAIT_CNT 50
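/* the loop below polls for the FLR down flag for at most
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS = 5 seconds
 */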
8431 struct hclge_dev *hdev = ae_dev->priv;
8434 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8435 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8436 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8437 hclge_reset_event(hdev->pdev, NULL);
8439 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8440 cnt++ < HCLGE_FLR_WAIT_CNT)
8441 msleep(HCLGE_FLR_WAIT_MS);
8443 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8444 dev_err(&hdev->pdev->dev,
8445 "flr wait down timeout: %d\n", cnt);
8448 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8450 struct hclge_dev *hdev = ae_dev->priv;
8452 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8455 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8457 struct pci_dev *pdev = ae_dev->pdev;
8458 struct hclge_dev *hdev;
8461 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8468 hdev->ae_dev = ae_dev;
8469 hdev->reset_type = HNAE3_NONE_RESET;
8470 hdev->reset_level = HNAE3_FUNC_RESET;
8471 ae_dev->priv = hdev;
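/* default max packet size: a standard Ethernet frame (ETH_FRAME_LEN)
 * plus FCS and room for two VLAN tags
 */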
8472 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8474 mutex_init(&hdev->vport_lock);
8475 mutex_init(&hdev->vport_cfg_mutex);
8476 spin_lock_init(&hdev->fd_rule_lock);
8478 ret = hclge_pci_init(hdev);
8480 dev_err(&pdev->dev, "PCI init failed\n");
8484 /* Initialize the firmware command queue */
8485 ret = hclge_cmd_queue_init(hdev);
8487 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8488 goto err_pci_uninit;
8491 /* Initialize the firmware command interface */
8492 ret = hclge_cmd_init(hdev);
8494 goto err_cmd_uninit;
8496 ret = hclge_get_cap(hdev);
8498 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8500 goto err_cmd_uninit;
8503 ret = hclge_configure(hdev);
8505 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8506 goto err_cmd_uninit;
8509 ret = hclge_init_msi(hdev);
8511 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8512 goto err_cmd_uninit;
8515 ret = hclge_misc_irq_init(hdev);
8518 "Misc IRQ(vector0) init error, ret = %d.\n",
8520 goto err_msi_uninit;
8523 ret = hclge_alloc_tqps(hdev);
8525 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8526 goto err_msi_irq_uninit;
8529 ret = hclge_alloc_vport(hdev);
8531 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8532 goto err_msi_irq_uninit;
8535 ret = hclge_map_tqp(hdev);
8537 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8538 goto err_msi_irq_uninit;
8541 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8542 ret = hclge_mac_mdio_config(hdev);
8544 dev_err(&hdev->pdev->dev,
8545 "mdio config fail ret=%d\n", ret);
8546 goto err_msi_irq_uninit;
8550 ret = hclge_init_umv_space(hdev);
8552 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8553 goto err_mdiobus_unreg;
8556 ret = hclge_mac_init(hdev);
8558 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8559 goto err_mdiobus_unreg;
8562 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8564 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8565 goto err_mdiobus_unreg;
8568 ret = hclge_config_gro(hdev, true);
8570 goto err_mdiobus_unreg;
8572 ret = hclge_init_vlan_config(hdev);
8574 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8575 goto err_mdiobus_unreg;
8578 ret = hclge_tm_schd_init(hdev);
8580 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8581 goto err_mdiobus_unreg;
8584 hclge_rss_init_cfg(hdev);
8585 ret = hclge_rss_init_hw(hdev);
8587 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8588 goto err_mdiobus_unreg;
8591 ret = init_mgr_tbl(hdev);
8593 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8594 goto err_mdiobus_unreg;
8597 ret = hclge_init_fd_config(hdev);
8600 "fd table init fail, ret=%d\n", ret);
8601 goto err_mdiobus_unreg;
8604 INIT_KFIFO(hdev->mac_tnl_log);
8606 hclge_dcb_ops_set(hdev);
8608 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8609 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8610 INIT_WORK(&hdev->service_task, hclge_service_task);
8611 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8612 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8614 hclge_clear_all_event_cause(hdev);
8616 /* Enable MISC vector(vector0) */
8617 hclge_enable_vector(&hdev->misc_vector, true);
8619 hclge_state_init(hdev);
8620 hdev->last_reset_time = jiffies;
8622 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8626 if (hdev->hw.mac.phydev)
8627 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8629 hclge_misc_irq_uninit(hdev);
8631 pci_free_irq_vectors(pdev);
8633 hclge_cmd_uninit(hdev);
8635 pcim_iounmap(pdev, hdev->hw.io_base);
8636 pci_clear_master(pdev);
8637 pci_release_regions(pdev);
8638 pci_disable_device(pdev);
8643 static void hclge_stats_clear(struct hclge_dev *hdev)
8645 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8648 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8650 struct hclge_vport *vport = hdev->vport;
8653 for (i = 0; i < hdev->num_alloc_vport; i++) {
8654 hclge_vport_stop(vport);
8659 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8661 struct hclge_dev *hdev = ae_dev->priv;
8662 struct pci_dev *pdev = ae_dev->pdev;
8665 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8667 hclge_stats_clear(hdev);
8668 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8669 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8671 ret = hclge_cmd_init(hdev);
8673 dev_err(&pdev->dev, "Cmd queue init failed\n");
8677 ret = hclge_map_tqp(hdev);
8679 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8683 hclge_reset_umv_space(hdev);
8685 ret = hclge_mac_init(hdev);
8687 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8691 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8693 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8697 ret = hclge_config_gro(hdev, true);
8701 ret = hclge_init_vlan_config(hdev);
8703 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8707 ret = hclge_tm_init_hw(hdev, true);
8709 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8713 ret = hclge_rss_init_hw(hdev);
8715 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8719 ret = hclge_init_fd_config(hdev);
8722 "fd table init fail, ret=%d\n", ret);
8726 /* Re-enable the hw error interrupts because
8727 * the interrupts get disabled on global reset.
8729 ret = hclge_config_nic_hw_error(hdev, true);
8732 "fail(%d) to re-enable NIC hw error interrupts\n",
8737 if (hdev->roce_client) {
8738 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8741 "fail(%d) to re-enable roce ras interrupts\n",
8747 hclge_reset_vport_state(hdev);
8749 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8755 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8757 struct hclge_dev *hdev = ae_dev->priv;
8758 struct hclge_mac *mac = &hdev->hw.mac;
8760 hclge_state_uninit(hdev);
8763 mdiobus_unregister(mac->mdio_bus);
8765 hclge_uninit_umv_space(hdev);
8767 /* Disable MISC vector(vector0) */
8768 hclge_enable_vector(&hdev->misc_vector, false);
8769 synchronize_irq(hdev->misc_vector.vector_irq);
8771 /* Disable all hw interrupts */
8772 hclge_config_mac_tnl_int(hdev, false);
8773 hclge_config_nic_hw_error(hdev, false);
8774 hclge_config_rocee_ras_interrupt(hdev, false);
8776 hclge_cmd_uninit(hdev);
8777 hclge_misc_irq_uninit(hdev);
8778 hclge_pci_uninit(hdev);
8779 mutex_destroy(&hdev->vport_lock);
8780 hclge_uninit_vport_mac_table(hdev);
8781 hclge_uninit_vport_vlan_table(hdev);
8782 mutex_destroy(&hdev->vport_cfg_mutex);
8783 ae_dev->priv = NULL;
8786 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8788 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8789 struct hclge_vport *vport = hclge_get_vport(handle);
8790 struct hclge_dev *hdev = vport->back;
8792 return min_t(u32, hdev->rss_size_max,
8793 vport->alloc_tqps / kinfo->num_tc);
8796 static void hclge_get_channels(struct hnae3_handle *handle,
8797 struct ethtool_channels *ch)
8799 ch->max_combined = hclge_get_max_channels(handle);
8800 ch->other_count = 1;
8802 ch->combined_count = handle->kinfo.rss_size;
8805 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8806 u16 *alloc_tqps, u16 *max_rss_size)
8808 struct hclge_vport *vport = hclge_get_vport(handle);
8809 struct hclge_dev *hdev = vport->back;
8811 *alloc_tqps = vport->alloc_tqps;
8812 *max_rss_size = hdev->rss_size_max;
8815 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8816 bool rxfh_configured)
8818 struct hclge_vport *vport = hclge_get_vport(handle);
8819 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8820 struct hclge_dev *hdev = vport->back;
8821 int cur_rss_size = kinfo->rss_size;
8822 int cur_tqps = kinfo->num_tqps;
8823 u16 tc_offset[HCLGE_MAX_TC_NUM];
8824 u16 tc_valid[HCLGE_MAX_TC_NUM];
8825 u16 tc_size[HCLGE_MAX_TC_NUM];
8830 kinfo->req_rss_size = new_tqps_num;
8832 ret = hclge_tm_vport_map_update(hdev);
8834 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8838 roundup_size = roundup_pow_of_two(kinfo->rss_size);
8839 roundup_size = ilog2(roundup_size);
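/* the hardware takes the per-TC rss size as a log2 value, so round
 * it up to a power of two first
 */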
8840 /* Set the RSS TC mode according to the new RSS size */
8841 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8844 if (!(hdev->hw_tc_map & BIT(i)))
8848 tc_size[i] = roundup_size;
8849 tc_offset[i] = kinfo->rss_size * i;
8851 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8855 /* RSS indirection table has been configured by the user */
8856 if (rxfh_configured)
8859 /* Reinitialize the RSS indirection table according to the new RSS size */
8860 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
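/* spread the indirection table evenly: entry i maps to queue
 * i % rss_size
 */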
8864 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8865 rss_indir[i] = i % kinfo->rss_size;
8867 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8869 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8876 dev_info(&hdev->pdev->dev,
8877 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
8878 cur_rss_size, kinfo->rss_size,
8879 cur_tqps, kinfo->rss_size * kinfo->num_tc);
8884 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8885 u32 *regs_num_64_bit)
8887 struct hclge_desc desc;
8891 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8892 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8894 dev_err(&hdev->pdev->dev,
8895 "Query register number cmd failed, ret = %d.\n", ret);
8899 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8900 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8902 total_num = *regs_num_32_bit + *regs_num_64_bit;
8909 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8912 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8914 struct hclge_desc *desc;
8915 u32 *reg_val = data;
8924 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8925 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8929 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8930 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8932 dev_err(&hdev->pdev->dev,
8933 "Query 32 bit register cmd failed, ret = %d.\n", ret);
8938 for (i = 0; i < cmd_num; i++) {
8940 desc_data = (__le32 *)(&desc[i].data[0]);
8941 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8943 desc_data = (__le32 *)(&desc[i]);
8944 n = HCLGE_32_BIT_REG_RTN_DATANUM;
8946 for (k = 0; k < n; k++) {
8947 *reg_val++ = le32_to_cpu(*desc_data++);
8959 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8962 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8964 struct hclge_desc *desc;
8965 u64 *reg_val = data;
8974 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8975 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8979 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8980 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8982 dev_err(&hdev->pdev->dev,
8983 "Query 64 bit register cmd failed, ret = %d.\n", ret);
8988 for (i = 0; i < cmd_num; i++) {
8990 desc_data = (__le64 *)(&desc[i].data[0]);
8991 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8993 desc_data = (__le64 *)(&desc[i]);
8994 n = HCLGE_64_BIT_REG_RTN_DATANUM;
8996 for (k = 0; k < n; k++) {
8997 *reg_val++ = le64_to_cpu(*desc_data++);
9009 #define MAX_SEPARATE_NUM 4
9010 #define SEPARATOR_VALUE 0xFFFFFFFF
9011 #define REG_NUM_PER_LINE 4
9012 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
9014 static int hclge_get_regs_len(struct hnae3_handle *handle)
9016 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9017 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9018 struct hclge_vport *vport = hclge_get_vport(handle);
9019 struct hclge_dev *hdev = vport->back;
9020 u32 regs_num_32_bit, regs_num_64_bit;
9023 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9025 dev_err(&hdev->pdev->dev,
9026 "Get register number failed, ret = %d.\n", ret);
9030 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9031 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9032 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9033 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
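/* total dump length: the fixed cmdq/common blocks, one ring block
 * per queue, one interrupt block per vector, plus the 32/64-bit
 * register sets queried from firmware
 */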
9035 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9036 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9037 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9040 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9043 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9044 struct hclge_vport *vport = hclge_get_vport(handle);
9045 struct hclge_dev *hdev = vport->back;
9046 u32 regs_num_32_bit, regs_num_64_bit;
9047 int i, j, reg_um, separator_num;
9051 *version = hdev->fw_version;
9053 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9055 dev_err(&hdev->pdev->dev,
9056 "Get register number failed, ret = %d.\n", ret);
9060 /* fetch per-PF register values from the PF PCIe register space */
9061 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9062 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9063 for (i = 0; i < reg_um; i++)
9064 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9065 for (i = 0; i < separator_num; i++)
9066 *reg++ = SEPARATOR_VALUE;
9068 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9069 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9070 for (i = 0; i < reg_um; i++)
9071 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9072 for (i = 0; i < separator_num; i++)
9073 *reg++ = SEPARATOR_VALUE;
9075 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9076 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9077 for (j = 0; j < kinfo->num_tqps; j++) {
9078 for (i = 0; i < reg_um; i++)
9079 *reg++ = hclge_read_dev(&hdev->hw,
9080 ring_reg_addr_list[i] +
9082 for (i = 0; i < separator_num; i++)
9083 *reg++ = SEPARATOR_VALUE;
9086 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9087 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9088 for (j = 0; j < hdev->num_msi_used - 1; j++) {
9089 for (i = 0; i < reg_um; i++)
9090 *reg++ = hclge_read_dev(&hdev->hw,
9091 tqp_intr_reg_addr_list[i] +
9093 for (i = 0; i < separator_num; i++)
9094 *reg++ = SEPARATOR_VALUE;
9097 /* fetching PF common registers values from firmware */
9098 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9100 dev_err(&hdev->pdev->dev,
9101 "Get 32 bit register failed, ret = %d.\n", ret);
9105 reg += regs_num_32_bit;
9106 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9108 dev_err(&hdev->pdev->dev,
9109 "Get 64 bit register failed, ret = %d.\n", ret);
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
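/* Backs ethtool -p (set_phys_id): the ethtool core hands over
 * ETHTOOL_ID_ACTIVE / ETHTOOL_ID_INACTIVE; the periodic ETHTOOL_ID_ON /
 * ETHTOOL_ID_OFF blink callbacks are not used here, so anything else is
 * rejected with -EINVAL.
 */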
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
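/* hclge_ops is the PF backend's hnae3_ae_ops vtable: the hns3 client
 * drivers reach all of the PF operations above through these callbacks
 * instead of calling hclge functions directly.
 */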
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}
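/* Note: hnae3_register_ae_algo() matches the algo against already-probed
 * ae devices via ae_algo_pci_tbl and runs init_ae_dev() for each match,
 * so device initialization may happen synchronously during module load.
 */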
static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);