// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256
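
/* Illustrative use of the two stats macros above (not a separate API):
 * given a struct hclge_mac_stats *stats, any counter can be read
 * generically as
 *	u64 val = HCLGE_STATS_READ(stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 * This offset-based access is what lets hclge_comm_get_stats() walk
 * g_mac_stats_string[] below without naming each field.
 */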
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);

static struct hnae3_ae_algo ae_algo;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_RST_STATUS_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
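
/* The four *_reg_addr_list arrays above enumerate the registers exposed by
 * the driver's register-dump path (e.g. ethtool -d), grouped into cmdq,
 * common/misc, per-ring and per-TQP-interrupt registers.
 */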
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
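
/* Note: the single entry above matches the IEEE 802.1AB LLDP multicast
 * address 01:80:c2:00:00:0e (mac_addr_hi32 0x0180C200 plus mac_addr_lo16
 * 0x000E) together with the LLDP ethertype, so LLDP frames can be steered
 * by the MAC manager table rather than handled as ordinary traffic.
 */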
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
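
/* hclge_hash_key is the 40-byte key loaded into the hardware as the
 * default RSS hash key when RSS is initialised.
 */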
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n, ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}
		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}
	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}
	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}
		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}
	kfree(desc);
	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
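
/* The calculation above is 1 + DIV_ROUND_UP(reg_num - 3, 4): the head
 * descriptor accounts for the first three statistics registers, and every
 * additional descriptor covers four more.
 */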
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has TX & RX two queues */
	return kinfo->num_tqps * (2);
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
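
/* HCLGE_STATE_STATISTICS_UPDATING acts as a simple mutual-exclusion flag
 * here: a concurrent caller that loses the test_and_set_bit() race skips
 * this round of updating instead of blocking.
 */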
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it (GE/XGE/LGE/CGE)
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}
static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
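
/* The firmware speed codes decoded above mirror the encoding used when
 * configuring the MAC in hclge_cfg_mac_speed_dup_hw(): 1G=0, 10G=1,
 * 25G=2, 40G=3, 50G=4, 100G=5, 10M=6, 100M=7.
 */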
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}
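
/* Note: for 25G the LR helper above reuses the 25000baseSR link mode bit,
 * apparently because ethtool defines no dedicated 25G LR link mode.
 */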
static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	/* The two-step shift is equivalent to "<< 32", split up presumably
	 * to sidestep shift-width warnings for 32-bit-sized operands.
	 */
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length must be expressed in units of 4 bytes when sent to
		 * hardware.
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}
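
/* rss_size above is capped both by what the hardware reports
 * (rss_size_max) and by the vport's per-TC share of its queue pairs.
 */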
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps,
				       hdev->num_tx_desc, hdev->num_rx_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport; the main vport
	 * additionally takes the remainder.
	 */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
				 + hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
					     HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		if (tc_num)
			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
		else
			hi_thrd = shared_buf - hdev->dv_buf_size;

		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / 2;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
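
/* In short: the layout "fits" only if rx_all covers every enabled TC's
 * private buffer plus the standard shared buffer; when it does, the shared
 * buffer absorbs all remaining space and the per-TC thresholds are derived
 * from it.
 */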
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : 256;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
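
/* The sequence above tries progressively cheaper layouts: full watermarks,
 * reduced watermarks, then dropping private buffers first for non-PFC TCs
 * and finally for PFC TCs, before giving up with -ENOMEM.
 */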
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}
2092 struct hnae3_handle *roce = &vport->roce;
2093 struct hnae3_handle *nic = &vport->nic;
2095 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2097 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2098 vport->back->num_msi_left == 0)
2101 roce->rinfo.base_vector = vport->back->roce_base_vector;
2103 roce->rinfo.netdev = nic->kinfo.netdev;
2104 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2106 roce->pdev = nic->pdev;
2107 roce->ae_algo = nic->ae_algo;
2108 roce->numa_node_mask = nic->numa_node_mask;
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	/* Only 10M and 100M support half duplex; force full duplex for
	 * every other speed.
	 */
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}
2167 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2170 struct hclge_config_mac_speed_dup_cmd *req;
2171 struct hclge_desc desc;
2174 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2176 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2178 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2181 case HCLGE_MAC_SPEED_10M:
2182 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2183 HCLGE_CFG_SPEED_S, 6);
2185 case HCLGE_MAC_SPEED_100M:
2186 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2187 HCLGE_CFG_SPEED_S, 7);
2189 case HCLGE_MAC_SPEED_1G:
2190 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2191 HCLGE_CFG_SPEED_S, 0);
2193 case HCLGE_MAC_SPEED_10G:
2194 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2195 HCLGE_CFG_SPEED_S, 1);
2197 case HCLGE_MAC_SPEED_25G:
2198 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2199 HCLGE_CFG_SPEED_S, 2);
2201 case HCLGE_MAC_SPEED_40G:
2202 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2203 HCLGE_CFG_SPEED_S, 3);
2205 case HCLGE_MAC_SPEED_50G:
2206 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2207 HCLGE_CFG_SPEED_S, 4);
2209 case HCLGE_MAC_SPEED_100G:
2210 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2211 HCLGE_CFG_SPEED_S, 5);
2214 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2218 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2221 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2223 dev_err(&hdev->pdev->dev,
2224 "mac speed/duplex config cmd failed %d.\n", ret);
2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2235 duplex = hclge_check_speed_dup(duplex, speed);
2236 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2239 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2243 hdev->hw.mac.speed = speed;
2244 hdev->hw.mac.duplex = duplex;
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2252 struct hclge_vport *vport = hclge_get_vport(handle);
2253 struct hclge_dev *hdev = vport->back;
2255 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2260 struct hclge_config_auto_neg_cmd *req;
2261 struct hclge_desc desc;
2265 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2267 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2271 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2273 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2281 struct hclge_vport *vport = hclge_get_vport(handle);
2282 struct hclge_dev *hdev = vport->back;
2284 if (!hdev->hw.mac.support_autoneg) {
2286 dev_err(&hdev->pdev->dev,
2287 "autoneg is not supported by current port\n");
2294 return hclge_set_autoneg_en(hdev, enable);
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2299 struct hclge_vport *vport = hclge_get_vport(handle);
2300 struct hclge_dev *hdev = vport->back;
2301 struct phy_device *phydev = hdev->hw.mac.phydev;
2304 return phydev->autoneg;
2306 return hdev->hw.mac.autoneg;
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2311 struct hclge_vport *vport = hclge_get_vport(handle);
2312 struct hclge_dev *hdev = vport->back;
2315 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2317 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2320 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2325 struct hclge_config_fec_cmd *req;
2326 struct hclge_desc desc;
2329 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2331 req = (struct hclge_config_fec_cmd *)desc.data;
2332 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334 if (fec_mode & BIT(HNAE3_FEC_RS))
2335 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337 if (fec_mode & BIT(HNAE3_FEC_BASER))
2338 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2341 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2343 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2350 struct hclge_vport *vport = hclge_get_vport(handle);
2351 struct hclge_dev *hdev = vport->back;
2352 struct hclge_mac *mac = &hdev->hw.mac;
2355 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2360 ret = hclge_set_fec_hw(hdev, fec_mode);
2364 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2371 struct hclge_vport *vport = hclge_get_vport(handle);
2372 struct hclge_dev *hdev = vport->back;
2373 struct hclge_mac *mac = &hdev->hw.mac;
2376 *fec_ability = mac->fec_ability;
2378 *fec_mode = mac->fec_mode;
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2383 struct hclge_mac *mac = &hdev->hw.mac;
2386 hdev->support_sfp_query = true;
2387 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389 hdev->hw.mac.duplex);
2391 dev_err(&hdev->pdev->dev,
2392 "Config mac speed dup fail ret=%d\n", ret);
2398 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2401 dev_err(&hdev->pdev->dev,
2402 "Fec mode init fail, ret = %d\n", ret);
2407 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2409 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2413 ret = hclge_buffer_alloc(hdev);
2415 dev_err(&hdev->pdev->dev,
2416 "allocate buffer fail, ret=%d\n", ret);
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2423 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425 schedule_work(&hdev->mbx_service_task);
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2430 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2431 schedule_work(&hdev->rst_service_task);
2434 static void hclge_task_schedule(struct hclge_dev *hdev)
2436 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2437 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2438 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2439 (void)schedule_work(&hdev->service_task);
2442 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2444 struct hclge_link_status_cmd *req;
2445 struct hclge_desc desc;
2449 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2450 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2452 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2457 req = (struct hclge_link_status_cmd *)desc.data;
2458 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2460 return !!link_status;
2463 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2468 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2471 mac_state = hclge_get_mac_link_status(hdev);
2473 if (hdev->hw.mac.phydev) {
2474 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2475 link_stat = mac_state &
2476 hdev->hw.mac.phydev->link;
2481 link_stat = mac_state;
2487 static void hclge_update_link_status(struct hclge_dev *hdev)
2489 struct hnae3_client *rclient = hdev->roce_client;
2490 struct hnae3_client *client = hdev->nic_client;
2491 struct hnae3_handle *rhandle;
2492 struct hnae3_handle *handle;
2498 state = hclge_get_mac_phy_link(hdev);
2499 if (state != hdev->hw.mac.link) {
2500 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2501 handle = &hdev->vport[i].nic;
2502 client->ops->link_status_change(handle, state);
2503 hclge_config_mac_tnl_int(hdev, state);
2504 rhandle = &hdev->vport[i].roce;
2505 if (rclient && rclient->ops->link_status_change)
2506 rclient->ops->link_status_change(rhandle,
2509 hdev->hw.mac.link = state;
2513 static void hclge_update_port_capability(struct hclge_mac *mac)
2515 /* update fec ability by speed */
2516 hclge_convert_setting_fec(mac);
2518 /* firmware can not identify the backplane type; the media type
2519 * read from the configuration can help deal with it
2521 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2522 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2523 mac->module_type = HNAE3_MODULE_TYPE_KR;
2524 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2525 mac->module_type = HNAE3_MODULE_TYPE_TP;
2527 if (mac->support_autoneg) {
2528 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2529 linkmode_copy(mac->advertising, mac->supported);
2531 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2533 linkmode_zero(mac->advertising);
2537 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2539 struct hclge_sfp_info_cmd *resp = NULL;
2540 struct hclge_desc desc;
2543 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2544 resp = (struct hclge_sfp_info_cmd *)desc.data;
2545 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2546 if (ret == -EOPNOTSUPP) {
2547 dev_warn(&hdev->pdev->dev,
2548 "IMP do not support get SFP speed %d\n", ret);
2551 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2555 *speed = le32_to_cpu(resp->speed);
2560 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2562 struct hclge_sfp_info_cmd *resp;
2563 struct hclge_desc desc;
2566 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2567 resp = (struct hclge_sfp_info_cmd *)desc.data;
2569 resp->query_type = QUERY_ACTIVE_SPEED;
2571 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2572 if (ret == -EOPNOTSUPP) {
2573 dev_warn(&hdev->pdev->dev,
2574 "IMP does not support get SFP info %d\n", ret);
2577 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2581 mac->speed = le32_to_cpu(resp->speed);
2582 /* if resp->speed_ability is 0, it means it is an old firmware
2583 * version, so do not update these params
2585 if (resp->speed_ability) {
2586 mac->module_type = le32_to_cpu(resp->module_type);
2587 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2588 mac->autoneg = resp->autoneg;
2589 mac->support_autoneg = resp->autoneg_ability;
2590 if (!resp->active_fec)
2593 mac->fec_mode = BIT(resp->active_fec);
2595 mac->speed_type = QUERY_SFP_SPEED;
2601 static int hclge_update_port_info(struct hclge_dev *hdev)
2603 struct hclge_mac *mac = &hdev->hw.mac;
2604 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2607 /* get the port info from SFP cmd if not copper port */
2608 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2611 /* if IMP does not support get SFP/qSFP info, return directly */
2612 if (!hdev->support_sfp_query)
2615 if (hdev->pdev->revision >= 0x21)
2616 ret = hclge_get_sfp_info(hdev, mac);
2618 ret = hclge_get_sfp_speed(hdev, &speed);
2620 if (ret == -EOPNOTSUPP) {
2621 hdev->support_sfp_query = false;
2627 if (hdev->pdev->revision >= 0x21) {
2628 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2629 hclge_update_port_capability(mac);
2632 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2635 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2636 return 0; /* do nothing if no SFP */
2638 /* must config full duplex for SFP */
2639 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2643 static int hclge_get_status(struct hnae3_handle *handle)
2645 struct hclge_vport *vport = hclge_get_vport(handle);
2646 struct hclge_dev *hdev = vport->back;
2648 hclge_update_link_status(hdev);
2650 return hdev->hw.mac.link;
2653 static void hclge_service_timer(struct timer_list *t)
2655 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2657 mod_timer(&hdev->service_timer, jiffies + HZ);
2658 hdev->hw_stats.stats_timer++;
2659 hdev->fd_arfs_expire_timer++;
2660 hclge_task_schedule(hdev);
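/* Note: the timer above re-arms itself every HZ (one second), so
 * stats_timer and fd_arfs_expire_timer effectively count elapsed seconds;
 * hclge_service_task() later compares them against
 * HCLGE_STATS_TIMER_INTERVAL and HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL.
 */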
2663 static void hclge_service_complete(struct hclge_dev *hdev)
2665 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2667 /* Flush memory before next watchdog */
2668 smp_mb__before_atomic();
2669 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2672 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2674 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2676 /* fetch the events from their corresponding regs */
2677 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2678 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2679 msix_src_reg = hclge_read_dev(&hdev->hw,
2680 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2682 /* Assumption: If by any chance reset and mailbox events are reported
2683 * together then we will only process reset event in this go and will
2684 * defer the processing of the mailbox events. Since we would not have
2685 * cleared the RX CMDQ event this time, we would receive another
2686 * interrupt from H/W just for the mailbox.
2689 /* check for vector0 reset event sources */
2690 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2691 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2692 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2693 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2694 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2695 hdev->rst_stats.imp_rst_cnt++;
2696 return HCLGE_VECTOR0_EVENT_RST;
2699 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2700 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2701 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2702 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2703 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2704 hdev->rst_stats.global_rst_cnt++;
2705 return HCLGE_VECTOR0_EVENT_RST;
2708 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2709 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2710 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2711 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2712 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2713 hdev->rst_stats.core_rst_cnt++;
2714 return HCLGE_VECTOR0_EVENT_RST;
2717 /* check for vector0 msix event source */
2718 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2719 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2721 return HCLGE_VECTOR0_EVENT_ERR;
2724 /* check for vector0 mailbox(=CMDQ RX) event source */
2725 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2726 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2727 *clearval = cmdq_src_reg;
2728 return HCLGE_VECTOR0_EVENT_MBX;
2731 /* print other vector0 event source */
2732 dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2733 cmdq_src_reg, msix_src_reg);
2734 return HCLGE_VECTOR0_EVENT_OTHER;
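/* Note: the checks above give a fixed priority order of RST (IMP, then
 * global, then core) > ERR (msix) > MBX > OTHER; the first match wins, and
 * a lower-priority source such as a mailbox event is left uncleared so
 * hardware raises another interrupt for it later.
 */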
2737 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2740 switch (event_type) {
2741 case HCLGE_VECTOR0_EVENT_RST:
2742 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2744 case HCLGE_VECTOR0_EVENT_MBX:
2745 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2752 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2754 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2755 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2756 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2757 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2758 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2761 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2763 writel(enable ? 1 : 0, vector->addr);
2766 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2768 struct hclge_dev *hdev = data;
2772 hclge_enable_vector(&hdev->misc_vector, false);
2773 event_cause = hclge_check_event_cause(hdev, &clearval);
2775 /* vector 0 interrupt is shared with reset and mailbox source events. */
2776 switch (event_cause) {
2777 case HCLGE_VECTOR0_EVENT_ERR:
2778 /* we do not know what type of reset is required now. This could
2779 * only be decided after we fetch the type of errors which
2780 * caused this event. Therefore, we will do below for now:
2781 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2782 * have deferred the type of reset to be used.
2783 * 2. Schedule the reset service task.
2784 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2785 * will fetch the correct type of reset. This would be done
2786 * by first decoding the types of errors.
2788 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2790 case HCLGE_VECTOR0_EVENT_RST:
2791 hclge_reset_task_schedule(hdev);
2793 case HCLGE_VECTOR0_EVENT_MBX:
2794 /* If we are here then,
2795 * 1. Either we are not handling any mbx task and we are not
2796 * scheduled as well
2798 * 2. We could be handling a mbx task but nothing more is
2799 * scheduled.
2800 * In both cases, we should schedule mbx task as there are more
2801 * mbx messages reported by this interrupt.
2803 hclge_mbx_task_schedule(hdev);
2806 dev_warn(&hdev->pdev->dev,
2807 "received unknown or unhandled event of vector0\n");
2811 /* clear the source of interrupt if it is not caused by reset */
2812 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2813 hclge_clear_event_cause(hdev, event_cause, clearval);
2814 hclge_enable_vector(&hdev->misc_vector, true);
2820 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2822 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2823 dev_warn(&hdev->pdev->dev,
2824 "vector(vector_id %d) has been freed.\n", vector_id);
2828 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2829 hdev->num_msi_left += 1;
2830 hdev->num_msi_used -= 1;
2833 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2835 struct hclge_misc_vector *vector = &hdev->misc_vector;
2837 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2839 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2840 hdev->vector_status[0] = 0;
2842 hdev->num_msi_left -= 1;
2843 hdev->num_msi_used += 1;
2846 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2850 hclge_get_misc_vector(hdev);
2852 /* this would be explicitly freed in the end */
2853 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2854 0, "hclge_misc", hdev);
2856 hclge_free_vector(hdev, 0);
2857 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2858 hdev->misc_vector.vector_irq);
2864 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2866 free_irq(hdev->misc_vector.vector_irq, hdev);
2867 hclge_free_vector(hdev, 0);
2870 int hclge_notify_client(struct hclge_dev *hdev,
2871 enum hnae3_reset_notify_type type)
2873 struct hnae3_client *client = hdev->nic_client;
2876 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
2880 if (!client->ops->reset_notify)
2883 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2884 struct hnae3_handle *handle = &hdev->vport[i].nic;
2887 ret = client->ops->reset_notify(handle, type);
2889 dev_err(&hdev->pdev->dev,
2890 "notify nic client failed %d(%d)\n", type, ret);
2898 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2899 enum hnae3_reset_notify_type type)
2901 struct hnae3_client *client = hdev->roce_client;
2905 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
2909 if (!client->ops->reset_notify)
2912 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2913 struct hnae3_handle *handle = &hdev->vport[i].roce;
2915 ret = client->ops->reset_notify(handle, type);
2917 dev_err(&hdev->pdev->dev,
2918 "notify roce client failed %d(%d)",
2927 static int hclge_reset_wait(struct hclge_dev *hdev)
2929 #define HCLGE_RESET_WAIT_MS 100
2930 #define HCLGE_RESET_WAIT_CNT 200
2931 u32 val, reg, reg_bit;
2934 switch (hdev->reset_type) {
2935 case HNAE3_IMP_RESET:
2936 reg = HCLGE_GLOBAL_RESET_REG;
2937 reg_bit = HCLGE_IMP_RESET_BIT;
2939 case HNAE3_GLOBAL_RESET:
2940 reg = HCLGE_GLOBAL_RESET_REG;
2941 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2943 case HNAE3_CORE_RESET:
2944 reg = HCLGE_GLOBAL_RESET_REG;
2945 reg_bit = HCLGE_CORE_RESET_BIT;
2947 case HNAE3_FUNC_RESET:
2948 reg = HCLGE_FUN_RST_ING;
2949 reg_bit = HCLGE_FUN_RST_ING_B;
2951 case HNAE3_FLR_RESET:
2954 dev_err(&hdev->pdev->dev,
2955 "Wait for unsupported reset type: %d\n",
2960 if (hdev->reset_type == HNAE3_FLR_RESET) {
2961 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2962 cnt++ < HCLGE_RESET_WAIT_CNT)
2963 msleep(HCLGE_RESET_WAIT_MS);
2965 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2966 dev_err(&hdev->pdev->dev,
2967 "flr wait timeout: %d\n", cnt);
2974 val = hclge_read_dev(&hdev->hw, reg);
2975 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2976 msleep(HCLGE_RESET_WAIT_MS);
2977 val = hclge_read_dev(&hdev->hw, reg);
2981 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2982 dev_warn(&hdev->pdev->dev,
2983 "Wait for reset timeout: %d\n", hdev->reset_type);
2990 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2992 struct hclge_vf_rst_cmd *req;
2993 struct hclge_desc desc;
2995 req = (struct hclge_vf_rst_cmd *)desc.data;
2996 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2997 req->dest_vfid = func_id;
3002 return hclge_cmd_send(&hdev->hw, &desc, 1);
3005 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3009 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3010 struct hclge_vport *vport = &hdev->vport[i];
3013 /* Send cmd to set/clear VF's FUNC_RST_ING */
3014 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3016 dev_err(&hdev->pdev->dev,
3017 "set vf(%d) rst failed %d!\n",
3018 vport->vport_id, ret);
3022 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3025 /* Inform VF to process the reset.
3026 * hclge_inform_reset_assert_to_vf may fail if VF
3027 * driver is not loaded.
3029 ret = hclge_inform_reset_assert_to_vf(vport);
3031 dev_warn(&hdev->pdev->dev,
3032 "inform reset to vf(%d) failed %d!\n",
3033 vport->vport_id, ret);
3039 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3041 struct hclge_desc desc;
3042 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3045 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3046 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3047 req->fun_reset_vfid = func_id;
3049 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3051 dev_err(&hdev->pdev->dev,
3052 "send function reset cmd fail, status =%d\n", ret);
3057 static void hclge_do_reset(struct hclge_dev *hdev)
3059 struct hnae3_handle *handle = &hdev->vport[0].nic;
3060 struct pci_dev *pdev = hdev->pdev;
3063 if (hclge_get_hw_reset_stat(handle)) {
3064 dev_info(&pdev->dev, "Hardware reset not finish\n");
3065 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3066 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3067 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3071 switch (hdev->reset_type) {
3072 case HNAE3_GLOBAL_RESET:
3073 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3074 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3075 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3076 dev_info(&pdev->dev, "Global Reset requested\n");
3078 case HNAE3_CORE_RESET:
3079 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3080 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
3081 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3082 dev_info(&pdev->dev, "Core Reset requested\n");
3084 case HNAE3_FUNC_RESET:
3085 dev_info(&pdev->dev, "PF Reset requested\n");
3086 /* schedule again to check later */
3087 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3088 hclge_reset_task_schedule(hdev);
3090 case HNAE3_FLR_RESET:
3091 dev_info(&pdev->dev, "FLR requested\n");
3092 /* schedule again to check later */
3093 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3094 hclge_reset_task_schedule(hdev);
3097 dev_warn(&pdev->dev,
3098 "Unsupported reset type: %d\n", hdev->reset_type);
3103 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3104 unsigned long *addr)
3106 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3108 /* first, resolve any unknown reset type to the known type(s) */
3109 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3110 /* we will intentionally ignore any errors from this function
3111 * as we will end up in *some* reset request in any case
3113 hclge_handle_hw_msix_error(hdev, addr);
3114 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3115 /* We deferred the clearing of the error event which caused the
3116 * interrupt, since it was not possible to do that in
3117 * interrupt context (and this is the reason we introduced the
3118 * new UNKNOWN reset type). Now that the errors have been
3119 * handled and cleared in hardware, we can safely enable
3120 * interrupts. This is an exception to the norm.
3122 hclge_enable_vector(&hdev->misc_vector, true);
3125 /* return the highest priority reset level amongst all */
3126 if (test_bit(HNAE3_IMP_RESET, addr)) {
3127 rst_level = HNAE3_IMP_RESET;
3128 clear_bit(HNAE3_IMP_RESET, addr);
3129 clear_bit(HNAE3_GLOBAL_RESET, addr);
3130 clear_bit(HNAE3_CORE_RESET, addr);
3131 clear_bit(HNAE3_FUNC_RESET, addr);
3132 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3133 rst_level = HNAE3_GLOBAL_RESET;
3134 clear_bit(HNAE3_GLOBAL_RESET, addr);
3135 clear_bit(HNAE3_CORE_RESET, addr);
3136 clear_bit(HNAE3_FUNC_RESET, addr);
3137 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3138 rst_level = HNAE3_CORE_RESET;
3139 clear_bit(HNAE3_CORE_RESET, addr);
3140 clear_bit(HNAE3_FUNC_RESET, addr);
3141 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3142 rst_level = HNAE3_FUNC_RESET;
3143 clear_bit(HNAE3_FUNC_RESET, addr);
3144 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3145 rst_level = HNAE3_FLR_RESET;
3146 clear_bit(HNAE3_FLR_RESET, addr);
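/* Note: the ladder above picks the highest-priority pending reset
 * (IMP > GLOBAL > CORE > FUNC > FLR) and clears every lower-priority bit
 * it supersedes, since the bigger reset already covers the smaller ones.
 */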
3149 if (hdev->reset_type != HNAE3_NONE_RESET &&
3150 rst_level < hdev->reset_type)
3151 return HNAE3_NONE_RESET;
3156 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3160 switch (hdev->reset_type) {
3161 case HNAE3_IMP_RESET:
3162 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3164 case HNAE3_GLOBAL_RESET:
3165 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3167 case HNAE3_CORE_RESET:
3168 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3177 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3178 hclge_enable_vector(&hdev->misc_vector, true);
3181 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3185 switch (hdev->reset_type) {
3186 case HNAE3_FUNC_RESET:
3188 case HNAE3_FLR_RESET:
3189 ret = hclge_set_all_vf_rst(hdev, true);
3198 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3200 #define HCLGE_RESET_SYNC_TIME 100
3205 switch (hdev->reset_type) {
3206 case HNAE3_FUNC_RESET:
3207 /* There is no mechanism for the PF to know if the VF has stopped IO
3208 * yet; for now, just wait 100 ms for the VF to stop IO
3210 msleep(HCLGE_RESET_SYNC_TIME);
3211 ret = hclge_func_reset_cmd(hdev, 0);
3213 dev_err(&hdev->pdev->dev,
3214 "asserting function reset fail %d!\n", ret);
3218 /* After performing PF reset, it is not necessary to do the
3219 * mailbox handling or send any command to firmware, because
3220 * any mailbox handling or command to firmware is only valid
3221 * after hclge_cmd_init is called.
3223 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3224 hdev->rst_stats.pf_rst_cnt++;
3226 case HNAE3_FLR_RESET:
3227 /* There is no mechanism for the PF to know if the VF has stopped IO
3228 * yet; for now, just wait 100 ms for the VF to stop IO
3230 msleep(HCLGE_RESET_SYNC_TIME);
3231 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3232 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3233 hdev->rst_stats.flr_rst_cnt++;
3235 case HNAE3_IMP_RESET:
3236 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3237 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3238 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3244 /* inform hardware that preparatory work is done */
3245 msleep(HCLGE_RESET_SYNC_TIME);
3246 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3247 HCLGE_NIC_CMQ_ENABLE);
3248 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3253 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3255 #define MAX_RESET_FAIL_CNT 5
3256 #define RESET_UPGRADE_DELAY_SEC 10
3258 if (hdev->reset_pending) {
3259 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3260 hdev->reset_pending);
3262 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3263 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3264 BIT(HCLGE_IMP_RESET_BIT))) {
3265 dev_info(&hdev->pdev->dev,
3266 "reset failed because IMP Reset is pending\n");
3267 hclge_clear_reset_cause(hdev);
3269 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3270 hdev->reset_fail_cnt++;
3272 set_bit(hdev->reset_type, &hdev->reset_pending);
3273 dev_info(&hdev->pdev->dev,
3274 "re-schedule to wait for hw reset done\n");
3278 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3279 hclge_clear_reset_cause(hdev);
3280 mod_timer(&hdev->reset_timer,
3281 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3286 hclge_clear_reset_cause(hdev);
3287 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3291 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3295 switch (hdev->reset_type) {
3296 case HNAE3_FUNC_RESET:
3298 case HNAE3_FLR_RESET:
3299 ret = hclge_set_all_vf_rst(hdev, false);
3308 static void hclge_reset(struct hclge_dev *hdev)
3310 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3311 bool is_timeout = false;
3314 /* Initialize ae_dev reset status as well, in case enet layer wants to
3315 * know if device is undergoing reset
3317 ae_dev->reset_type = hdev->reset_type;
3318 hdev->rst_stats.reset_cnt++;
3319 /* perform reset of the stack & ae device for a client */
3320 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3324 ret = hclge_reset_prepare_down(hdev);
3329 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3331 goto err_reset_lock;
3335 ret = hclge_reset_prepare_wait(hdev);
3339 if (hclge_reset_wait(hdev)) {
3344 hdev->rst_stats.hw_reset_done_cnt++;
3346 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3351 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3353 goto err_reset_lock;
3355 ret = hclge_reset_ae_dev(hdev->ae_dev);
3357 goto err_reset_lock;
3359 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3361 goto err_reset_lock;
3363 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3365 goto err_reset_lock;
3367 hclge_clear_reset_cause(hdev);
3369 ret = hclge_reset_prepare_up(hdev);
3371 goto err_reset_lock;
3373 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3375 goto err_reset_lock;
3379 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3383 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3387 hdev->last_reset_time = jiffies;
3388 hdev->reset_fail_cnt = 0;
3389 hdev->rst_stats.reset_done_cnt++;
3390 ae_dev->reset_type = HNAE3_NONE_RESET;
3391 del_timer(&hdev->reset_timer);
3398 if (hclge_reset_err_handle(hdev, is_timeout))
3399 hclge_reset_task_schedule(hdev);
3402 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3404 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3405 struct hclge_dev *hdev = ae_dev->priv;
3407 /* We might end up getting called broadly because of 2 below cases:
3408 * 1. Recoverable error was conveyed through APEI and only way to bring
3409 * normalcy is to reset.
3410 * 2. A new reset request from the stack due to timeout
3412 * For the first case, the error event might not have an ae handle available.
3413 * check if this is a new reset request and we are not here just because
3414 * last reset attempt did not succeed and watchdog hit us again. We will
3415 * know this if last reset request did not occur very recently (watchdog
3416 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ)
3417 * In case of new request we reset the "reset level" to PF reset.
3418 * And if it is a repeat reset request of the most recent one then we
3419 * want to make sure we throttle the reset request. Therefore, we will
3420 * not allow it again before 3*HZ jiffies have elapsed.
3423 handle = &hdev->vport[0].nic;
3425 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3427 else if (hdev->default_reset_request)
3429 hclge_get_reset_level(hdev,
3430 &hdev->default_reset_request);
3431 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3432 hdev->reset_level = HNAE3_FUNC_RESET;
3434 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
3437 /* request reset & schedule reset task */
3438 set_bit(hdev->reset_level, &hdev->reset_request);
3439 hclge_reset_task_schedule(hdev);
3441 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3442 hdev->reset_level++;
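/* Note: a request arriving within 3*HZ of the previous one keeps the
 * current reset_level, and each honored request bumps reset_level one
 * step, up to HNAE3_GLOBAL_RESET, so repeated timeouts escalate to
 * progressively stronger resets.
 */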
3445 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3446 enum hnae3_reset_type rst_type)
3448 struct hclge_dev *hdev = ae_dev->priv;
3450 set_bit(rst_type, &hdev->default_reset_request);
3453 static void hclge_reset_timer(struct timer_list *t)
3455 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3457 dev_info(&hdev->pdev->dev,
3458 "triggering global reset in reset timer\n");
3459 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3460 hclge_reset_event(hdev->pdev, NULL);
3463 static void hclge_reset_subtask(struct hclge_dev *hdev)
3465 /* check if there is any ongoing reset in the hardware. This status can
3466 * be checked from reset_pending. If there is, then we need to wait for
3467 * hardware to complete reset.
3468 * a. If we are able to figure out in reasonable time that hardware
3469 * has fully reset, then we can proceed with driver and client
3470 * initialization.
3471 * b. else, we can come back later to check this status, so re-sched
3474 hdev->last_reset_time = jiffies;
3475 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3476 if (hdev->reset_type != HNAE3_NONE_RESET)
3479 /* check if we got any *new* reset requests to be honored */
3480 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3481 if (hdev->reset_type != HNAE3_NONE_RESET)
3482 hclge_do_reset(hdev);
3484 hdev->reset_type = HNAE3_NONE_RESET;
3487 static void hclge_reset_service_task(struct work_struct *work)
3489 struct hclge_dev *hdev =
3490 container_of(work, struct hclge_dev, rst_service_task);
3492 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3495 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3497 hclge_reset_subtask(hdev);
3499 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3502 static void hclge_mailbox_service_task(struct work_struct *work)
3504 struct hclge_dev *hdev =
3505 container_of(work, struct hclge_dev, mbx_service_task);
3507 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3510 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3512 hclge_mbx_handler(hdev);
3514 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3517 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3521 /* start from vport 1 because the PF is always alive */
3522 for (i = 1; i < hdev->num_alloc_vport; i++) {
3523 struct hclge_vport *vport = &hdev->vport[i];
3525 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3526 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3528 /* If vf is not alive, set to default value */
3529 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3530 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3534 static void hclge_service_task(struct work_struct *work)
3536 struct hclge_dev *hdev =
3537 container_of(work, struct hclge_dev, service_task);
3539 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3540 hclge_update_stats_for_all(hdev);
3541 hdev->hw_stats.stats_timer = 0;
3544 hclge_update_port_info(hdev);
3545 hclge_update_link_status(hdev);
3546 hclge_update_vport_alive(hdev);
3547 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3548 hclge_rfs_filter_expire(hdev);
3549 hdev->fd_arfs_expire_timer = 0;
3551 hclge_service_complete(hdev);
3554 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3556 /* VF handle has no client */
3557 if (!handle->client)
3558 return container_of(handle, struct hclge_vport, nic);
3559 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3560 return container_of(handle, struct hclge_vport, roce);
3562 return container_of(handle, struct hclge_vport, nic);
3565 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3566 struct hnae3_vector_info *vector_info)
3568 struct hclge_vport *vport = hclge_get_vport(handle);
3569 struct hnae3_vector_info *vector = vector_info;
3570 struct hclge_dev *hdev = vport->back;
3574 vector_num = min(hdev->num_msi_left, vector_num);
3576 for (j = 0; j < vector_num; j++) {
3577 for (i = 1; i < hdev->num_msi; i++) {
3578 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3579 vector->vector = pci_irq_vector(hdev->pdev, i);
3580 vector->io_addr = hdev->hw.io_base +
3581 HCLGE_VECTOR_REG_BASE +
3582 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3584 HCLGE_VECTOR_VF_OFFSET;
3585 hdev->vector_status[i] = vport->vport_id;
3586 hdev->vector_irq[i] = vector->vector;
3595 hdev->num_msi_left -= alloc;
3596 hdev->num_msi_used += alloc;
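/* Note: the scan above starts at i = 1 because vector 0 is reserved for
 * the misc (reset/mailbox) interrupt claimed by hclge_get_misc_vector();
 * each allocated vector's doorbell address is computed from its index
 * relative to HCLGE_VECTOR_REG_BASE.
 */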
3601 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3605 for (i = 0; i < hdev->num_msi; i++)
3606 if (vector == hdev->vector_irq[i])
3612 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3614 struct hclge_vport *vport = hclge_get_vport(handle);
3615 struct hclge_dev *hdev = vport->back;
3618 vector_id = hclge_get_vector_index(hdev, vector);
3619 if (vector_id < 0) {
3620 dev_err(&hdev->pdev->dev,
3621 "Get vector index fail. vector_id =%d\n", vector_id);
3625 hclge_free_vector(hdev, vector_id);
3630 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3632 return HCLGE_RSS_KEY_SIZE;
3635 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3637 return HCLGE_RSS_IND_TBL_SIZE;
3640 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3641 const u8 hfunc, const u8 *key)
3643 struct hclge_rss_config_cmd *req;
3644 struct hclge_desc desc;
3649 req = (struct hclge_rss_config_cmd *)desc.data;
3651 for (key_offset = 0; key_offset < 3; key_offset++) {
3652 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3655 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3656 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3658 if (key_offset == 2)
3660 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3662 key_size = HCLGE_RSS_HASH_KEY_NUM;
3664 memcpy(req->hash_key,
3665 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3667 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3669 dev_err(&hdev->pdev->dev,
3670 "Configure RSS config fail, status = %d\n",
3678 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3680 struct hclge_rss_indirection_table_cmd *req;
3681 struct hclge_desc desc;
3685 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3687 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3688 hclge_cmd_setup_basic_desc
3689 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3691 req->start_table_index =
3692 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3693 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3695 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3696 req->rss_result[j] =
3697 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3699 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3701 dev_err(&hdev->pdev->dev,
3702 "Configure rss indir table fail,status = %d\n",
3710 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3711 u16 *tc_size, u16 *tc_offset)
3713 struct hclge_rss_tc_mode_cmd *req;
3714 struct hclge_desc desc;
3718 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3719 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3721 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3724 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3725 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3726 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3727 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3728 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3730 req->rss_tc_mode[i] = cpu_to_le16(mode);
3733 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3735 dev_err(&hdev->pdev->dev,
3736 "Configure rss tc mode fail, status = %d\n", ret);
3741 static void hclge_get_rss_type(struct hclge_vport *vport)
3743 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3744 vport->rss_tuple_sets.ipv4_udp_en ||
3745 vport->rss_tuple_sets.ipv4_sctp_en ||
3746 vport->rss_tuple_sets.ipv6_tcp_en ||
3747 vport->rss_tuple_sets.ipv6_udp_en ||
3748 vport->rss_tuple_sets.ipv6_sctp_en)
3749 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3750 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3751 vport->rss_tuple_sets.ipv6_fragment_en)
3752 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3754 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3757 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3759 struct hclge_rss_input_tuple_cmd *req;
3760 struct hclge_desc desc;
3763 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3765 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3767 /* Get the tuple cfg from pf */
3768 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3769 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3770 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3771 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3772 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3773 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3774 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3775 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3776 hclge_get_rss_type(&hdev->vport[0]);
3777 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3779 dev_err(&hdev->pdev->dev,
3780 "Configure rss input fail, status = %d\n", ret);
3784 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3787 struct hclge_vport *vport = hclge_get_vport(handle);
3790 /* Get hash algorithm */
3792 switch (vport->rss_algo) {
3793 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3794 *hfunc = ETH_RSS_HASH_TOP;
3796 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3797 *hfunc = ETH_RSS_HASH_XOR;
3800 *hfunc = ETH_RSS_HASH_UNKNOWN;
3805 /* Get the RSS Key required by the user */
3807 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3809 /* Get indirect table */
3811 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3812 indir[i] = vport->rss_indirection_tbl[i];
3817 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3818 const u8 *key, const u8 hfunc)
3820 struct hclge_vport *vport = hclge_get_vport(handle);
3821 struct hclge_dev *hdev = vport->back;
3825 /* Set the RSS Hash Key if specified by the user */
3828 case ETH_RSS_HASH_TOP:
3829 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3831 case ETH_RSS_HASH_XOR:
3832 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3834 case ETH_RSS_HASH_NO_CHANGE:
3835 hash_algo = vport->rss_algo;
3841 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3845 /* Update the shadow RSS key with the user specified key */
3846 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3847 vport->rss_algo = hash_algo;
3850 /* Update the shadow RSS table with user specified qids */
3851 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3852 vport->rss_indirection_tbl[i] = indir[i];
3854 /* Update the hardware */
3855 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3858 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3860 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3862 if (nfc->data & RXH_L4_B_2_3)
3863 hash_sets |= HCLGE_D_PORT_BIT;
3865 hash_sets &= ~HCLGE_D_PORT_BIT;
3867 if (nfc->data & RXH_IP_SRC)
3868 hash_sets |= HCLGE_S_IP_BIT;
3870 hash_sets &= ~HCLGE_S_IP_BIT;
3872 if (nfc->data & RXH_IP_DST)
3873 hash_sets |= HCLGE_D_IP_BIT;
3875 hash_sets &= ~HCLGE_D_IP_BIT;
3877 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3878 hash_sets |= HCLGE_V_TAG_BIT;
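/* Sketch (illustrative only, not part of the original driver): how the
 * helper above maps ethtool RXH_* flags for a TCP/IPv4 flow.
 */
static u8 hclge_example_tcp4_hash_bits(void)
{
	struct ethtool_rxnfc nfc = {
		.flow_type = TCP_V4_FLOW,
		.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
	};

	/* yields HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
	 * HCLGE_D_PORT_BIT; an SCTP flow_type would additionally set
	 * HCLGE_V_TAG_BIT
	 */
	return hclge_get_rss_hash_bits(&nfc);
}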
3883 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3884 struct ethtool_rxnfc *nfc)
3886 struct hclge_vport *vport = hclge_get_vport(handle);
3887 struct hclge_dev *hdev = vport->back;
3888 struct hclge_rss_input_tuple_cmd *req;
3889 struct hclge_desc desc;
3893 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3894 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3897 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3898 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3900 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3901 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3902 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3903 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3904 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3905 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3906 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3907 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3909 tuple_sets = hclge_get_rss_hash_bits(nfc);
3910 switch (nfc->flow_type) {
3912 req->ipv4_tcp_en = tuple_sets;
3915 req->ipv6_tcp_en = tuple_sets;
3918 req->ipv4_udp_en = tuple_sets;
3921 req->ipv6_udp_en = tuple_sets;
3924 req->ipv4_sctp_en = tuple_sets;
3927 if ((nfc->data & RXH_L4_B_0_1) ||
3928 (nfc->data & RXH_L4_B_2_3))
3931 req->ipv6_sctp_en = tuple_sets;
3934 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3937 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3943 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3945 dev_err(&hdev->pdev->dev,
3946 "Set rss tuple fail, status = %d\n", ret);
3950 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3951 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3952 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3953 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3954 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3955 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3956 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3957 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3958 hclge_get_rss_type(vport);
3962 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3963 struct ethtool_rxnfc *nfc)
3965 struct hclge_vport *vport = hclge_get_vport(handle);
3970 switch (nfc->flow_type) {
3972 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3975 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3978 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3981 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3984 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3987 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3991 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4000 if (tuple_sets & HCLGE_D_PORT_BIT)
4001 nfc->data |= RXH_L4_B_2_3;
4002 if (tuple_sets & HCLGE_S_PORT_BIT)
4003 nfc->data |= RXH_L4_B_0_1;
4004 if (tuple_sets & HCLGE_D_IP_BIT)
4005 nfc->data |= RXH_IP_DST;
4006 if (tuple_sets & HCLGE_S_IP_BIT)
4007 nfc->data |= RXH_IP_SRC;
4012 static int hclge_get_tc_size(struct hnae3_handle *handle)
4014 struct hclge_vport *vport = hclge_get_vport(handle);
4015 struct hclge_dev *hdev = vport->back;
4017 return hdev->rss_size_max;
4020 int hclge_rss_init_hw(struct hclge_dev *hdev)
4022 struct hclge_vport *vport = hdev->vport;
4023 u8 *rss_indir = vport[0].rss_indirection_tbl;
4024 u16 rss_size = vport[0].alloc_rss_size;
4025 u8 *key = vport[0].rss_hash_key;
4026 u8 hfunc = vport[0].rss_algo;
4027 u16 tc_offset[HCLGE_MAX_TC_NUM];
4028 u16 tc_valid[HCLGE_MAX_TC_NUM];
4029 u16 tc_size[HCLGE_MAX_TC_NUM];
4033 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4037 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4041 ret = hclge_set_rss_input_tuple(hdev);
4045 /* Each TC has the same queue size, and the tc_size set to hardware is
4046 * the log2 of the roundup power of two of rss_size; the actual queue
4047 * size is limited by the indirection table.
4049 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4050 dev_err(&hdev->pdev->dev,
4051 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4056 roundup_size = roundup_pow_of_two(rss_size);
4057 roundup_size = ilog2(roundup_size);
4059 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4062 if (!(hdev->hw_tc_map & BIT(i)))
4066 tc_size[i] = roundup_size;
4067 tc_offset[i] = rss_size * i;
4070 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
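/* Sketch (hypothetical rss_size, not part of the original driver): the
 * tc_size computation above in isolation.
 */
static inline u16 hclge_example_tc_size(u16 rss_size)
{
	/* e.g. rss_size = 24: roundup_pow_of_two(24) = 32, ilog2(32) = 5,
	 * so hardware reserves 2^5 = 32 queue slots per TC while tc_offset
	 * still advances in steps of the real rss_size, 24.
	 */
	return ilog2(roundup_pow_of_two(rss_size));
}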
4073 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4075 struct hclge_vport *vport = hdev->vport;
4078 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4079 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4080 vport[j].rss_indirection_tbl[i] =
4081 i % vport[j].alloc_rss_size;
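/* Worked example: with alloc_rss_size = 16, the loop above fills the
 * HCLGE_RSS_IND_TBL_SIZE-entry indirection table with the repeating
 * pattern 0, 1, ..., 15, 0, 1, ..., spreading flows evenly over the
 * allocated RSS queues.
 */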
4085 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4087 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4088 struct hclge_vport *vport = hdev->vport;
4090 if (hdev->pdev->revision >= 0x21)
4091 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4093 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4094 vport[i].rss_tuple_sets.ipv4_tcp_en =
4095 HCLGE_RSS_INPUT_TUPLE_OTHER;
4096 vport[i].rss_tuple_sets.ipv4_udp_en =
4097 HCLGE_RSS_INPUT_TUPLE_OTHER;
4098 vport[i].rss_tuple_sets.ipv4_sctp_en =
4099 HCLGE_RSS_INPUT_TUPLE_SCTP;
4100 vport[i].rss_tuple_sets.ipv4_fragment_en =
4101 HCLGE_RSS_INPUT_TUPLE_OTHER;
4102 vport[i].rss_tuple_sets.ipv6_tcp_en =
4103 HCLGE_RSS_INPUT_TUPLE_OTHER;
4104 vport[i].rss_tuple_sets.ipv6_udp_en =
4105 HCLGE_RSS_INPUT_TUPLE_OTHER;
4106 vport[i].rss_tuple_sets.ipv6_sctp_en =
4107 HCLGE_RSS_INPUT_TUPLE_SCTP;
4108 vport[i].rss_tuple_sets.ipv6_fragment_en =
4109 HCLGE_RSS_INPUT_TUPLE_OTHER;
4111 vport[i].rss_algo = rss_algo;
4113 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4114 HCLGE_RSS_KEY_SIZE);
4117 hclge_rss_indir_init_cfg(hdev);
4120 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4121 int vector_id, bool en,
4122 struct hnae3_ring_chain_node *ring_chain)
4124 struct hclge_dev *hdev = vport->back;
4125 struct hnae3_ring_chain_node *node;
4126 struct hclge_desc desc;
4127 struct hclge_ctrl_vector_chain_cmd *req
4128 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4129 enum hclge_cmd_status status;
4130 enum hclge_opcode_type op;
4131 u16 tqp_type_and_id;
4134 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4135 hclge_cmd_setup_basic_desc(&desc, op, false);
4136 req->int_vector_id = vector_id;
4139 for (node = ring_chain; node; node = node->next) {
4140 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4141 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4143 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4144 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4145 HCLGE_TQP_ID_S, node->tqp_index);
4146 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4148 hnae3_get_field(node->int_gl_idx,
4149 HNAE3_RING_GL_IDX_M,
4150 HNAE3_RING_GL_IDX_S));
4151 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4152 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4153 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4154 req->vfid = vport->vport_id;
4156 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4158 dev_err(&hdev->pdev->dev,
4159 "Map TQP fail, status is %d.\n",
4165 hclge_cmd_setup_basic_desc(&desc,
4168 req->int_vector_id = vector_id;
4173 req->int_cause_num = i;
4174 req->vfid = vport->vport_id;
4175 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4177 dev_err(&hdev->pdev->dev,
4178 "Map TQP fail, status is %d.\n", status);
4186 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4188 struct hnae3_ring_chain_node *ring_chain)
4190 struct hclge_vport *vport = hclge_get_vport(handle);
4191 struct hclge_dev *hdev = vport->back;
4194 vector_id = hclge_get_vector_index(hdev, vector);
4195 if (vector_id < 0) {
4196 dev_err(&hdev->pdev->dev,
4197 "Get vector index fail. vector_id =%d\n", vector_id);
4201 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4204 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4206 struct hnae3_ring_chain_node *ring_chain)
4208 struct hclge_vport *vport = hclge_get_vport(handle);
4209 struct hclge_dev *hdev = vport->back;
4212 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4215 vector_id = hclge_get_vector_index(hdev, vector);
4216 if (vector_id < 0) {
4217 dev_err(&handle->pdev->dev,
4218 "Get vector index fail. ret =%d\n", vector_id);
4222 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4224 dev_err(&handle->pdev->dev,
4225 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4232 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4233 struct hclge_promisc_param *param)
4235 struct hclge_promisc_cfg_cmd *req;
4236 struct hclge_desc desc;
4239 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4241 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4242 req->vf_id = param->vf_id;
4244 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4245 * pdev revision(0x20); newer revisions support them. The
4246 * values of these two fields will not return an error when the driver
4247 * sends the command to firmware in revision(0x20).
4249 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4250 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4252 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4254 dev_err(&hdev->pdev->dev,
4255 "Set promisc mode fail, status is %d.\n", ret);
4260 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4261 bool en_mc, bool en_bc, int vport_id)
4266 memset(param, 0, sizeof(struct hclge_promisc_param));
4268 param->enable = HCLGE_PROMISC_EN_UC;
4270 param->enable |= HCLGE_PROMISC_EN_MC;
4272 param->enable |= HCLGE_PROMISC_EN_BC;
4273 param->vf_id = vport_id;
4276 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4279 struct hclge_vport *vport = hclge_get_vport(handle);
4280 struct hclge_dev *hdev = vport->back;
4281 struct hclge_promisc_param param;
4282 bool en_bc_pmc = true;
4284 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4285 * is always bypassed. So broadcast promisc should be disabled until
4286 * the user enables promisc mode
4288 if (handle->pdev->revision == 0x20)
4289 en_bc_pmc = !!(handle->netdev_flags & HNAE3_BPE);
4291 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4293 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4296 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4298 struct hclge_get_fd_mode_cmd *req;
4299 struct hclge_desc desc;
4302 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4304 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4306 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4308 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4312 *fd_mode = req->mode;
4317 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4318 u32 *stage1_entry_num,
4319 u32 *stage2_entry_num,
4320 u16 *stage1_counter_num,
4321 u16 *stage2_counter_num)
4323 struct hclge_get_fd_allocation_cmd *req;
4324 struct hclge_desc desc;
4327 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4329 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4331 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4333 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4338 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4339 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4340 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4341 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4346 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4348 struct hclge_set_fd_key_config_cmd *req;
4349 struct hclge_fd_key_cfg *stage;
4350 struct hclge_desc desc;
4353 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4355 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4356 stage = &hdev->fd_cfg.key_cfg[stage_num];
4357 req->stage = stage_num;
4358 req->key_select = stage->key_sel;
4359 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4360 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4361 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4362 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4363 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4364 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4366 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4368 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4373 static int hclge_init_fd_config(struct hclge_dev *hdev)
4375 #define LOW_2_WORDS 0x03
4376 struct hclge_fd_key_cfg *key_cfg;
4379 if (!hnae3_dev_fd_supported(hdev))
4382 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4386 switch (hdev->fd_cfg.fd_mode) {
4387 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4388 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4390 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4391 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4394 dev_err(&hdev->pdev->dev,
4395 "Unsupported flow director mode %d\n",
4396 hdev->fd_cfg.fd_mode);
4400 hdev->fd_cfg.proto_support =
4401 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4402 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4403 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4404 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4405 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4406 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4407 key_cfg->outer_sipv6_word_en = 0;
4408 key_cfg->outer_dipv6_word_en = 0;
4410 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4411 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4412 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4413 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4415 /* If using the max 400-bit key, we can also support tuples for ether type */
4416 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4417 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4418 key_cfg->tuple_active |=
4419 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4422 /* roce_type is used to filter roce frames;
4423 * dst_vport is used to specify the rule
4425 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4427 ret = hclge_get_fd_allocation(hdev,
4428 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4429 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4430 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4431 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4435 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4438 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4439 int loc, u8 *key, bool is_add)
4441 struct hclge_fd_tcam_config_1_cmd *req1;
4442 struct hclge_fd_tcam_config_2_cmd *req2;
4443 struct hclge_fd_tcam_config_3_cmd *req3;
4444 struct hclge_desc desc[3];
4447 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4448 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4449 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4450 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4451 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4453 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4454 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4455 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4457 req1->stage = stage;
4458 req1->xy_sel = sel_x ? 1 : 0;
4459 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4460 req1->index = cpu_to_le32(loc);
4461 req1->entry_vld = sel_x ? is_add : 0;
4464 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4465 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4466 sizeof(req2->tcam_data));
4467 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4468 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4471 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4473 dev_err(&hdev->pdev->dev,
4474 "config tcam key fail, ret=%d\n",
4480 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4481 struct hclge_fd_ad_data *action)
4483 struct hclge_fd_ad_config_cmd *req;
4484 struct hclge_desc desc;
4488 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4490 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4491 req->index = cpu_to_le32(loc);
4494 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4495 action->write_rule_id_to_bd);
4496 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4499 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4500 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4501 action->forward_to_direct_queue);
4502 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4504 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4505 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4506 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4507 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4508 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4509 action->next_input_key);
4511 req->ad_data = cpu_to_le64(ad_data);
4512 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4514 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4519 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4520 struct hclge_fd_rule *rule)
4522 u16 tmp_x_s, tmp_y_s;
4523 u32 tmp_x_l, tmp_y_l;
4526 if (rule->unused_tuple & tuple_bit)
4529 switch (tuple_bit) {
4532 case BIT(INNER_DST_MAC):
4533 for (i = 0; i < 6; i++) {
4534 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4535 rule->tuples_mask.dst_mac[i]);
4536 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4537 rule->tuples_mask.dst_mac[i]);
4541 case BIT(INNER_SRC_MAC):
4542 for (i = 0; i < 6; i++) {
4543 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4544 rule->tuples_mask.src_mac[i]);
4545 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4546 rule->tuples_mask.src_mac[i]);
4550 case BIT(INNER_VLAN_TAG_FST):
4551 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4552 rule->tuples_mask.vlan_tag1);
4553 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4554 rule->tuples_mask.vlan_tag1);
4555 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4556 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4559 case BIT(INNER_ETH_TYPE):
4560 calc_x(tmp_x_s, rule->tuples.ether_proto,
4561 rule->tuples_mask.ether_proto);
4562 calc_y(tmp_y_s, rule->tuples.ether_proto,
4563 rule->tuples_mask.ether_proto);
4564 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4565 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4568 case BIT(INNER_IP_TOS):
4569 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4570 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4573 case BIT(INNER_IP_PROTO):
4574 calc_x(*key_x, rule->tuples.ip_proto,
4575 rule->tuples_mask.ip_proto);
4576 calc_y(*key_y, rule->tuples.ip_proto,
4577 rule->tuples_mask.ip_proto);
4580 case BIT(INNER_SRC_IP):
4581 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4582 rule->tuples_mask.src_ip[3]);
4583 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4584 rule->tuples_mask.src_ip[3]);
4585 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4586 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4589 case BIT(INNER_DST_IP):
4590 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4591 rule->tuples_mask.dst_ip[3]);
4592 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4593 rule->tuples_mask.dst_ip[3]);
4594 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4595 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4598 case BIT(INNER_SRC_PORT):
4599 calc_x(tmp_x_s, rule->tuples.src_port,
4600 rule->tuples_mask.src_port);
4601 calc_y(tmp_y_s, rule->tuples.src_port,
4602 rule->tuples_mask.src_port);
4603 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4604 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4607 case BIT(INNER_DST_PORT):
4608 calc_x(tmp_x_s, rule->tuples.dst_port,
4609 rule->tuples_mask.dst_port);
4610 calc_y(tmp_y_s, rule->tuples.dst_port,
4611 rule->tuples_mask.dst_port);
4612 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4613 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
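/* TCAM entries are programmed as an x/y pair rather than value/mask.
 * Assuming calc_x(key, val, mask) yields val & mask and calc_y() yields
 * ~val & mask (their definitions live earlier in this file), a byte
 * value 0xC5 with mask 0xF0 becomes:
 *
 *   x = 0xC5 & 0xF0 = 0xC0   -> cared bits that must be 1
 *   y = ~0xC5 & 0xF0 = 0x30  -> cared bits that must be 0
 *
 * Bits where x and y are both 0 are wildcards, which is why a tuple
 * listed in rule->unused_tuple returns early above, leaving its x/y
 * bytes zeroed.
 */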
4621 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4622 u8 vf_id, u8 network_port_id)
4624 u32 port_number = 0;
4626 if (port_type == HOST_PORT) {
4627 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4629 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4631 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4633 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4634 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4635 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4641 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4642 __le32 *key_x, __le32 *key_y,
4643 struct hclge_fd_rule *rule)
4645 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4646 u8 cur_pos = 0, tuple_size, shift_bits;
4649 for (i = 0; i < MAX_META_DATA; i++) {
4650 tuple_size = meta_data_key_info[i].key_length;
4651 tuple_bit = key_cfg->meta_data_active & BIT(i);
4653 switch (tuple_bit) {
4654 case BIT(ROCE_TYPE):
4655 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4656 cur_pos += tuple_size;
4658 case BIT(DST_VPORT):
4659 port_number = hclge_get_port_number(HOST_PORT, 0,
4661 hnae3_set_field(meta_data,
4662 GENMASK(cur_pos + tuple_size - 1, cur_pos),
4663 cur_pos, port_number);
4664 cur_pos += tuple_size;
4671 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4672 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4673 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4675 *key_x = cpu_to_le32(tmp_x << shift_bits);
4676 *key_y = cpu_to_le32(tmp_y << shift_bits);
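/* The meta data word is MSB-aligned: if the active fields occupy
 * cur_pos = 12 bits, then shift_bits = 32 - 12 = 20 and the 12-bit
 * value is pushed to the top of the 32-bit word before being stored,
 * matching the "meta data at the MSB region" layout described below.
 */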
4679 /* A complete key is made up of the meta data key and the tuple key.
4680 * The meta data key is stored at the MSB region, the tuple key at
4681 * the LSB region; unused bits are filled with 0.
4683 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4684 struct hclge_fd_rule *rule)
4686 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4687 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4688 u8 *cur_key_x, *cur_key_y;
4689 int i, ret, tuple_size;
4690 u8 meta_data_region;
4692 memset(key_x, 0, sizeof(key_x));
4693 memset(key_y, 0, sizeof(key_y));
4697 for (i = 0; i < MAX_TUPLE; i++) {
4701 tuple_size = tuple_key_info[i].key_length / 8;
4702 check_tuple = key_cfg->tuple_active & BIT(i);
4704 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4707 cur_key_x += tuple_size;
4708 cur_key_y += tuple_size;
4712 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4713 MAX_META_DATA_LENGTH / 8;
4715 hclge_fd_convert_meta_data(key_cfg,
4716 (__le32 *)(key_x + meta_data_region),
4717 (__le32 *)(key_y + meta_data_region),
4720 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4723 dev_err(&hdev->pdev->dev,
4724 "fd key_y config fail, loc=%d, ret=%d\n",
4725 rule->location, ret);
4729 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4732 dev_err(&hdev->pdev->dev,
4733 "fd key_x config fail, loc=%d, ret=%d\n",
4734 rule->location, ret);
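/* Ordering note: key_y is written first and key_x last on purpose.
 * hclge_fd_tcam_config() only sets entry_vld on the sel_x pass
 * (entry_vld = sel_x ? is_add : 0), so the TCAM entry cannot go live
 * before both halves of the key have been programmed.
 */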
4738 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4739 struct hclge_fd_rule *rule)
4741 struct hclge_fd_ad_data ad_data;
4743 ad_data.ad_id = rule->location;
4745 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4746 ad_data.drop_packet = true;
4747 ad_data.forward_to_direct_queue = false;
4748 ad_data.queue_id = 0;
4750 ad_data.drop_packet = false;
4751 ad_data.forward_to_direct_queue = true;
4752 ad_data.queue_id = rule->queue_id;
4755 ad_data.use_counter = false;
4756 ad_data.counter_id = 0;
4758 ad_data.use_next_stage = false;
4759 ad_data.next_input_key = 0;
4761 ad_data.write_rule_id_to_bd = true;
4762 ad_data.rule_id = rule->location;
4764 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
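/* The action data ("ad") entry at the same index as the TCAM key is
 * what turns a match into behaviour: drop, or forward to an explicit
 * queue. write_rule_id_to_bd makes the hardware stamp the rule id into
 * the RX buffer descriptor, presumably so the receive path can tell
 * which rule a packet matched.
 */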
4767 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4768 struct ethtool_rx_flow_spec *fs, u32 *unused)
4770 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4771 struct ethtool_usrip4_spec *usr_ip4_spec;
4772 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4773 struct ethtool_usrip6_spec *usr_ip6_spec;
4774 struct ethhdr *ether_spec;
4776 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4779 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4782 if ((fs->flow_type & FLOW_EXT) &&
4783 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4784 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4788 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4792 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4793 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4795 if (!tcp_ip4_spec->ip4src)
4796 *unused |= BIT(INNER_SRC_IP);
4798 if (!tcp_ip4_spec->ip4dst)
4799 *unused |= BIT(INNER_DST_IP);
4801 if (!tcp_ip4_spec->psrc)
4802 *unused |= BIT(INNER_SRC_PORT);
4804 if (!tcp_ip4_spec->pdst)
4805 *unused |= BIT(INNER_DST_PORT);
4807 if (!tcp_ip4_spec->tos)
4808 *unused |= BIT(INNER_IP_TOS);
4812 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4813 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4814 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4816 if (!usr_ip4_spec->ip4src)
4817 *unused |= BIT(INNER_SRC_IP);
4819 if (!usr_ip4_spec->ip4dst)
4820 *unused |= BIT(INNER_DST_IP);
4822 if (!usr_ip4_spec->tos)
4823 *unused |= BIT(INNER_IP_TOS);
4825 if (!usr_ip4_spec->proto)
4826 *unused |= BIT(INNER_IP_PROTO);
4828 if (usr_ip4_spec->l4_4_bytes)
4831 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4838 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4839 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4842 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4843 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4844 *unused |= BIT(INNER_SRC_IP);
4846 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4847 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4848 *unused |= BIT(INNER_DST_IP);
4850 if (!tcp_ip6_spec->psrc)
4851 *unused |= BIT(INNER_SRC_PORT);
4853 if (!tcp_ip6_spec->pdst)
4854 *unused |= BIT(INNER_DST_PORT);
4856 if (tcp_ip6_spec->tclass)
4860 case IPV6_USER_FLOW:
4861 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4862 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4863 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4864 BIT(INNER_DST_PORT);
4866 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4867 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4868 *unused |= BIT(INNER_SRC_IP);
4870 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4871 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4872 *unused |= BIT(INNER_DST_IP);
4874 if (!usr_ip6_spec->l4_proto)
4875 *unused |= BIT(INNER_IP_PROTO);
4877 if (usr_ip6_spec->tclass)
4880 if (usr_ip6_spec->l4_4_bytes)
4885 ether_spec = &fs->h_u.ether_spec;
4886 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4887 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4888 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4890 if (is_zero_ether_addr(ether_spec->h_source))
4891 *unused |= BIT(INNER_SRC_MAC);
4893 if (is_zero_ether_addr(ether_spec->h_dest))
4894 *unused |= BIT(INNER_DST_MAC);
4896 if (!ether_spec->h_proto)
4897 *unused |= BIT(INNER_ETH_TYPE);
4904 if (fs->flow_type & FLOW_EXT) {
4905 if (fs->h_ext.vlan_etype)
4907 if (!fs->h_ext.vlan_tci)
4908 *unused |= BIT(INNER_VLAN_TAG_FST);
4910 if (fs->m_ext.vlan_tci) {
4911 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4915 *unused |= BIT(INNER_VLAN_TAG_FST);
4918 if (fs->flow_type & FLOW_MAC_EXT) {
4919 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4922 if (is_zero_ether_addr(fs->h_ext.h_dest))
4923 *unused |= BIT(INNER_DST_MAC);
4925 *unused &= ~(BIT(INNER_DST_MAC));
4931 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4933 struct hclge_fd_rule *rule = NULL;
4934 struct hlist_node *node2;
4936 spin_lock_bh(&hdev->fd_rule_lock);
4937 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4938 if (rule->location >= location)
4942 spin_unlock_bh(&hdev->fd_rule_lock);
4944 return rule && rule->location == location;
4947 /* the caller must hold hdev->fd_rule_lock */
4948 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4949 struct hclge_fd_rule *new_rule,
4953 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4954 struct hlist_node *node2;
4956 if (is_add && !new_rule)
4959 hlist_for_each_entry_safe(rule, node2,
4960 &hdev->fd_rule_list, rule_node) {
4961 if (rule->location >= location)
4966 if (rule && rule->location == location) {
4967 hlist_del(&rule->rule_node);
4969 hdev->hclge_fd_rule_num--;
4972 if (!hdev->hclge_fd_rule_num)
4973 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4974 clear_bit(location, hdev->fd_bmap);
4978 } else if (!is_add) {
4979 dev_err(&hdev->pdev->dev,
4980 "delete fail, rule %d is inexistent\n",
4985 INIT_HLIST_NODE(&new_rule->rule_node);
4988 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4990 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4992 set_bit(location, hdev->fd_bmap);
4993 hdev->hclge_fd_rule_num++;
4994 hdev->fd_active_type = new_rule->rule_type;
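/* The rule list is kept sorted by location: the walk above stops at the
 * first node whose location is >= the target, deletes an exact match,
 * and inserts the new node either behind that "parent" or at the list
 * head, so lookup, delete and insert all share one O(n) pass.
 */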
4999 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5000 struct ethtool_rx_flow_spec *fs,
5001 struct hclge_fd_rule *rule)
5003 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5005 switch (flow_type) {
5009 rule->tuples.src_ip[3] =
5010 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5011 rule->tuples_mask.src_ip[3] =
5012 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5014 rule->tuples.dst_ip[3] =
5015 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5016 rule->tuples_mask.dst_ip[3] =
5017 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5019 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5020 rule->tuples_mask.src_port =
5021 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5023 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5024 rule->tuples_mask.dst_port =
5025 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5027 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5028 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5030 rule->tuples.ether_proto = ETH_P_IP;
5031 rule->tuples_mask.ether_proto = 0xFFFF;
5035 rule->tuples.src_ip[3] =
5036 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5037 rule->tuples_mask.src_ip[3] =
5038 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5040 rule->tuples.dst_ip[3] =
5041 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5042 rule->tuples_mask.dst_ip[3] =
5043 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5045 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5046 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5048 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5049 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5051 rule->tuples.ether_proto = ETH_P_IP;
5052 rule->tuples_mask.ether_proto = 0xFFFF;
5058 be32_to_cpu_array(rule->tuples.src_ip,
5059 fs->h_u.tcp_ip6_spec.ip6src, 4);
5060 be32_to_cpu_array(rule->tuples_mask.src_ip,
5061 fs->m_u.tcp_ip6_spec.ip6src, 4);
5063 be32_to_cpu_array(rule->tuples.dst_ip,
5064 fs->h_u.tcp_ip6_spec.ip6dst, 4);
5065 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5066 fs->m_u.tcp_ip6_spec.ip6dst, 4);
5068 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5069 rule->tuples_mask.src_port =
5070 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5072 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5073 rule->tuples_mask.dst_port =
5074 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5076 rule->tuples.ether_proto = ETH_P_IPV6;
5077 rule->tuples_mask.ether_proto = 0xFFFF;
5080 case IPV6_USER_FLOW:
5081 be32_to_cpu_array(rule->tuples.src_ip,
5082 fs->h_u.usr_ip6_spec.ip6src, 4);
5083 be32_to_cpu_array(rule->tuples_mask.src_ip,
5084 fs->m_u.usr_ip6_spec.ip6src, 4);
5086 be32_to_cpu_array(rule->tuples.dst_ip,
5087 fs->h_u.usr_ip6_spec.ip6dst, 4);
5088 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5089 fs->m_u.usr_ip6_spec.ip6dst, 4);
5091 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5092 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5094 rule->tuples.ether_proto = ETH_P_IPV6;
5095 rule->tuples_mask.ether_proto = 0xFFFF;
5099 ether_addr_copy(rule->tuples.src_mac,
5100 fs->h_u.ether_spec.h_source);
5101 ether_addr_copy(rule->tuples_mask.src_mac,
5102 fs->m_u.ether_spec.h_source);
5104 ether_addr_copy(rule->tuples.dst_mac,
5105 fs->h_u.ether_spec.h_dest);
5106 ether_addr_copy(rule->tuples_mask.dst_mac,
5107 fs->m_u.ether_spec.h_dest);
5109 rule->tuples.ether_proto =
5110 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5111 rule->tuples_mask.ether_proto =
5112 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5119 switch (flow_type) {
5122 rule->tuples.ip_proto = IPPROTO_SCTP;
5123 rule->tuples_mask.ip_proto = 0xFF;
5127 rule->tuples.ip_proto = IPPROTO_TCP;
5128 rule->tuples_mask.ip_proto = 0xFF;
5132 rule->tuples.ip_proto = IPPROTO_UDP;
5133 rule->tuples_mask.ip_proto = 0xFF;
5139 if (fs->flow_type & FLOW_EXT) {
5140 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5141 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5144 if (fs->flow_type & FLOW_MAC_EXT) {
5145 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5146 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5152 /* the caller must hold hdev->fd_rule_lock */
5153 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5154 struct hclge_fd_rule *rule)
5159 dev_err(&hdev->pdev->dev,
5160 "The flow director rule is NULL\n");
5164 /* it will never fail here, so there is no need to check the return value */
5165 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5167 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5171 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5178 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5182 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5183 struct ethtool_rxnfc *cmd)
5185 struct hclge_vport *vport = hclge_get_vport(handle);
5186 struct hclge_dev *hdev = vport->back;
5187 u16 dst_vport_id = 0, q_index = 0;
5188 struct ethtool_rx_flow_spec *fs;
5189 struct hclge_fd_rule *rule;
5194 if (!hnae3_dev_fd_supported(hdev))
5198 dev_warn(&hdev->pdev->dev,
5199 "Please enable flow director first\n");
5203 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5205 ret = hclge_fd_check_spec(hdev, fs, &unused);
5207 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5211 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5212 action = HCLGE_FD_ACTION_DROP_PACKET;
5214 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5215 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5218 if (vf > hdev->num_req_vfs) {
5219 dev_err(&hdev->pdev->dev,
5220 "Error: vf id (%d) > max vf num (%d)\n",
5221 vf, hdev->num_req_vfs);
5225 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5226 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5229 dev_err(&hdev->pdev->dev,
5230 "Error: queue id (%d) > max tqp num (%d)\n",
5235 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5239 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5243 ret = hclge_fd_get_tuple(hdev, fs, rule);
5249 rule->flow_type = fs->flow_type;
5251 rule->location = fs->location;
5252 rule->unused_tuple = unused;
5253 rule->vf_id = dst_vport_id;
5254 rule->queue_id = q_index;
5255 rule->action = action;
5256 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5258 /* to avoid rule conflict, when the user configures rules via
5259 * ethtool, we need to clear all arfs rules
5261 hclge_clear_arfs_rules(handle);
5263 spin_lock_bh(&hdev->fd_rule_lock);
5264 ret = hclge_fd_config_rule(hdev, rule);
5266 spin_unlock_bh(&hdev->fd_rule_lock);
5271 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5272 struct ethtool_rxnfc *cmd)
5274 struct hclge_vport *vport = hclge_get_vport(handle);
5275 struct hclge_dev *hdev = vport->back;
5276 struct ethtool_rx_flow_spec *fs;
5279 if (!hnae3_dev_fd_supported(hdev))
5282 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5284 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5287 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5288 dev_err(&hdev->pdev->dev,
5289 "Delete fail, rule %d is inexistent\n",
5294 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5295 fs->location, NULL, false);
5299 spin_lock_bh(&hdev->fd_rule_lock);
5300 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5302 spin_unlock_bh(&hdev->fd_rule_lock);
5307 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5310 struct hclge_vport *vport = hclge_get_vport(handle);
5311 struct hclge_dev *hdev = vport->back;
5312 struct hclge_fd_rule *rule;
5313 struct hlist_node *node;
5316 if (!hnae3_dev_fd_supported(hdev))
5319 spin_lock_bh(&hdev->fd_rule_lock);
5320 for_each_set_bit(location, hdev->fd_bmap,
5321 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5322 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5326 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5328 hlist_del(&rule->rule_node);
5331 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5332 hdev->hclge_fd_rule_num = 0;
5333 bitmap_zero(hdev->fd_bmap,
5334 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5337 spin_unlock_bh(&hdev->fd_rule_lock);
5340 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5342 struct hclge_vport *vport = hclge_get_vport(handle);
5343 struct hclge_dev *hdev = vport->back;
5344 struct hclge_fd_rule *rule;
5345 struct hlist_node *node;
5348 /* Return ok here, because reset error handling will check this
5349 * return value. If error is returned here, the reset process will
5350 * fail.
5351 */
5352 if (!hnae3_dev_fd_supported(hdev))
5355 /* if fd is disabled, it should not be restored during reset */
5359 spin_lock_bh(&hdev->fd_rule_lock);
5360 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5361 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5362 if (!ret)
5363 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5366 dev_warn(&hdev->pdev->dev,
5367 "Restore rule %d failed, remove it\n",
5369 clear_bit(rule->location, hdev->fd_bmap);
5370 hlist_del(&rule->rule_node);
5372 hdev->hclge_fd_rule_num--;
5376 if (hdev->hclge_fd_rule_num)
5377 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5379 spin_unlock_bh(&hdev->fd_rule_lock);
5384 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5385 struct ethtool_rxnfc *cmd)
5387 struct hclge_vport *vport = hclge_get_vport(handle);
5388 struct hclge_dev *hdev = vport->back;
5390 if (!hnae3_dev_fd_supported(hdev))
5393 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5394 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5399 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5400 struct ethtool_rxnfc *cmd)
5402 struct hclge_vport *vport = hclge_get_vport(handle);
5403 struct hclge_fd_rule *rule = NULL;
5404 struct hclge_dev *hdev = vport->back;
5405 struct ethtool_rx_flow_spec *fs;
5406 struct hlist_node *node2;
5408 if (!hnae3_dev_fd_supported(hdev))
5411 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5413 spin_lock_bh(&hdev->fd_rule_lock);
5415 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5416 if (rule->location >= fs->location)
5420 if (!rule || fs->location != rule->location) {
5421 spin_unlock_bh(&hdev->fd_rule_lock);
5426 fs->flow_type = rule->flow_type;
5427 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5431 fs->h_u.tcp_ip4_spec.ip4src =
5432 cpu_to_be32(rule->tuples.src_ip[3]);
5433 fs->m_u.tcp_ip4_spec.ip4src =
5434 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5435 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5437 fs->h_u.tcp_ip4_spec.ip4dst =
5438 cpu_to_be32(rule->tuples.dst_ip[3]);
5439 fs->m_u.tcp_ip4_spec.ip4dst =
5440 rule->unused_tuple & BIT(INNER_DST_IP) ?
5441 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5443 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5444 fs->m_u.tcp_ip4_spec.psrc =
5445 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5446 0 : cpu_to_be16(rule->tuples_mask.src_port);
5448 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5449 fs->m_u.tcp_ip4_spec.pdst =
5450 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5451 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5453 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5454 fs->m_u.tcp_ip4_spec.tos =
5455 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5456 0 : rule->tuples_mask.ip_tos;
5460 fs->h_u.usr_ip4_spec.ip4src =
5461 cpu_to_be32(rule->tuples.src_ip[3]);
5462 fs->m_u.usr_ip4_spec.ip4src =
5463 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5464 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5466 fs->h_u.usr_ip4_spec.ip4dst =
5467 cpu_to_be32(rule->tuples.dst_ip[3]);
5468 fs->m_u.usr_ip4_spec.ip4dst =
5469 rule->unused_tuple & BIT(INNER_DST_IP) ?
5470 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5472 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5473 fs->m_u.usr_ip4_spec.tos =
5474 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5475 0 : rule->tuples_mask.ip_tos;
5477 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5478 fs->m_u.usr_ip4_spec.proto =
5479 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5480 0 : rule->tuples_mask.ip_proto;
5482 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5488 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5489 rule->tuples.src_ip, 4);
5490 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5491 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5493 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5494 rule->tuples_mask.src_ip, 4);
5496 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5497 rule->tuples.dst_ip, 4);
5498 if (rule->unused_tuple & BIT(INNER_DST_IP))
5499 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5501 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5502 rule->tuples_mask.dst_ip, 4);
5504 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5505 fs->m_u.tcp_ip6_spec.psrc =
5506 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5507 0 : cpu_to_be16(rule->tuples_mask.src_port);
5509 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5510 fs->m_u.tcp_ip6_spec.pdst =
5511 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5512 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5515 case IPV6_USER_FLOW:
5516 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5517 rule->tuples.src_ip, 4);
5518 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5519 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5521 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5522 rule->tuples_mask.src_ip, 4);
5524 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5525 rule->tuples.dst_ip, 4);
5526 if (rule->unused_tuple & BIT(INNER_DST_IP))
5527 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5529 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5530 rule->tuples_mask.dst_ip, 4);
5532 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5533 fs->m_u.usr_ip6_spec.l4_proto =
5534 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5535 0 : rule->tuples_mask.ip_proto;
5539 ether_addr_copy(fs->h_u.ether_spec.h_source,
5540 rule->tuples.src_mac);
5541 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5542 eth_zero_addr(fs->m_u.ether_spec.h_source);
5544 ether_addr_copy(fs->m_u.ether_spec.h_source,
5545 rule->tuples_mask.src_mac);
5547 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5548 rule->tuples.dst_mac);
5549 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5550 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5552 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5553 rule->tuples_mask.dst_mac);
5555 fs->h_u.ether_spec.h_proto =
5556 cpu_to_be16(rule->tuples.ether_proto);
5557 fs->m_u.ether_spec.h_proto =
5558 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5559 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5563 spin_unlock_bh(&hdev->fd_rule_lock);
5567 if (fs->flow_type & FLOW_EXT) {
5568 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5569 fs->m_ext.vlan_tci =
5570 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5571 cpu_to_be16(VLAN_VID_MASK) :
5572 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5575 if (fs->flow_type & FLOW_MAC_EXT) {
5576 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5577 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5578 eth_zero_addr(fs->m_ext.h_dest);
5580 ether_addr_copy(fs->m_ext.h_dest,
5581 rule->tuples_mask.dst_mac);
5584 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5585 fs->ring_cookie = RX_CLS_FLOW_DISC;
5589 fs->ring_cookie = rule->queue_id;
5590 vf_id = rule->vf_id;
5591 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5592 fs->ring_cookie |= vf_id;
5595 spin_unlock_bh(&hdev->fd_rule_lock);
5600 static int hclge_get_all_rules(struct hnae3_handle *handle,
5601 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5603 struct hclge_vport *vport = hclge_get_vport(handle);
5604 struct hclge_dev *hdev = vport->back;
5605 struct hclge_fd_rule *rule;
5606 struct hlist_node *node2;
5609 if (!hnae3_dev_fd_supported(hdev))
5612 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5614 spin_lock_bh(&hdev->fd_rule_lock);
5615 hlist_for_each_entry_safe(rule, node2,
5616 &hdev->fd_rule_list, rule_node) {
5617 if (cnt == cmd->rule_cnt) {
5618 spin_unlock_bh(&hdev->fd_rule_lock);
5622 rule_locs[cnt] = rule->location;
5626 spin_unlock_bh(&hdev->fd_rule_lock);
5628 cmd->rule_cnt = cnt;
5633 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5634 struct hclge_fd_rule_tuples *tuples)
5636 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5637 tuples->ip_proto = fkeys->basic.ip_proto;
5638 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5640 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5641 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5642 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5644 memcpy(tuples->src_ip,
5645 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5646 sizeof(tuples->src_ip));
5647 memcpy(tuples->dst_ip,
5648 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5649 sizeof(tuples->dst_ip));
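/* Layout note: src_ip/dst_ip are u32[4] arrays sized for IPv6. An IPv4
 * address occupies only the last word (index 3), which is why the IPv4
 * branches throughout this file read and write *_ip[3].
 */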
5653 /* traverse all rules, check whether an existing rule has the same tuples */
5654 static struct hclge_fd_rule *
5655 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5656 const struct hclge_fd_rule_tuples *tuples)
5658 struct hclge_fd_rule *rule = NULL;
5659 struct hlist_node *node;
5661 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5662 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5669 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5670 struct hclge_fd_rule *rule)
5672 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5673 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5674 BIT(INNER_SRC_PORT);
5677 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5678 if (tuples->ether_proto == ETH_P_IP) {
5679 if (tuples->ip_proto == IPPROTO_TCP)
5680 rule->flow_type = TCP_V4_FLOW;
5682 rule->flow_type = UDP_V4_FLOW;
5684 if (tuples->ip_proto == IPPROTO_TCP)
5685 rule->flow_type = TCP_V6_FLOW;
5687 rule->flow_type = UDP_V6_FLOW;
5689 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5690 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
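/* An aRFS rule is an exact match: every mask byte is 0xFF, so all
 * tuples not listed in unused_tuple above (IPs, destination port,
 * protocol) become fully "cared" bits in the TCAM x/y encoding.
 */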
5693 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5694 u16 flow_id, struct flow_keys *fkeys)
5696 struct hclge_vport *vport = hclge_get_vport(handle);
5697 struct hclge_fd_rule_tuples new_tuples;
5698 struct hclge_dev *hdev = vport->back;
5699 struct hclge_fd_rule *rule;
5704 if (!hnae3_dev_fd_supported(hdev))
5707 memset(&new_tuples, 0, sizeof(new_tuples));
5708 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5710 spin_lock_bh(&hdev->fd_rule_lock);
5712 /* when there are already fd rules added by the user,
5713 * arfs should not work
5715 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5716 spin_unlock_bh(&hdev->fd_rule_lock);
5721 /* check whether a flow director filter exists for this flow;
5722 * if not, create a new filter for it;
5723 * if a filter exists with a different queue id, modify the filter;
5724 * if a filter exists with the same queue id, do nothing
5726 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5728 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5729 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5730 spin_unlock_bh(&hdev->fd_rule_lock);
5735 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5737 spin_unlock_bh(&hdev->fd_rule_lock);
5742 set_bit(bit_id, hdev->fd_bmap);
5743 rule->location = bit_id;
5744 rule->flow_id = flow_id;
5745 rule->queue_id = queue_id;
5746 hclge_fd_build_arfs_rule(&new_tuples, rule);
5747 ret = hclge_fd_config_rule(hdev, rule);
5749 spin_unlock_bh(&hdev->fd_rule_lock);
5754 return rule->location;
5757 spin_unlock_bh(&hdev->fd_rule_lock);
5759 if (rule->queue_id == queue_id)
5760 return rule->location;
5762 tmp_queue_id = rule->queue_id;
5763 rule->queue_id = queue_id;
5764 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5766 rule->queue_id = tmp_queue_id;
5770 return rule->location;
5773 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5775 #ifdef CONFIG_RFS_ACCEL
5776 struct hnae3_handle *handle = &hdev->vport[0].nic;
5777 struct hclge_fd_rule *rule;
5778 struct hlist_node *node;
5779 HLIST_HEAD(del_list);
5781 spin_lock_bh(&hdev->fd_rule_lock);
5782 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5783 spin_unlock_bh(&hdev->fd_rule_lock);
5786 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5787 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5788 rule->flow_id, rule->location)) {
5789 hlist_del_init(&rule->rule_node);
5790 hlist_add_head(&rule->rule_node, &del_list);
5791 hdev->hclge_fd_rule_num--;
5792 clear_bit(rule->location, hdev->fd_bmap);
5795 spin_unlock_bh(&hdev->fd_rule_lock);
5797 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5798 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5799 rule->location, NULL, false);
5800 kfree(rule);
5801 }
5802 #endif
5803 }
5805 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5807 #ifdef CONFIG_RFS_ACCEL
5808 struct hclge_vport *vport = hclge_get_vport(handle);
5809 struct hclge_dev *hdev = vport->back;
5811 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5812 hclge_del_all_fd_entries(handle, true);
5813 #endif
5814 }
5816 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5818 struct hclge_vport *vport = hclge_get_vport(handle);
5819 struct hclge_dev *hdev = vport->back;
5821 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5822 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5825 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5827 struct hclge_vport *vport = hclge_get_vport(handle);
5828 struct hclge_dev *hdev = vport->back;
5830 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5833 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5835 struct hclge_vport *vport = hclge_get_vport(handle);
5836 struct hclge_dev *hdev = vport->back;
5838 return hdev->rst_stats.hw_reset_done_cnt;
5841 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5843 struct hclge_vport *vport = hclge_get_vport(handle);
5844 struct hclge_dev *hdev = vport->back;
5847 hdev->fd_en = enable;
5848 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5850 hclge_del_all_fd_entries(handle, clear);
5852 hclge_restore_fd_entries(handle);
5855 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5857 struct hclge_desc desc;
5858 struct hclge_config_mac_mode_cmd *req =
5859 (struct hclge_config_mac_mode_cmd *)desc.data;
5863 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5864 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5865 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5866 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5867 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5868 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5869 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5870 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5871 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5872 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5873 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5874 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5875 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5876 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5877 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5878 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5880 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5882 dev_err(&hdev->pdev->dev,
5883 "mac enable fail, ret =%d.\n", ret);
5886 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5888 struct hclge_config_mac_mode_cmd *req;
5889 struct hclge_desc desc;
5893 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5894 /* 1 Read out the MAC mode config at first */
5895 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5896 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5898 dev_err(&hdev->pdev->dev,
5899 "mac loopback get fail, ret =%d.\n", ret);
5903 /* 2 Then setup the loopback flag */
5904 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5905 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5906 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5907 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5909 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5911 /* 3 Config mac work mode with loopback flag
5912 * and its original configure parameters
5914 hclge_cmd_reuse_desc(&desc, false);
5915 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5917 dev_err(&hdev->pdev->dev,
5918 "mac loopback set fail, ret =%d.\n", ret);
5922 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5923 enum hnae3_loop loop_mode)
5925 #define HCLGE_SERDES_RETRY_MS 10
5926 #define HCLGE_SERDES_RETRY_NUM 100
5928 #define HCLGE_MAC_LINK_STATUS_MS 10
5929 #define HCLGE_MAC_LINK_STATUS_NUM 100
5930 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5931 #define HCLGE_MAC_LINK_STATUS_UP 1
5933 struct hclge_serdes_lb_cmd *req;
5934 struct hclge_desc desc;
5935 int mac_link_ret = 0;
5939 req = (struct hclge_serdes_lb_cmd *)desc.data;
5940 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5942 switch (loop_mode) {
5943 case HNAE3_LOOP_SERIAL_SERDES:
5944 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5946 case HNAE3_LOOP_PARALLEL_SERDES:
5947 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5950 dev_err(&hdev->pdev->dev,
5951 "unsupported serdes loopback mode %d\n", loop_mode);
5956 req->enable = loop_mode_b;
5957 req->mask = loop_mode_b;
5958 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5960 req->mask = loop_mode_b;
5961 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5964 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5966 dev_err(&hdev->pdev->dev,
5967 "serdes loopback set fail, ret = %d\n", ret);
5972 msleep(HCLGE_SERDES_RETRY_MS);
5973 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5975 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5977 dev_err(&hdev->pdev->dev,
5978 "serdes loopback get, ret = %d\n", ret);
5981 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5982 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5984 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5985 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5987 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5988 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5992 hclge_cfg_mac_mode(hdev, en);
5996 /* serdes internal loopback, independent of the network cable. */
5997 msleep(HCLGE_MAC_LINK_STATUS_MS);
5998 ret = hclge_get_mac_link_status(hdev);
5999 if (ret == mac_link_ret)
6001 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6003 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
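/* Both waits above share one pattern: poll up to *_NUM times with
 * *_MS sleeps (100 x 10 ms, about one second) first for the serdes
 * DONE/SUCCESS flags and then for the expected MAC link state, since
 * enabling the internal loopback should force the link up and
 * disabling it should let the link drop.
 */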
6008 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
6009 int stream_id, bool enable)
6011 struct hclge_desc desc;
6012 struct hclge_cfg_com_tqp_queue_cmd *req =
6013 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6016 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6017 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6018 req->stream_id = cpu_to_le16(stream_id);
6019 req->enable |= enable << HCLGE_TQP_ENABLE_B;
6021 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6023 dev_err(&hdev->pdev->dev,
6024 "Tqp enable fail, status =%d.\n", ret);
6028 static int hclge_set_loopback(struct hnae3_handle *handle,
6029 enum hnae3_loop loop_mode, bool en)
6031 struct hclge_vport *vport = hclge_get_vport(handle);
6032 struct hnae3_knic_private_info *kinfo;
6033 struct hclge_dev *hdev = vport->back;
6036 switch (loop_mode) {
6037 case HNAE3_LOOP_APP:
6038 ret = hclge_set_app_loopback(hdev, en);
6040 case HNAE3_LOOP_SERIAL_SERDES:
6041 case HNAE3_LOOP_PARALLEL_SERDES:
6042 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6046 dev_err(&hdev->pdev->dev,
6047 "loop_mode %d is not supported\n", loop_mode);
6054 kinfo = &vport->nic.kinfo;
6055 for (i = 0; i < kinfo->num_tqps; i++) {
6056 ret = hclge_tqp_enable(hdev, i, 0, en);
6064 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6066 struct hclge_vport *vport = hclge_get_vport(handle);
6067 struct hnae3_knic_private_info *kinfo;
6068 struct hnae3_queue *queue;
6069 struct hclge_tqp *tqp;
6072 kinfo = &vport->nic.kinfo;
6073 for (i = 0; i < kinfo->num_tqps; i++) {
6074 queue = handle->kinfo.tqp[i];
6075 tqp = container_of(queue, struct hclge_tqp, q);
6076 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6080 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6082 struct hclge_vport *vport = hclge_get_vport(handle);
6083 struct hclge_dev *hdev = vport->back;
6086 mod_timer(&hdev->service_timer, jiffies + HZ);
6088 del_timer_sync(&hdev->service_timer);
6089 cancel_work_sync(&hdev->service_task);
6090 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6094 static int hclge_ae_start(struct hnae3_handle *handle)
6096 struct hclge_vport *vport = hclge_get_vport(handle);
6097 struct hclge_dev *hdev = vport->back;
6100 hclge_cfg_mac_mode(hdev, true);
6101 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6102 hdev->hw.mac.link = 0;
6104 /* reset tqp stats */
6105 hclge_reset_tqp_stats(handle);
6107 hclge_mac_start_phy(hdev);
6112 static void hclge_ae_stop(struct hnae3_handle *handle)
6114 struct hclge_vport *vport = hclge_get_vport(handle);
6115 struct hclge_dev *hdev = vport->back;
6118 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6120 hclge_clear_arfs_rules(handle);
6122 /* If it is not a PF reset, the firmware will disable the MAC,
6123 * so we only need to stop the PHY here.
6125 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6126 hdev->reset_type != HNAE3_FUNC_RESET) {
6127 hclge_mac_stop_phy(hdev);
6128 return;
6129 }
6131 for (i = 0; i < handle->kinfo.num_tqps; i++)
6132 hclge_reset_tqp(handle, i);
6135 hclge_cfg_mac_mode(hdev, false);
6137 hclge_mac_stop_phy(hdev);
6139 /* reset tqp stats */
6140 hclge_reset_tqp_stats(handle);
6141 hclge_update_link_status(hdev);
6144 int hclge_vport_start(struct hclge_vport *vport)
6146 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6147 vport->last_active_jiffies = jiffies;
6151 void hclge_vport_stop(struct hclge_vport *vport)
6153 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6156 static int hclge_client_start(struct hnae3_handle *handle)
6158 struct hclge_vport *vport = hclge_get_vport(handle);
6160 return hclge_vport_start(vport);
6163 static void hclge_client_stop(struct hnae3_handle *handle)
6165 struct hclge_vport *vport = hclge_get_vport(handle);
6167 hclge_vport_stop(vport);
6170 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6171 u16 cmdq_resp, u8 resp_code,
6172 enum hclge_mac_vlan_tbl_opcode op)
6174 struct hclge_dev *hdev = vport->back;
6175 int return_status = -EIO;
6178 dev_err(&hdev->pdev->dev,
6179 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6184 if (op == HCLGE_MAC_VLAN_ADD) {
6185 if (!resp_code || resp_code == 1) {
6187 } else if (resp_code == 2) {
6188 return_status = -ENOSPC;
6189 dev_err(&hdev->pdev->dev,
6190 "add mac addr failed for uc_overflow.\n");
6191 } else if (resp_code == 3) {
6192 return_status = -ENOSPC;
6193 dev_err(&hdev->pdev->dev,
6194 "add mac addr failed for mc_overflow.\n");
6196 dev_err(&hdev->pdev->dev,
6197 "add mac addr failed for undefined, code=%d.\n",
6200 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6203 } else if (resp_code == 1) {
6204 return_status = -ENOENT;
6205 dev_dbg(&hdev->pdev->dev,
6206 "remove mac addr failed for miss.\n");
6208 dev_err(&hdev->pdev->dev,
6209 "remove mac addr failed for undefined, code=%d.\n",
6212 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6215 } else if (resp_code == 1) {
6216 return_status = -ENOENT;
6217 dev_dbg(&hdev->pdev->dev,
6218 "lookup mac addr failed for miss.\n");
6220 dev_err(&hdev->pdev->dev,
6221 "lookup mac addr failed for undefined, code=%d.\n",
6225 return_status = -EINVAL;
6226 dev_err(&hdev->pdev->dev,
6227 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6231 return return_status;
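/* Firmware response codes decoded above, in brief:
 *
 *   op      resp_code  meaning              return
 *   ADD     0 or 1     ok                   0
 *   ADD     2          uc table overflow    -ENOSPC
 *   ADD     3          mc table overflow    -ENOSPC
 *   REMOVE  1          entry not found      -ENOENT
 *   LOOKUP  1          entry not found      -ENOENT
 *
 * Other codes keep the initial -EIO; an unknown op gives -EINVAL.
 */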
6234 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6239 if (vfid > 255 || vfid < 0)
6242 if (vfid >= 0 && vfid <= 191) {
6243 word_num = vfid / 32;
6244 bit_num = vfid % 32;
6246 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6248 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6250 word_num = (vfid - 192) / 32;
6251 bit_num = vfid % 32;
6253 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6255 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
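/* Example: the 256 function ids are spread across two descriptors of
 * six 32-bit words each (see hclge_is_all_function_id_zero() below).
 * vfid 100 lands in desc[1], word 100 / 32 = 3, bit 100 % 32 = 4;
 * vfid 200 lands in desc[2], word (200 - 192) / 32 = 0, bit 8.
 */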
6261 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6263 #define HCLGE_DESC_NUMBER 3
6264 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6267 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6268 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6269 if (desc[i].data[j])
6275 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6276 const u8 *addr, bool is_mc)
6278 const unsigned char *mac_addr = addr;
6279 u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
6280 (mac_addr[2] << 16) | (mac_addr[3] << 24);
6281 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6283 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6285 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6286 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6289 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6290 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
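/* Packing example for 00:11:22:33:44:55 (mac_addr[0] = 0x00 ...
 * mac_addr[5] = 0x55) with the expression above:
 *
 *   high_val = 0x00 | 0x11 << 8 | 0x22 << 16 | 0x33 << 24 = 0x33221100
 *   low_val  = 0x44 | 0x55 << 8                            = 0x5544
 *
 * so mac_addr_hi32 carries bytes 0-3 and mac_addr_lo16 bytes 4-5.
 */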
6293 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6294 struct hclge_mac_vlan_tbl_entry_cmd *req)
6296 struct hclge_dev *hdev = vport->back;
6297 struct hclge_desc desc;
6302 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6304 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6306 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6308 dev_err(&hdev->pdev->dev,
6309 "del mac addr failed for cmd_send, ret =%d.\n",
6313 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6314 retval = le16_to_cpu(desc.retval);
6316 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6317 HCLGE_MAC_VLAN_REMOVE);
6320 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6321 struct hclge_mac_vlan_tbl_entry_cmd *req,
6322 struct hclge_desc *desc,
6325 struct hclge_dev *hdev = vport->back;
6330 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6332 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6333 memcpy(desc[0].data,
6335 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6336 hclge_cmd_setup_basic_desc(&desc[1],
6337 HCLGE_OPC_MAC_VLAN_ADD,
6339 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6340 hclge_cmd_setup_basic_desc(&desc[2],
6341 HCLGE_OPC_MAC_VLAN_ADD,
6343 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6345 memcpy(desc[0].data,
6347 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6348 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6351 dev_err(&hdev->pdev->dev,
6352 "lookup mac addr failed for cmd_send, ret =%d.\n",
6356 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6357 retval = le16_to_cpu(desc[0].retval);
6359 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6360 HCLGE_MAC_VLAN_LKUP);
6363 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6364 struct hclge_mac_vlan_tbl_entry_cmd *req,
6365 struct hclge_desc *mc_desc)
6367 struct hclge_dev *hdev = vport->back;
6374 struct hclge_desc desc;
6376 hclge_cmd_setup_basic_desc(&desc,
6377 HCLGE_OPC_MAC_VLAN_ADD,
6379 memcpy(desc.data, req,
6380 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6381 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6382 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6383 retval = le16_to_cpu(desc.retval);
6385 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6387 HCLGE_MAC_VLAN_ADD);
6389 hclge_cmd_reuse_desc(&mc_desc[0], false);
6390 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6391 hclge_cmd_reuse_desc(&mc_desc[1], false);
6392 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6393 hclge_cmd_reuse_desc(&mc_desc[2], false);
6394 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6395 memcpy(mc_desc[0].data, req,
6396 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6397 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6398 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6399 retval = le16_to_cpu(mc_desc[0].retval);
6401 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6403 HCLGE_MAC_VLAN_ADD);
6407 dev_err(&hdev->pdev->dev,
6408 "add mac addr failed for cmd_send, ret =%d.\n",
6416 static int hclge_init_umv_space(struct hclge_dev *hdev)
6418 u16 allocated_size = 0;
6421 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6426 if (allocated_size < hdev->wanted_umv_size)
6427 dev_warn(&hdev->pdev->dev,
6428 "Alloc umv space failed, want %d, get %d\n",
6429 hdev->wanted_umv_size, allocated_size);
6431 mutex_init(&hdev->umv_mutex);
6432 hdev->max_umv_size = allocated_size;
6433 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6434 hdev->share_umv_size = hdev->priv_umv_size +
6435 hdev->max_umv_size % (hdev->num_req_vfs + 2);
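/* UMV accounting sketch: the unicast MAC table is split into a private
 * quota per function plus a shared pool. With, say, allocated_size =
 * 256 and num_req_vfs = 6, the divisor is 6 + 2 (one slice per VF, one
 * for the PF, one to seed the shared pool), so priv_umv_size = 256 / 8
 * = 32 and share_umv_size = 32 + 256 % 8 = 32. hclge_update_umv_space()
 * below charges the private quota first and only then the shared pool.
 */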
6440 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6444 if (hdev->max_umv_size > 0) {
6445 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6449 hdev->max_umv_size = 0;
6451 mutex_destroy(&hdev->umv_mutex);
6456 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6457 u16 *allocated_size, bool is_alloc)
6459 struct hclge_umv_spc_alc_cmd *req;
6460 struct hclge_desc desc;
6463 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6464 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6465 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6466 req->space_size = cpu_to_le32(space_size);
6468 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6470 dev_err(&hdev->pdev->dev,
6471 "%s umv space failed for cmd_send, ret =%d\n",
6472 is_alloc ? "allocate" : "free", ret);
6476 if (is_alloc && allocated_size)
6477 *allocated_size = le32_to_cpu(desc.data[1]);
6482 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6484 struct hclge_vport *vport;
6487 for (i = 0; i < hdev->num_alloc_vport; i++) {
6488 vport = &hdev->vport[i];
6489 vport->used_umv_num = 0;
6492 mutex_lock(&hdev->umv_mutex);
6493 hdev->share_umv_size = hdev->priv_umv_size +
6494 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6495 mutex_unlock(&hdev->umv_mutex);
6498 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6500 struct hclge_dev *hdev = vport->back;
6503 mutex_lock(&hdev->umv_mutex);
6504 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6505 hdev->share_umv_size == 0);
6506 mutex_unlock(&hdev->umv_mutex);
6511 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6513 struct hclge_dev *hdev = vport->back;
6515 mutex_lock(&hdev->umv_mutex);
6517 if (vport->used_umv_num > hdev->priv_umv_size)
6518 hdev->share_umv_size++;
6520 if (vport->used_umv_num > 0)
6521 vport->used_umv_num--;
6523 if (vport->used_umv_num >= hdev->priv_umv_size &&
6524 hdev->share_umv_size > 0)
6525 hdev->share_umv_size--;
6526 vport->used_umv_num++;
6528 mutex_unlock(&hdev->umv_mutex);
6531 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6532 const unsigned char *addr)
6534 struct hclge_vport *vport = hclge_get_vport(handle);
6536 return hclge_add_uc_addr_common(vport, addr);
6539 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6540 const unsigned char *addr)
6542 struct hclge_dev *hdev = vport->back;
6543 struct hclge_mac_vlan_tbl_entry_cmd req;
6544 struct hclge_desc desc;
6545 u16 egress_port = 0;
6548 /* mac addr check */
6549 if (is_zero_ether_addr(addr) ||
6550 is_broadcast_ether_addr(addr) ||
6551 is_multicast_ether_addr(addr)) {
6552 dev_err(&hdev->pdev->dev,
6553 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6555 is_zero_ether_addr(addr),
6556 is_broadcast_ether_addr(addr),
6557 is_multicast_ether_addr(addr));
6561 memset(&req, 0, sizeof(req));
6563 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6564 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6566 req.egress_port = cpu_to_le16(egress_port);
6568 hclge_prepare_mac_addr(&req, addr, false);
6570 /* Lookup the mac address in the mac_vlan table, and add
6571 * it if the entry is inexistent. Repeated unicast entry
6572 * is not allowed in the mac vlan table.
6574 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6575 if (ret == -ENOENT) {
6576 if (!hclge_is_umv_space_full(vport)) {
6577 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6579 hclge_update_umv_space(vport, false);
6583 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6584 hdev->priv_umv_size);
6589 /* check if we just hit the duplicate */
6591 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6592 vport->vport_id, addr);
6596 dev_err(&hdev->pdev->dev,
6597 "PF failed to add unicast entry(%pM) in the MAC table\n",
6603 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6604 const unsigned char *addr)
6606 struct hclge_vport *vport = hclge_get_vport(handle);
6608 return hclge_rm_uc_addr_common(vport, addr);
6611 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6612 const unsigned char *addr)
6614 struct hclge_dev *hdev = vport->back;
6615 struct hclge_mac_vlan_tbl_entry_cmd req;
6618 /* mac addr check */
6619 if (is_zero_ether_addr(addr) ||
6620 is_broadcast_ether_addr(addr) ||
6621 is_multicast_ether_addr(addr)) {
6622 dev_dbg(&hdev->pdev->dev,
6623 "Remove mac err! invalid mac:%pM.\n",
6628 memset(&req, 0, sizeof(req));
6629 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6630 hclge_prepare_mac_addr(&req, addr, false);
6631 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6633 hclge_update_umv_space(vport, true);
6638 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6639 const unsigned char *addr)
6641 struct hclge_vport *vport = hclge_get_vport(handle);
6643 return hclge_add_mc_addr_common(vport, addr);
6646 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6647 const unsigned char *addr)
6649 struct hclge_dev *hdev = vport->back;
6650 struct hclge_mac_vlan_tbl_entry_cmd req;
6651 struct hclge_desc desc[3];
6654 /* mac addr check */
6655 if (!is_multicast_ether_addr(addr)) {
6656 dev_err(&hdev->pdev->dev,
6657 "Add mc mac err! invalid mac:%pM.\n",
6661 memset(&req, 0, sizeof(req));
6662 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6663 hclge_prepare_mac_addr(&req, addr, true);
6664 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6666 /* This mac addr exists, update VFID for it */
6667 hclge_update_desc_vfid(desc, vport->vport_id, false);
6668 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6670 /* This mac addr does not exist, add a new entry for it */
6671 memset(desc[0].data, 0, sizeof(desc[0].data));
6672 memset(desc[1].data, 0, sizeof(desc[1].data));
6673 memset(desc[2].data, 0, sizeof(desc[2].data));
6674 hclge_update_desc_vfid(desc, vport->vport_id, false);
6675 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6678 if (status == -ENOSPC)
6679 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfid is zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfid is zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* Maybe this mac address is in the mta table, but it cannot
		 * be deleted here because an entry of mta represents an
		 * address range rather than a specific address. The delete
		 * action to all entries will take effect in
		 * update_mta_status called by hns3_nic_set_rx_mode.
		 */
		status = 0;
	}

	return status;
}

void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg;
	struct list_head *list;

	if (!vport->vport_id)
		return;

	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
	if (!mac_cfg)
		return;

	mac_cfg->hd_tbl_status = true;
	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_add_tail(&mac_cfg->node, list);
}

void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			      bool is_write_tbl,
			      enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;
	bool uc_flag, mc_flag;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		/* compare all ETH_ALEN bytes; strncmp() would stop early at
		 * a 0x00 byte inside the address
		 */
		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
			if (uc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_uc_addr_common(vport, mac_addr);

			if (mc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_mc_addr_common(vport, mac_addr);

			list_del(&mac_cfg->node);
			kfree(mac_cfg);
			break;
		}
	}
}

void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);

		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);

		mac_cfg->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&mac_cfg->node);
			kfree(mac_cfg);
		}
	}
}

void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport_mac_addr_cfg *mac, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}

		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}

static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status = 0;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%d.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}

static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}
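
/* The manager-table response code arrives packed in byte 1 of the
 * first reply data word, hence the ">> 8 & 0xff" extraction above;
 * desc.retval carries the generic command-queue status and is checked
 * first inside hclge_get_mac_ethertype_cmd_status().
 */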
static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			new_addr);
		return -EINVAL;
	}

	if ((!is_first || is_kdump_kernel()) &&
	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
		dev_warn(&hdev->pdev->dev,
			 "remove old uc mac address fail.\n");

	ret = hclge_add_uc_addr(handle, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add uc mac address fail, ret =%d.\n",
			ret);

		if (!is_first &&
		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
			dev_err(&hdev->pdev->dev,
				"restore uc mac address fail.\n");

		return -EIO;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure mac pause address fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);

	return 0;
}
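
/* Sequencing note: the old unicast entry is removed first (also in
 * kdump kernels, where an address inherited from the crashed kernel
 * may still occupy the table), and if adding the new address fails
 * the old one is re-added so the port stays reachable; the MAC pause
 * address is updated last to match the new station address.
 */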
static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}

static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);

	return ret;
}

#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
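
/* The filter-enable (FE) bits select which datapaths a VLAN filter
 * applies to: revision 0x21 hardware exposes separate NIC and RoCE
 * bits per direction, e.g. HCLGE_FILTER_FE_EGRESS enables both egress
 * paths (BIT(1) | BIT(3) = 0x0a), while revision 0x20 only has the
 * single V1 egress bit.
 */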
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->pdev->revision >= 0x21) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable, 0);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable, 0);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
					   0);
	}
	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}

static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
				    bool is_kill, u16 vlan, u8 qos,
				    __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req0->resp_code)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
			dev_warn(&hdev->pdev->dev,
				 "vlan %d filter is not in vf vlan table\n",
				 vlan);
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id, u8 qos,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
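
/* hdev->vlan_table[] mirrors hardware membership per VLAN: the shared
 * port filter is only written when the first vport joins a VLAN
 * (vport_num == 1 after the add) or the last one leaves
 * (vport_num == 0 after the kill); joins and leaves in between touch
 * only the per-VF filter.
 */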
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		vport->txvlan_cfg.accept_tag1 = false;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20); newer revisions support them, but
	 * these two fields cannot be configured by the user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
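
/* The trailing hclge_set_vlan_filter() call pre-adds VLAN id 0 for
 * the PF handle, which appears to keep untagged and priority-tagged
 * traffic flowing once filtering is enabled; the 8021q module may
 * request VLAN 0 again later, which the duplicate check in
 * hclge_set_vlan_filter_hw() tolerates.
 */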
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	/* vlan 0 is reserved */
	if (!vlan_id)
		return;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, 0, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}

static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id, 0,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id, 0,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}

void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						new_info->qos, false);
	}

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       old_info->qos, true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}

int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
	if (ret)
		return ret;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       vlan_info->qos, false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(old_vlan_info->vlan_proto),
					       vport->vport_id,
					       old_vlan_info->vlan_tag,
					       old_vlan_info->qos, true);
		if (ret)
			return ret;

		goto update;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

	/* update state only when disable/enable port based VLAN */
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

update:
	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;

	return 0;
}

static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_ENABLE;
	} else {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_DISABLE;
		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_MODIFY;
	}
}

static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	/* qos is a 3 bit value, so it cannot be bigger than 7 */
	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vport = &hdev->vport[vfid];
	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	/* update port based VLAN for PF */
	if (!vfid) {
		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);

		return ret;
	}

	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);
	} else {
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							(u8)vfid, state,
							vlan, qos,
							ntohs(proto));
		return ret;
	}
}
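
/* For a VF whose driver is up (HCLGE_VPORT_STATE_ALIVE) the new port
 * based VLAN is pushed through the PF-to-VF mailbox so the VF can
 * react immediately; an inactive VF just has its stored config
 * updated and picks it up on its next initialization.
 */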
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When port based VLAN is enabled, we use the port based VLAN as the
	 * VLAN filter entry. In this case, we don't update the VLAN filter
	 * table when the user adds a new VLAN or removes an existing one;
	 * we just update the vport VLAN list. The VLAN ids in the VLAN list
	 * will not be written to the VLAN filter table until port based
	 * VLAN is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, 0, is_kill);
		writen_to_tbl = true;
	}

	if (ret)
		return ret;

	if (is_kill)
		hclge_rm_vport_vlan_table(vport, vlan_id, false);
	else
		hclge_add_vport_vlan_table(vport, vlan_id,
					   writen_to_tbl);

	return 0;
}

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}

int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret = 0;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than each VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
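
/* Frame size arithmetic: for the standard 1500 byte MTU the required
 * max_frm_size is 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
 * 2 * VLAN_HLEN (8 total) = 1526 bytes, i.e. room for a double-tagged
 * frame including the FCS.
 */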
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret = 0;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}

static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n", ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}

int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
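
/* mii_resolve_flowctrl_fdx() applies the standard 802.3 pause
 * resolution: if both sides advertise symmetric pause the result is
 * FLOW_CTRL_TX | FLOW_CTRL_RX, asymmetric advertisements resolve to
 * one direction only, and half duplex links force both off (the
 * duplex check above).
 */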
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!auto_neg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	return hclge_restart_autoneg(handle);
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}

static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of vmdq vports: %d\n", hdev->num_vmdq_vport);
	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");

	dev_info(dev, "PF info end.\n");
}

static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->nic.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int ret;

	ret = client->ops->init_instance(&vport->nic);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->vport->nic))
		hclge_info_show(hdev);

	return 0;
}

static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->roce.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;
}

static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = hclge_init_nic_client_instance(ae_dev, vport);
			if (ret)
				goto clear_nic;

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
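
/* Teardown order is the reverse of bring-up: the RoCE instance, which
 * depends on the NIC instance, is uninitialized first, and a pure
 * RoCE client unregister returns early so the NIC instance stays up.
 */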
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}

static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
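
/* FLR handshake: hclge_flr_prepare() schedules the reset task and
 * polls up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS = 5000 ms for it
 * to bring the function down, and hclge_flr_done() tells the reset
 * task that the PCI core has finished the function level reset.
 */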
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);
	spin_lock_init(&hdev->fd_rule_lock);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}

static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on core/global reset.
	 */
	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
		return ret;
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_config_mac_tnl_int(hdev, false);
	hclge_hw_error_set_state(hdev, false);
	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by the user */
	if (rxfh_configured)
		goto out;

	/* Reinitialize the rss indirection table according to the new RSS
	 * size
	 */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
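
/* Indirection re-fill example: with a new rss_size of 4, the
 * HCLGE_RSS_IND_TBL_SIZE entries are populated round-robin as
 * 0, 1, 2, 3, 0, 1, ... so flows spread evenly over the new queue
 * count; a user-supplied table (rxfh_configured) is deliberately left
 * untouched.
 */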
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
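
/* The first reply descriptor spends two of its eight 32-bit data
 * words on the request header, hence "regs_num + 2" when sizing
 * cmd_num and "n = ... - 2" for descriptor 0; later descriptors are
 * consumed from their start, which appears to match how the firmware
 * packs continuation descriptors for this query.
 */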
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
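
/* Dump layout: each direct-read register block is padded to whole
 * REG_LEN_PER_LINE (16 byte) lines, with the "+ 1" per block
 * accounting for a separator line of SEPARATOR_VALUE words; the
 * firmware-queried 32/64 bit register arrays are appended after the
 * blocks, and hclge_get_regs() below fills the buffer in this order.
 */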
9006 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9009 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9010 struct hclge_vport *vport = hclge_get_vport(handle);
9011 struct hclge_dev *hdev = vport->back;
9012 u32 regs_num_32_bit, regs_num_64_bit;
9013 int i, j, reg_um, separator_num;
9017 *version = hdev->fw_version;
9019 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
9021 dev_err(&hdev->pdev->dev,
9022 "Get register number failed, ret = %d.\n", ret);
9026 /* fetching per-PF registers valus from PF PCIe register space */
9027 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9028 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9029 for (i = 0; i < reg_um; i++)
9030 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9031 for (i = 0; i < separator_num; i++)
9032 *reg++ = SEPARATOR_VALUE;
9034 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9035 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9036 for (i = 0; i < reg_um; i++)
9037 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9038 for (i = 0; i < separator_num; i++)
9039 *reg++ = SEPARATOR_VALUE;
9041 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9042 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9043 for (j = 0; j < kinfo->num_tqps; j++) {
9044 for (i = 0; i < reg_um; i++)
9045 *reg++ = hclge_read_dev(&hdev->hw,
9046 ring_reg_addr_list[i] +
9048 for (i = 0; i < separator_num; i++)
9049 *reg++ = SEPARATOR_VALUE;
9052 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9053 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9054 for (j = 0; j < hdev->num_msi_used - 1; j++) {
9055 for (i = 0; i < reg_um; i++)
9056 *reg++ = hclge_read_dev(&hdev->hw,
9057 tqp_intr_reg_addr_list[i] +
9059 for (i = 0; i < separator_num; i++)
9060 *reg++ = SEPARATOR_VALUE;

	/* fetch PF common register values from the firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
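
/* Reached via ethtool's set_phys_id path ("ethtool -p <iface>").
 * Returning 0 for ETHTOOL_ID_ACTIVE tells the ethtool core that the
 * device blinks the locate LED on its own, so ETHTOOL_ID_ON/OFF are
 * not expected here and fall through to -EINVAL.
 */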

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
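
/* supported/advertising are ETHTOOL_LINK_MODE_* bitmaps stored as
 * arrays of unsigned long; the word-by-word copy hands ethtool the
 * cached MAC capabilities without any translation.
 */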

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
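
/* PF-side implementation of the hnae3 AE ops.  Client drivers reach
 * these callbacks through handle->ae_algo->ops once ae_algo is
 * registered below.
 */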
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);