// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);

static struct hnae3_ae_algo ae_algo;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GROS_EN_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
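/* Fixed-size MAC statistics read: the HCLGE_OPC_STATS_MAC command always
 * returns HCLGE_MAC_CMD_NUM descriptors, and only the first descriptor
 * carries the command header, so it holds fewer 64-bit counters
 * (HCLGE_RD_FIRST_STATS_NUM) than the following ones
 * (HCLGE_RD_OTHER_STATS_NUM).
 */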
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
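/* Variable-size MAC statistics read: HCLGE_OPC_STATS_MAC_ALL returns as
 * many descriptors as the firmware advertised via
 * HCLGE_OPC_QUERY_MAC_REG_NUM, so new firmware counters are picked up
 * without driver changes.
 */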
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
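/* Convert the firmware's register (counter) count into a descriptor
 * count: the first descriptor carries the command header plus three
 * 64-bit counters, every following descriptor carries four.
 */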
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
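/* Prefer the self-describing STATS_MAC_ALL path; fall back to the fixed
 * 21-descriptor command when the firmware returns -EOPNOTSUPP for the
 * register-number query.
 */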
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
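/* Accumulate the RX and TX packet counters of every TQP owned by this
 * handle, one single-descriptor query command per queue.
 */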
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has a TX and an RX counter */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
	 * phy: only support when phy device exist on board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
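/* Query the function status, retrying a few times because the PF state
 * field is only valid once the PF reset has completed.
 */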
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
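/* Buffer sizes in the PF resource command are reported in units of
 * 1 << HCLGE_BUF_UNIT_S bytes; when RoCE is supported the MSI-X space
 * is split with NIC vectors placed before the RoCE vectors.
 */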
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
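/* Translate the firmware speed code into a speed in Mbps; the same
 * encoding (0 = 1G ... 5 = 100G, 6 = 10M, 7 = 100M) is used in the
 * reverse direction by hclge_cfg_mac_speed_dup_hw() below.
 */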
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}
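/* FEC capability depends on the MAC speed: 10G/40G advertise BaseR,
 * 25G/50G advertise RS (with BaseR still in the ability mask), 100G
 * advertises RS only; AUTO is reported wherever any FEC mode exists.
 */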
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
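/* Unpack the static configuration, which arrives as two descriptors'
 * worth of 32-bit parameter words; the MAC address is split between
 * param[2] and the low bits of param[3].
 */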
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be divided by 4 when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps,
				       hdev->num_tx_desc, hdev->num_rx_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
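/* Check whether the buffer left over after the per-TC private buffers
 * can serve as a shared buffer: it must cover the DCB or non-DCB
 * minimum, and the remainder is carved into per-TC high/low pause
 * thresholds.
 */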
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
					     HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		if (tc_num)
			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
		else
			hi_thrd = shared_buf - hdev->dv_buf_size;

		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / 2;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : 256;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);

	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
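/* Allocate MSI/MSI-X vectors and the bookkeeping arrays that map vector
 * slots to vports and Linux IRQ numbers; RoCE vectors start at
 * roce_base_msix_offset.
 */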
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}
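/* Program MAC speed/duplex into hardware; the switch encodes the speed
 * into the firmware speed code, the inverse of hclge_parse_speed().
 */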
2167 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2170 struct hclge_config_mac_speed_dup_cmd *req;
2171 struct hclge_desc desc;
2174 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2176 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2178 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2181 case HCLGE_MAC_SPEED_10M:
2182 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2183 HCLGE_CFG_SPEED_S, 6);
2185 case HCLGE_MAC_SPEED_100M:
2186 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2187 HCLGE_CFG_SPEED_S, 7);
2189 case HCLGE_MAC_SPEED_1G:
2190 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2191 HCLGE_CFG_SPEED_S, 0);
2193 case HCLGE_MAC_SPEED_10G:
2194 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2195 HCLGE_CFG_SPEED_S, 1);
2197 case HCLGE_MAC_SPEED_25G:
2198 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2199 HCLGE_CFG_SPEED_S, 2);
2201 case HCLGE_MAC_SPEED_40G:
2202 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2203 HCLGE_CFG_SPEED_S, 3);
2205 case HCLGE_MAC_SPEED_50G:
2206 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2207 HCLGE_CFG_SPEED_S, 4);
2209 case HCLGE_MAC_SPEED_100G:
2210 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2211 HCLGE_CFG_SPEED_S, 5);
2214 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2218 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2221 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2223 dev_err(&hdev->pdev->dev,
2224 "mac speed/duplex config cmd failed %d.\n", ret);
2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2235 duplex = hclge_check_speed_dup(duplex, speed);
2236 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2239 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2243 hdev->hw.mac.speed = speed;
2244 hdev->hw.mac.duplex = duplex;
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2252 struct hclge_vport *vport = hclge_get_vport(handle);
2253 struct hclge_dev *hdev = vport->back;
2255 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
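/* Enable or disable MAC autoneg by sending a HCLGE_OPC_CONFIG_AN_MODE
 * command to the firmware.
 */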
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2260 struct hclge_config_auto_neg_cmd *req;
2261 struct hclge_desc desc;
2265 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2267 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2271 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2273 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2281 struct hclge_vport *vport = hclge_get_vport(handle);
2282 struct hclge_dev *hdev = vport->back;
2284 if (!hdev->hw.mac.support_autoneg) {
2286 dev_err(&hdev->pdev->dev,
2287 "autoneg is not supported by current port\n");
2294 return hclge_set_autoneg_en(hdev, enable);
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2299 struct hclge_vport *vport = hclge_get_vport(handle);
2300 struct hclge_dev *hdev = vport->back;
2301 struct phy_device *phydev = hdev->hw.mac.phydev;
2304 return phydev->autoneg;
2306 return hdev->hw.mac.autoneg;
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2311 struct hclge_vport *vport = hclge_get_vport(handle);
2312 struct hclge_dev *hdev = vport->back;
2315 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2317 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2320 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2325 struct hclge_config_fec_cmd *req;
2326 struct hclge_desc desc;
2329 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2331 req = (struct hclge_config_fec_cmd *)desc.data;
2332 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334 if (fec_mode & BIT(HNAE3_FEC_RS))
2335 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337 if (fec_mode & BIT(HNAE3_FEC_BASER))
2338 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2341 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2343 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2350 struct hclge_vport *vport = hclge_get_vport(handle);
2351 struct hclge_dev *hdev = vport->back;
2352 struct hclge_mac *mac = &hdev->hw.mac;
2355 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2360 ret = hclge_set_fec_hw(hdev, fec_mode);
2364 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2371 struct hclge_vport *vport = hclge_get_vport(handle);
2372 struct hclge_dev *hdev = vport->back;
2373 struct hclge_mac *mac = &hdev->hw.mac;
2376 *fec_ability = mac->fec_ability;
2378 *fec_mode = mac->fec_mode;
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2383 struct hclge_mac *mac = &hdev->hw.mac;
2386 hdev->support_sfp_query = true;
2387 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389 hdev->hw.mac.duplex);
2391 dev_err(&hdev->pdev->dev,
2392 "Config mac speed dup fail ret=%d\n", ret);
2398 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2401 dev_err(&hdev->pdev->dev,
2402 "Fec mode init fail, ret = %d\n", ret);
2407 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2409 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2413 ret = hclge_buffer_alloc(hdev);
2415 dev_err(&hdev->pdev->dev,
2416 "allocate buffer fail, ret=%d\n", ret);
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2423 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425 schedule_work(&hdev->mbx_service_task);
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2430 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2431 schedule_work(&hdev->rst_service_task);
2434 static void hclge_task_schedule(struct hclge_dev *hdev)
2436 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2437 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2438 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2439 (void)schedule_work(&hdev->service_task);
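/* Query the MAC link status from firmware; returns 1 if the link is up,
 * 0 otherwise.
 */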
2442 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2444 struct hclge_link_status_cmd *req;
2445 struct hclge_desc desc;
2449 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2450 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2452 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2457 req = (struct hclge_link_status_cmd *)desc.data;
2458 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2460 return !!link_status;
2463 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2468 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2471 mac_state = hclge_get_mac_link_status(hdev);
2473 if (hdev->hw.mac.phydev) {
2474 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2475 link_stat = mac_state &
2476 hdev->hw.mac.phydev->link;
2481 link_stat = mac_state;
2487 static void hclge_update_link_status(struct hclge_dev *hdev)
2489 struct hnae3_client *rclient = hdev->roce_client;
2490 struct hnae3_client *client = hdev->nic_client;
2491 struct hnae3_handle *rhandle;
2492 struct hnae3_handle *handle;
2498 state = hclge_get_mac_phy_link(hdev);
2499 if (state != hdev->hw.mac.link) {
2500 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2501 handle = &hdev->vport[i].nic;
2502 client->ops->link_status_change(handle, state);
2503 hclge_config_mac_tnl_int(hdev, state);
2504 rhandle = &hdev->vport[i].roce;
2505 if (rclient && rclient->ops->link_status_change)
2506 rclient->ops->link_status_change(rhandle,
2509 hdev->hw.mac.link = state;
2513 static void hclge_update_port_capability(struct hclge_mac *mac)
2515 /* update fec ability by speed */
2516 hclge_convert_setting_fec(mac);
2518 /* firmware cannot identify the back plane type; the media type
2519 * read from the configuration helps deal with it
2521 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2522 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2523 mac->module_type = HNAE3_MODULE_TYPE_KR;
2524 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2525 mac->module_type = HNAE3_MODULE_TYPE_TP;
2527 if (mac->support_autoneg) {
2528 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2529 linkmode_copy(mac->advertising, mac->supported);
2531 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2533 linkmode_zero(mac->advertising);
2537 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2539 struct hclge_sfp_info_cmd *resp = NULL;
2540 struct hclge_desc desc;
2543 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2544 resp = (struct hclge_sfp_info_cmd *)desc.data;
2545 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2546 if (ret == -EOPNOTSUPP) {
2547 dev_warn(&hdev->pdev->dev,
2548 "IMP do not support get SFP speed %d\n", ret);
2551 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2555 *speed = le32_to_cpu(resp->speed);
2560 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2562 struct hclge_sfp_info_cmd *resp;
2563 struct hclge_desc desc;
2566 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2567 resp = (struct hclge_sfp_info_cmd *)desc.data;
2569 resp->query_type = QUERY_ACTIVE_SPEED;
2571 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2572 if (ret == -EOPNOTSUPP) {
2573 dev_warn(&hdev->pdev->dev,
2574 "IMP does not support get SFP info %d\n", ret);
2577 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2581 mac->speed = le32_to_cpu(resp->speed);
2582 /* if resp->speed_ability is 0, the firmware is an old version,
2583 * so do not update these params
2585 if (resp->speed_ability) {
2586 mac->module_type = le32_to_cpu(resp->module_type);
2587 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2588 mac->autoneg = resp->autoneg;
2589 mac->support_autoneg = resp->autoneg_ability;
2590 if (!resp->active_fec)
2593 mac->fec_mode = BIT(resp->active_fec);
2595 mac->speed_type = QUERY_SFP_SPEED;
2601 static int hclge_update_port_info(struct hclge_dev *hdev)
2603 struct hclge_mac *mac = &hdev->hw.mac;
2604 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2607 /* get the port info from SFP cmd if not copper port */
2608 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2611 /* if IMP does not support getting SFP/qSFP info, return directly */
2612 if (!hdev->support_sfp_query)
2615 if (hdev->pdev->revision >= 0x21)
2616 ret = hclge_get_sfp_info(hdev, mac);
2618 ret = hclge_get_sfp_speed(hdev, &speed);
2620 if (ret == -EOPNOTSUPP) {
2621 hdev->support_sfp_query = false;
2627 if (hdev->pdev->revision >= 0x21) {
2628 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2629 hclge_update_port_capability(mac);
2632 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2635 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2636 return 0; /* do nothing if no SFP */
2638 /* must config full duplex for SFP */
2639 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2643 static int hclge_get_status(struct hnae3_handle *handle)
2645 struct hclge_vport *vport = hclge_get_vport(handle);
2646 struct hclge_dev *hdev = vport->back;
2648 hclge_update_link_status(hdev);
2650 return hdev->hw.mac.link;
2653 static void hclge_service_timer(struct timer_list *t)
2655 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2657 mod_timer(&hdev->service_timer, jiffies + HZ);
2658 hdev->hw_stats.stats_timer++;
2659 hdev->fd_arfs_expire_timer++;
2660 hclge_task_schedule(hdev);
2663 static void hclge_service_complete(struct hclge_dev *hdev)
2665 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2667 /* Flush memory before next watchdog */
2668 smp_mb__before_atomic();
2669 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2672 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2674 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2676 /* fetch the events from their corresponding regs */
2677 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2678 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2679 msix_src_reg = hclge_read_dev(&hdev->hw,
2680 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2682 /* Assumption: if by any chance reset and mailbox events are reported
2683 * together, then we will only process the reset event in this go and
2684 * will defer the processing of the mailbox events. Since we would not
2685 * have cleared the RX CMDQ event this time, we would receive another
2686 * interrupt from H/W just for the mailbox.
2689 /* check for vector0 reset event sources */
2690 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2691 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2692 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2693 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2694 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2695 hdev->rst_stats.imp_rst_cnt++;
2696 return HCLGE_VECTOR0_EVENT_RST;
2699 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2700 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2701 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2702 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2703 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2704 hdev->rst_stats.global_rst_cnt++;
2705 return HCLGE_VECTOR0_EVENT_RST;
2708 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2709 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2710 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2711 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2712 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2713 hdev->rst_stats.core_rst_cnt++;
2714 return HCLGE_VECTOR0_EVENT_RST;
2717 /* check for vector0 msix event source */
2718 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2719 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2721 return HCLGE_VECTOR0_EVENT_ERR;
2724 /* check for vector0 mailbox(=CMDQ RX) event source */
2725 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2726 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2727 *clearval = cmdq_src_reg;
2728 return HCLGE_VECTOR0_EVENT_MBX;
2731 /* print other vector0 event source */
2732 dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2733 cmdq_src_reg, msix_src_reg);
2734 return HCLGE_VECTOR0_EVENT_OTHER;
2737 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2740 switch (event_type) {
2741 case HCLGE_VECTOR0_EVENT_RST:
2742 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2744 case HCLGE_VECTOR0_EVENT_MBX:
2745 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2752 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2754 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2755 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2756 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2757 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2758 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
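/* Mask (0) or unmask (1) the misc vector by writing its control address */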
2761 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2763 writel(enable ? 1 : 0, vector->addr);
2766 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2768 struct hclge_dev *hdev = data;
2772 hclge_enable_vector(&hdev->misc_vector, false);
2773 event_cause = hclge_check_event_cause(hdev, &clearval);
2775 /* vector 0 interrupt is shared with reset and mailbox source events. */
2776 switch (event_cause) {
2777 case HCLGE_VECTOR0_EVENT_ERR:
2778 /* we do not know what type of reset is required now. This could
2779 * only be decided after we fetch the type of errors which
2780 * caused this event. Therefore, we will do the below for now:
2781 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2782 * have deferred the type of reset to be used.
2783 * 2. Schedule the reset service task.
2784 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2785 * will fetch the correct type of reset. This would be done
2786 * by first decoding the types of errors.
2788 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2790 case HCLGE_VECTOR0_EVENT_RST:
2791 hclge_reset_task_schedule(hdev);
2793 case HCLGE_VECTOR0_EVENT_MBX:
2794 /* If we are here then,
2795 * 1. Either we are not handling any mbx task and we are not
2796 *    scheduled as well, or
2798 * 2. We could be handling a mbx task but nothing more is
2799 *    scheduled.
2800 * In both cases, we should schedule the mbx task as there are more
2801 * mbx messages reported by this interrupt.
2803 hclge_mbx_task_schedule(hdev);
2806 dev_warn(&hdev->pdev->dev,
2807 "received unknown or unhandled event of vector0\n");
2811 /* clear the source of the interrupt if it is not caused by reset */
2812 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2813 hclge_clear_event_cause(hdev, event_cause, clearval);
2814 hclge_enable_vector(&hdev->misc_vector, true);
2820 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2822 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2823 dev_warn(&hdev->pdev->dev,
2824 "vector(vector_id %d) has been freed.\n", vector_id);
2828 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2829 hdev->num_msi_left += 1;
2830 hdev->num_msi_used -= 1;
2833 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2835 struct hclge_misc_vector *vector = &hdev->misc_vector;
2837 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2839 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2840 hdev->vector_status[0] = 0;
2842 hdev->num_msi_left -= 1;
2843 hdev->num_msi_used += 1;
2846 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2850 hclge_get_misc_vector(hdev);
2852 /* this would be explicitly freed in the end */
2853 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2854 0, "hclge_misc", hdev);
2856 hclge_free_vector(hdev, 0);
2857 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2858 hdev->misc_vector.vector_irq);
2864 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2866 free_irq(hdev->misc_vector.vector_irq, hdev);
2867 hclge_free_vector(hdev, 0);
2870 int hclge_notify_client(struct hclge_dev *hdev,
2871 enum hnae3_reset_notify_type type)
2873 struct hnae3_client *client = hdev->nic_client;
2876 if (!client->ops->reset_notify)
2879 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2880 struct hnae3_handle *handle = &hdev->vport[i].nic;
2883 ret = client->ops->reset_notify(handle, type);
2885 dev_err(&hdev->pdev->dev,
2886 "notify nic client failed %d(%d)\n", type, ret);
2894 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2895 enum hnae3_reset_notify_type type)
2897 struct hnae3_client *client = hdev->roce_client;
2904 if (!client->ops->reset_notify)
2907 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2908 struct hnae3_handle *handle = &hdev->vport[i].roce;
2910 ret = client->ops->reset_notify(handle, type);
2912 dev_err(&hdev->pdev->dev,
2913 "notify roce client failed %d(%d)",
2922 static int hclge_reset_wait(struct hclge_dev *hdev)
2924 #define HCLGE_RESET_WAIT_MS 100
2925 #define HCLGE_RESET_WAIT_CNT 200
2926 u32 val, reg, reg_bit;
2929 switch (hdev->reset_type) {
2930 case HNAE3_IMP_RESET:
2931 reg = HCLGE_GLOBAL_RESET_REG;
2932 reg_bit = HCLGE_IMP_RESET_BIT;
2934 case HNAE3_GLOBAL_RESET:
2935 reg = HCLGE_GLOBAL_RESET_REG;
2936 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2938 case HNAE3_CORE_RESET:
2939 reg = HCLGE_GLOBAL_RESET_REG;
2940 reg_bit = HCLGE_CORE_RESET_BIT;
2942 case HNAE3_FUNC_RESET:
2943 reg = HCLGE_FUN_RST_ING;
2944 reg_bit = HCLGE_FUN_RST_ING_B;
2946 case HNAE3_FLR_RESET:
2949 dev_err(&hdev->pdev->dev,
2950 "Wait for unsupported reset type: %d\n",
2955 if (hdev->reset_type == HNAE3_FLR_RESET) {
2956 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2957 cnt++ < HCLGE_RESET_WAIT_CNT)
2958 msleep(HCLGE_RESET_WAIT_MS);
2960 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2961 dev_err(&hdev->pdev->dev,
2962 "flr wait timeout: %d\n", cnt);
2969 val = hclge_read_dev(&hdev->hw, reg);
2970 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2971 msleep(HCLGE_RESET_WAIT_MS);
2972 val = hclge_read_dev(&hdev->hw, reg);
2976 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2977 dev_warn(&hdev->pdev->dev,
2978 "Wait for reset timeout: %d\n", hdev->reset_type);
2985 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2987 struct hclge_vf_rst_cmd *req;
2988 struct hclge_desc desc;
2990 req = (struct hclge_vf_rst_cmd *)desc.data;
2991 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2992 req->dest_vfid = func_id;
2997 return hclge_cmd_send(&hdev->hw, &desc, 1);
3000 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3004 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3005 struct hclge_vport *vport = &hdev->vport[i];
3008 /* Send cmd to set/clear VF's FUNC_RST_ING */
3009 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3011 dev_err(&hdev->pdev->dev,
3012 "set vf(%d) rst failed %d!\n",
3013 vport->vport_id, ret);
3017 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3020 /* Inform VF to process the reset.
3021 * hclge_inform_reset_assert_to_vf may fail if VF
3022 * driver is not loaded.
3024 ret = hclge_inform_reset_assert_to_vf(vport);
3026 dev_warn(&hdev->pdev->dev,
3027 "inform reset to vf(%d) failed %d!\n",
3028 vport->vport_id, ret);
3034 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3036 struct hclge_desc desc;
3037 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3040 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3041 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3042 req->fun_reset_vfid = func_id;
3044 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3046 dev_err(&hdev->pdev->dev,
3047 "send function reset cmd fail, status =%d\n", ret);
3052 static void hclge_do_reset(struct hclge_dev *hdev)
3054 struct hnae3_handle *handle = &hdev->vport[0].nic;
3055 struct pci_dev *pdev = hdev->pdev;
3058 if (hclge_get_hw_reset_stat(handle)) {
3059 dev_info(&pdev->dev, "Hardware reset not finished\n");
3060 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3061 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3062 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3066 switch (hdev->reset_type) {
3067 case HNAE3_GLOBAL_RESET:
3068 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3069 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3070 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3071 dev_info(&pdev->dev, "Global Reset requested\n");
3073 case HNAE3_CORE_RESET:
3074 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3075 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
3076 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3077 dev_info(&pdev->dev, "Core Reset requested\n");
3079 case HNAE3_FUNC_RESET:
3080 dev_info(&pdev->dev, "PF Reset requested\n");
3081 /* schedule again to check later */
3082 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3083 hclge_reset_task_schedule(hdev);
3085 case HNAE3_FLR_RESET:
3086 dev_info(&pdev->dev, "FLR requested\n");
3087 /* schedule again to check later */
3088 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3089 hclge_reset_task_schedule(hdev);
3092 dev_warn(&pdev->dev,
3093 "Unsupported reset type: %d\n", hdev->reset_type);
3098 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3099 unsigned long *addr)
3101 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3103 /* first, resolve any unknown reset type to the known type(s) */
3104 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3105 /* we will intentionally ignore any errors from this function
3106 * as we will end up in *some* reset request in any case
3108 hclge_handle_hw_msix_error(hdev, addr);
3109 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3110 /* We deferred the clearing of the error event which caused
3111 * the interrupt since it was not possible to do that in
3112 * interrupt context (and this is the reason we introduced a
3113 * new UNKNOWN reset type). Now that the errors have been
3114 * handled and cleared in hardware, we can safely enable
3115 * interrupts. This is an exception to the norm.
3117 hclge_enable_vector(&hdev->misc_vector, true);
3120 /* return the highest priority reset level amongst all */
3121 if (test_bit(HNAE3_IMP_RESET, addr)) {
3122 rst_level = HNAE3_IMP_RESET;
3123 clear_bit(HNAE3_IMP_RESET, addr);
3124 clear_bit(HNAE3_GLOBAL_RESET, addr);
3125 clear_bit(HNAE3_CORE_RESET, addr);
3126 clear_bit(HNAE3_FUNC_RESET, addr);
3127 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3128 rst_level = HNAE3_GLOBAL_RESET;
3129 clear_bit(HNAE3_GLOBAL_RESET, addr);
3130 clear_bit(HNAE3_CORE_RESET, addr);
3131 clear_bit(HNAE3_FUNC_RESET, addr);
3132 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3133 rst_level = HNAE3_CORE_RESET;
3134 clear_bit(HNAE3_CORE_RESET, addr);
3135 clear_bit(HNAE3_FUNC_RESET, addr);
3136 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3137 rst_level = HNAE3_FUNC_RESET;
3138 clear_bit(HNAE3_FUNC_RESET, addr);
3139 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3140 rst_level = HNAE3_FLR_RESET;
3141 clear_bit(HNAE3_FLR_RESET, addr);
3144 if (hdev->reset_type != HNAE3_NONE_RESET &&
3145 rst_level < hdev->reset_type)
3146 return HNAE3_NONE_RESET;
3151 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3155 switch (hdev->reset_type) {
3156 case HNAE3_IMP_RESET:
3157 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3159 case HNAE3_GLOBAL_RESET:
3160 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3162 case HNAE3_CORE_RESET:
3163 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3172 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3173 hclge_enable_vector(&hdev->misc_vector, true);
3176 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3180 switch (hdev->reset_type) {
3181 case HNAE3_FUNC_RESET:
3183 case HNAE3_FLR_RESET:
3184 ret = hclge_set_all_vf_rst(hdev, true);
3193 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3198 switch (hdev->reset_type) {
3199 case HNAE3_FUNC_RESET:
3200 /* There is no mechanism for the PF to know if the VF has stopped
3201 * IO; for now, just wait 100 ms for the VF to stop IO
3204 ret = hclge_func_reset_cmd(hdev, 0);
3206 dev_err(&hdev->pdev->dev,
3207 "asserting function reset fail %d!\n", ret);
3211 /* After performing PF reset, it is not necessary to do the
3212 * mailbox handling or send any command to firmware, because
3213 * any mailbox handling or command to firmware is only valid
3214 * after hclge_cmd_init is called.
3216 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3217 hdev->rst_stats.pf_rst_cnt++;
3219 case HNAE3_FLR_RESET:
3220 /* There is no mechanism for the PF to know if the VF has stopped
3221 * IO; for now, just wait 100 ms for the VF to stop IO
3224 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3225 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3226 hdev->rst_stats.flr_rst_cnt++;
3228 case HNAE3_IMP_RESET:
3229 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3230 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3231 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3237 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3242 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3244 #define MAX_RESET_FAIL_CNT 5
3245 #define RESET_UPGRADE_DELAY_SEC 10
3247 if (hdev->reset_pending) {
3248 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3249 hdev->reset_pending);
3251 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3252 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3253 BIT(HCLGE_IMP_RESET_BIT))) {
3254 dev_info(&hdev->pdev->dev,
3255 "reset failed because IMP Reset is pending\n");
3256 hclge_clear_reset_cause(hdev);
3258 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3259 hdev->reset_fail_cnt++;
3261 set_bit(hdev->reset_type, &hdev->reset_pending);
3262 dev_info(&hdev->pdev->dev,
3263 "re-schedule to wait for hw reset done\n");
3267 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3268 hclge_clear_reset_cause(hdev);
3269 mod_timer(&hdev->reset_timer,
3270 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3275 hclge_clear_reset_cause(hdev);
3276 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3280 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3284 switch (hdev->reset_type) {
3285 case HNAE3_FUNC_RESET:
3287 case HNAE3_FLR_RESET:
3288 ret = hclge_set_all_vf_rst(hdev, false);
3297 static void hclge_reset(struct hclge_dev *hdev)
3299 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3300 bool is_timeout = false;
3303 /* Initialize the ae_dev reset status as well, in case the enet layer
3304 * wants to know if the device is undergoing reset
3306 ae_dev->reset_type = hdev->reset_type;
3307 hdev->rst_stats.reset_cnt++;
3308 /* perform reset of the stack & ae device for a client */
3309 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3313 ret = hclge_reset_prepare_down(hdev);
3318 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3320 goto err_reset_lock;
3324 ret = hclge_reset_prepare_wait(hdev);
3328 if (hclge_reset_wait(hdev)) {
3333 hdev->rst_stats.hw_reset_done_cnt++;
3335 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3340 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3342 goto err_reset_lock;
3344 ret = hclge_reset_ae_dev(hdev->ae_dev);
3346 goto err_reset_lock;
3348 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3350 goto err_reset_lock;
3352 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3354 goto err_reset_lock;
3356 hclge_clear_reset_cause(hdev);
3358 ret = hclge_reset_prepare_up(hdev);
3360 goto err_reset_lock;
3362 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3364 goto err_reset_lock;
3368 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3372 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3376 hdev->last_reset_time = jiffies;
3377 hdev->reset_fail_cnt = 0;
3378 hdev->rst_stats.reset_done_cnt++;
3379 ae_dev->reset_type = HNAE3_NONE_RESET;
3380 del_timer(&hdev->reset_timer);
3387 if (hclge_reset_err_handle(hdev, is_timeout))
3388 hclge_reset_task_schedule(hdev);
3391 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3393 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3394 struct hclge_dev *hdev = ae_dev->priv;
3396 /* We might end up getting called broadly because of the two cases below:
3397 * 1. A recoverable error was conveyed through APEI and the only way
3398 * to bring normalcy is to reset.
3399 * 2. A new reset request from the stack due to timeout.
3401 * For the first case, the error event might not have an ae handle
3402 * available. Check if this is a new reset request and we are not here
3403 * just because the last reset attempt did not succeed and the watchdog
3404 * hit us again. We will know this if the last reset request did not
3405 * occur very recently (watchdog timer = 5*HZ; check after a
3406 * sufficiently large time, say 4*5*HZ). In case of a new request we
3407 * reset the "reset level" to PF reset. And if it is a repeat reset
3408 * request of the most recent one then we want to make sure we throttle
3409 * the reset request. Therefore, we will not allow it again before 3*HZ.
3412 handle = &hdev->vport[0].nic;
3414 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3416 else if (hdev->default_reset_request)
3418 hclge_get_reset_level(hdev,
3419 &hdev->default_reset_request);
3420 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3421 hdev->reset_level = HNAE3_FUNC_RESET;
3423 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3426 /* request reset & schedule reset task */
3427 set_bit(hdev->reset_level, &hdev->reset_request);
3428 hclge_reset_task_schedule(hdev);
3430 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3431 hdev->reset_level++;
3434 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3435 enum hnae3_reset_type rst_type)
3437 struct hclge_dev *hdev = ae_dev->priv;
3439 set_bit(rst_type, &hdev->default_reset_request);
3442 static void hclge_reset_timer(struct timer_list *t)
3444 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3446 dev_info(&hdev->pdev->dev,
3447 "triggering global reset in reset timer\n");
3448 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3449 hclge_reset_event(hdev->pdev, NULL);
3452 static void hclge_reset_subtask(struct hclge_dev *hdev)
3454 /* check if there is any ongoing reset in the hardware. This status can
3455 * be checked from reset_pending. If there is one, we need to wait for
3456 * the hardware to complete the reset.
3457 * a. If we are able to figure out in reasonable time that the hardware
3458 * has fully reset, then we can proceed with the driver and client
3459 * reset.
3460 * b. else, we can come back later to check this status, so re-sched now.
3463 hdev->last_reset_time = jiffies;
3464 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3465 if (hdev->reset_type != HNAE3_NONE_RESET)
3468 /* check if we got any *new* reset requests to be honored */
3469 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3470 if (hdev->reset_type != HNAE3_NONE_RESET)
3471 hclge_do_reset(hdev);
3473 hdev->reset_type = HNAE3_NONE_RESET;
3476 static void hclge_reset_service_task(struct work_struct *work)
3478 struct hclge_dev *hdev =
3479 container_of(work, struct hclge_dev, rst_service_task);
3481 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3484 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3486 hclge_reset_subtask(hdev);
3488 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3491 static void hclge_mailbox_service_task(struct work_struct *work)
3493 struct hclge_dev *hdev =
3494 container_of(work, struct hclge_dev, mbx_service_task);
3496 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3499 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3501 hclge_mbx_handler(hdev);
3503 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3506 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3510 /* start from vport 1, since vport 0 is the PF and always alive */
3511 for (i = 1; i < hdev->num_alloc_vport; i++) {
3512 struct hclge_vport *vport = &hdev->vport[i];
3514 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3515 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3517 /* If the VF is not alive, reset its MPS to the default value */
3518 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3519 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3523 static void hclge_service_task(struct work_struct *work)
3525 struct hclge_dev *hdev =
3526 container_of(work, struct hclge_dev, service_task);
3528 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3529 hclge_update_stats_for_all(hdev);
3530 hdev->hw_stats.stats_timer = 0;
3533 hclge_update_port_info(hdev);
3534 hclge_update_link_status(hdev);
3535 hclge_update_vport_alive(hdev);
3536 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3537 hclge_rfs_filter_expire(hdev);
3538 hdev->fd_arfs_expire_timer = 0;
3540 hclge_service_complete(hdev);
3543 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3545 /* VF handle has no client */
3546 if (!handle->client)
3547 return container_of(handle, struct hclge_vport, nic);
3548 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3549 return container_of(handle, struct hclge_vport, roce);
3551 return container_of(handle, struct hclge_vport, nic);
3554 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3555 struct hnae3_vector_info *vector_info)
3557 struct hclge_vport *vport = hclge_get_vport(handle);
3558 struct hnae3_vector_info *vector = vector_info;
3559 struct hclge_dev *hdev = vport->back;
3563 vector_num = min(hdev->num_msi_left, vector_num);
3565 for (j = 0; j < vector_num; j++) {
3566 for (i = 1; i < hdev->num_msi; i++) {
3567 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3568 vector->vector = pci_irq_vector(hdev->pdev, i);
3569 vector->io_addr = hdev->hw.io_base +
3570 HCLGE_VECTOR_REG_BASE +
3571 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3573 HCLGE_VECTOR_VF_OFFSET;
3574 hdev->vector_status[i] = vport->vport_id;
3575 hdev->vector_irq[i] = vector->vector;
3584 hdev->num_msi_left -= alloc;
3585 hdev->num_msi_used += alloc;
3590 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3594 for (i = 0; i < hdev->num_msi; i++)
3595 if (vector == hdev->vector_irq[i])
3601 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3603 struct hclge_vport *vport = hclge_get_vport(handle);
3604 struct hclge_dev *hdev = vport->back;
3607 vector_id = hclge_get_vector_index(hdev, vector);
3608 if (vector_id < 0) {
3609 dev_err(&hdev->pdev->dev,
3610 "Get vector index fail. vector_id =%d\n", vector_id);
3614 hclge_free_vector(hdev, vector_id);
3619 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3621 return HCLGE_RSS_KEY_SIZE;
3624 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3626 return HCLGE_RSS_IND_TBL_SIZE;
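/* The RSS key is written in three chunks: two of HCLGE_RSS_HASH_KEY_NUM
 * bytes and a final chunk holding the remainder of HCLGE_RSS_KEY_SIZE.
 */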
3629 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3630 const u8 hfunc, const u8 *key)
3632 struct hclge_rss_config_cmd *req;
3633 struct hclge_desc desc;
3638 req = (struct hclge_rss_config_cmd *)desc.data;
3640 for (key_offset = 0; key_offset < 3; key_offset++) {
3641 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3644 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3645 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3647 if (key_offset == 2)
3649 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3651 key_size = HCLGE_RSS_HASH_KEY_NUM;
3653 memcpy(req->hash_key,
3654 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3656 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3658 dev_err(&hdev->pdev->dev,
3659 "Configure RSS config fail, status = %d\n",
3667 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3669 struct hclge_rss_indirection_table_cmd *req;
3670 struct hclge_desc desc;
3674 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3676 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3677 hclge_cmd_setup_basic_desc
3678 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3680 req->start_table_index =
3681 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3682 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3684 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3685 req->rss_result[j] =
3686 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3688 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3690 dev_err(&hdev->pdev->dev,
3691 "Configure rss indir table fail,status = %d\n",
3699 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3700 u16 *tc_size, u16 *tc_offset)
3702 struct hclge_rss_tc_mode_cmd *req;
3703 struct hclge_desc desc;
3707 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3708 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3710 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3713 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3714 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3715 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3716 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3717 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3719 req->rss_tc_mode[i] = cpu_to_le16(mode);
3722 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3724 dev_err(&hdev->pdev->dev,
3725 "Configure rss tc mode fail, status = %d\n", ret);
3730 static void hclge_get_rss_type(struct hclge_vport *vport)
3732 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3733 vport->rss_tuple_sets.ipv4_udp_en ||
3734 vport->rss_tuple_sets.ipv4_sctp_en ||
3735 vport->rss_tuple_sets.ipv6_tcp_en ||
3736 vport->rss_tuple_sets.ipv6_udp_en ||
3737 vport->rss_tuple_sets.ipv6_sctp_en)
3738 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3739 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3740 vport->rss_tuple_sets.ipv6_fragment_en)
3741 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3743 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3746 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3748 struct hclge_rss_input_tuple_cmd *req;
3749 struct hclge_desc desc;
3752 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3754 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3756 /* Get the tuple cfg from pf */
3757 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3758 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3759 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3760 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3761 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3762 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3763 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3764 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3765 hclge_get_rss_type(&hdev->vport[0]);
3766 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3768 dev_err(&hdev->pdev->dev,
3769 "Configure rss input fail, status = %d\n", ret);
3773 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3776 struct hclge_vport *vport = hclge_get_vport(handle);
3779 /* Get hash algorithm */
3781 switch (vport->rss_algo) {
3782 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3783 *hfunc = ETH_RSS_HASH_TOP;
3785 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3786 *hfunc = ETH_RSS_HASH_XOR;
3789 *hfunc = ETH_RSS_HASH_UNKNOWN;
3794 /* Get the RSS Key required by the user */
3796 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3798 /* Get indirect table */
3800 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3801 indir[i] = vport->rss_indirection_tbl[i];
3806 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3807 const u8 *key, const u8 hfunc)
3809 struct hclge_vport *vport = hclge_get_vport(handle);
3810 struct hclge_dev *hdev = vport->back;
3814 /* Set the RSS hash key if specified by the user */
3817 case ETH_RSS_HASH_TOP:
3818 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3820 case ETH_RSS_HASH_XOR:
3821 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3823 case ETH_RSS_HASH_NO_CHANGE:
3824 hash_algo = vport->rss_algo;
3830 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3834 /* Update the shadow RSS key with the user specified key */
3835 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3836 vport->rss_algo = hash_algo;
3839 /* Update the shadow RSS table with user specified qids */
3840 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3841 vport->rss_indirection_tbl[i] = indir[i];
3843 /* Update the hardware */
3844 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
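/* Translate the ethtool RXH_* hash flags into the hardware tuple bits */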
3847 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3849 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3851 if (nfc->data & RXH_L4_B_2_3)
3852 hash_sets |= HCLGE_D_PORT_BIT;
3854 hash_sets &= ~HCLGE_D_PORT_BIT;
3856 if (nfc->data & RXH_IP_SRC)
3857 hash_sets |= HCLGE_S_IP_BIT;
3859 hash_sets &= ~HCLGE_S_IP_BIT;
3861 if (nfc->data & RXH_IP_DST)
3862 hash_sets |= HCLGE_D_IP_BIT;
3864 hash_sets &= ~HCLGE_D_IP_BIT;
3866 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3867 hash_sets |= HCLGE_V_TAG_BIT;
3872 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3873 struct ethtool_rxnfc *nfc)
3875 struct hclge_vport *vport = hclge_get_vport(handle);
3876 struct hclge_dev *hdev = vport->back;
3877 struct hclge_rss_input_tuple_cmd *req;
3878 struct hclge_desc desc;
3882 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3883 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3886 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3887 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3889 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3890 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3891 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3892 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3893 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3894 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3895 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3896 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3898 tuple_sets = hclge_get_rss_hash_bits(nfc);
3899 switch (nfc->flow_type) {
3901 req->ipv4_tcp_en = tuple_sets;
3904 req->ipv6_tcp_en = tuple_sets;
3907 req->ipv4_udp_en = tuple_sets;
3910 req->ipv6_udp_en = tuple_sets;
3913 req->ipv4_sctp_en = tuple_sets;
3916 if ((nfc->data & RXH_L4_B_0_1) ||
3917 (nfc->data & RXH_L4_B_2_3))
3920 req->ipv6_sctp_en = tuple_sets;
3923 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3926 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3932 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3934 dev_err(&hdev->pdev->dev,
3935 "Set rss tuple fail, status = %d\n", ret);
3939 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3940 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3941 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3942 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3943 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3944 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3945 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3946 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3947 hclge_get_rss_type(vport);
3951 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3952 struct ethtool_rxnfc *nfc)
3954 struct hclge_vport *vport = hclge_get_vport(handle);
3959 switch (nfc->flow_type) {
3961 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3964 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3967 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3970 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3973 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3976 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3980 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3989 if (tuple_sets & HCLGE_D_PORT_BIT)
3990 nfc->data |= RXH_L4_B_2_3;
3991 if (tuple_sets & HCLGE_S_PORT_BIT)
3992 nfc->data |= RXH_L4_B_0_1;
3993 if (tuple_sets & HCLGE_D_IP_BIT)
3994 nfc->data |= RXH_IP_DST;
3995 if (tuple_sets & HCLGE_S_IP_BIT)
3996 nfc->data |= RXH_IP_SRC;
4001 static int hclge_get_tc_size(struct hnae3_handle *handle)
4003 struct hclge_vport *vport = hclge_get_vport(handle);
4004 struct hclge_dev *hdev = vport->back;
4006 return hdev->rss_size_max;
4009 int hclge_rss_init_hw(struct hclge_dev *hdev)
4011 struct hclge_vport *vport = hdev->vport;
4012 u8 *rss_indir = vport[0].rss_indirection_tbl;
4013 u16 rss_size = vport[0].alloc_rss_size;
4014 u8 *key = vport[0].rss_hash_key;
4015 u8 hfunc = vport[0].rss_algo;
4016 u16 tc_offset[HCLGE_MAX_TC_NUM];
4017 u16 tc_valid[HCLGE_MAX_TC_NUM];
4018 u16 tc_size[HCLGE_MAX_TC_NUM];
4022 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4026 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4030 ret = hclge_set_rss_input_tuple(hdev);
4034 /* Each TC has the same queue size, and the tc_size set to hardware is
4035 * the log2 of the roundup power of two of rss_size; the actual queue
4036 * size is limited by the indirection table.
4038 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4039 dev_err(&hdev->pdev->dev,
4040 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4045 roundup_size = roundup_pow_of_two(rss_size);
4046 roundup_size = ilog2(roundup_size);
4048 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4051 if (!(hdev->hw_tc_map & BIT(i)))
4055 tc_size[i] = roundup_size;
4056 tc_offset[i] = rss_size * i;
4059 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4062 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4064 struct hclge_vport *vport = hdev->vport;
4067 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4068 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4069 vport[j].rss_indirection_tbl[i] =
4070 i % vport[j].alloc_rss_size;
4074 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4076 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4077 struct hclge_vport *vport = hdev->vport;
4079 if (hdev->pdev->revision >= 0x21)
4080 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4082 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4083 vport[i].rss_tuple_sets.ipv4_tcp_en =
4084 HCLGE_RSS_INPUT_TUPLE_OTHER;
4085 vport[i].rss_tuple_sets.ipv4_udp_en =
4086 HCLGE_RSS_INPUT_TUPLE_OTHER;
4087 vport[i].rss_tuple_sets.ipv4_sctp_en =
4088 HCLGE_RSS_INPUT_TUPLE_SCTP;
4089 vport[i].rss_tuple_sets.ipv4_fragment_en =
4090 HCLGE_RSS_INPUT_TUPLE_OTHER;
4091 vport[i].rss_tuple_sets.ipv6_tcp_en =
4092 HCLGE_RSS_INPUT_TUPLE_OTHER;
4093 vport[i].rss_tuple_sets.ipv6_udp_en =
4094 HCLGE_RSS_INPUT_TUPLE_OTHER;
4095 vport[i].rss_tuple_sets.ipv6_sctp_en =
4096 HCLGE_RSS_INPUT_TUPLE_SCTP;
4097 vport[i].rss_tuple_sets.ipv6_fragment_en =
4098 HCLGE_RSS_INPUT_TUPLE_OTHER;
4100 vport[i].rss_algo = rss_algo;
4102 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4103 HCLGE_RSS_KEY_SIZE);
4106 hclge_rss_indir_init_cfg(hdev);
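/* Map (en = true) or unmap (en = false) the rings in ring_chain to the
 * given vector, batching up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring
 * entries per command descriptor.
 */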
4109 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4110 int vector_id, bool en,
4111 struct hnae3_ring_chain_node *ring_chain)
4113 struct hclge_dev *hdev = vport->back;
4114 struct hnae3_ring_chain_node *node;
4115 struct hclge_desc desc;
4116 struct hclge_ctrl_vector_chain_cmd *req
4117 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4118 enum hclge_cmd_status status;
4119 enum hclge_opcode_type op;
4120 u16 tqp_type_and_id;
4123 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4124 hclge_cmd_setup_basic_desc(&desc, op, false);
4125 req->int_vector_id = vector_id;
4128 for (node = ring_chain; node; node = node->next) {
4129 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4130 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4132 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4133 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4134 HCLGE_TQP_ID_S, node->tqp_index);
4135 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4137 hnae3_get_field(node->int_gl_idx,
4138 HNAE3_RING_GL_IDX_M,
4139 HNAE3_RING_GL_IDX_S));
4140 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4141 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4142 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4143 req->vfid = vport->vport_id;
4145 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4147 dev_err(&hdev->pdev->dev,
4148 "Map TQP fail, status is %d.\n",
4154 hclge_cmd_setup_basic_desc(&desc,
4157 req->int_vector_id = vector_id;
4162 req->int_cause_num = i;
4163 req->vfid = vport->vport_id;
4164 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4166 dev_err(&hdev->pdev->dev,
4167 "Map TQP fail, status is %d.\n", status);
4175 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4177 struct hnae3_ring_chain_node *ring_chain)
4179 struct hclge_vport *vport = hclge_get_vport(handle);
4180 struct hclge_dev *hdev = vport->back;
4183 vector_id = hclge_get_vector_index(hdev, vector);
4184 if (vector_id < 0) {
4185 dev_err(&hdev->pdev->dev,
4186 "Get vector index fail. vector_id =%d\n", vector_id);
4190 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4193 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4195 struct hnae3_ring_chain_node *ring_chain)
4197 struct hclge_vport *vport = hclge_get_vport(handle);
4198 struct hclge_dev *hdev = vport->back;
4201 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4204 vector_id = hclge_get_vector_index(hdev, vector);
4205 if (vector_id < 0) {
4206 dev_err(&handle->pdev->dev,
4207 "Get vector index fail. ret =%d\n", vector_id);
4211 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4213 dev_err(&handle->pdev->dev,
4214 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4221 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4222 struct hclge_promisc_param *param)
4224 struct hclge_promisc_cfg_cmd *req;
4225 struct hclge_desc desc;
4228 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4230 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4231 req->vf_id = param->vf_id;
4233 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4234 * pdev revision 0x20; newer revisions support them. Setting
4235 * these two fields does not return an error when the driver
4236 * sends the command to the firmware on revision 0x20.
4238 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4239 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4241 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4243 dev_err(&hdev->pdev->dev,
4244 "Set promisc mode fail, status is %d.\n", ret);
4249 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4250 bool en_mc, bool en_bc, int vport_id)
4255 memset(param, 0, sizeof(struct hclge_promisc_param));
4257 param->enable = HCLGE_PROMISC_EN_UC;
4259 param->enable |= HCLGE_PROMISC_EN_MC;
4261 param->enable |= HCLGE_PROMISC_EN_BC;
4262 param->vf_id = vport_id;
4265 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4268 struct hclge_vport *vport = hclge_get_vport(handle);
4269 struct hclge_dev *hdev = vport->back;
4270 struct hclge_promisc_param param;
4271 bool en_bc_pmc = true;
4273 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4274 * is always bypassed. So broadcast promisc should be disabled until
4275 * the user enables promisc mode
4277 if (handle->pdev->revision == 0x20)
4278 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4280 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4282 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4285 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4287 struct hclge_get_fd_mode_cmd *req;
4288 struct hclge_desc desc;
4291 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4293 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4295 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4297 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4301 *fd_mode = req->mode;
4306 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4307 u32 *stage1_entry_num,
4308 u32 *stage2_entry_num,
4309 u16 *stage1_counter_num,
4310 u16 *stage2_counter_num)
4312 struct hclge_get_fd_allocation_cmd *req;
4313 struct hclge_desc desc;
4316 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4318 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4320 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4322 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4327 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4328 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4329 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4330 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4335 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4337 struct hclge_set_fd_key_config_cmd *req;
4338 struct hclge_fd_key_cfg *stage;
4339 struct hclge_desc desc;
4342 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4344 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4345 stage = &hdev->fd_cfg.key_cfg[stage_num];
4346 req->stage = stage_num;
4347 req->key_select = stage->key_sel;
4348 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4349 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4350 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4351 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4352 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4353 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4355 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4357 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4362 static int hclge_init_fd_config(struct hclge_dev *hdev)
4364 #define LOW_2_WORDS 0x03
4365 struct hclge_fd_key_cfg *key_cfg;
4368 if (!hnae3_dev_fd_supported(hdev))
4371 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4375 switch (hdev->fd_cfg.fd_mode) {
4376 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4377 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4379 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4380 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4383 dev_err(&hdev->pdev->dev,
4384 "Unsupported flow director mode %d\n",
4385 hdev->fd_cfg.fd_mode);
4389 hdev->fd_cfg.proto_support =
4390 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4391 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4392 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4393 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4394 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4395 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4396 key_cfg->outer_sipv6_word_en = 0;
4397 key_cfg->outer_dipv6_word_en = 0;
4399 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4400 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4401 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4402 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4404 /* If using the max 400-bit key, we can also support tuples for ether type */
4405 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4406 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4407 key_cfg->tuple_active |=
4408 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4411 /* roce_type is used to filter roce frames
4412 * dst_vport is used to specify the rule
4414 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4416 ret = hclge_get_fd_allocation(hdev,
4417 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4418 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4419 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4420 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4424 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
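/* A flow director TCAM entry spans three command descriptors; the key
 * bytes are split across their tcam_data fields.
 */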
4427 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4428 int loc, u8 *key, bool is_add)
4430 struct hclge_fd_tcam_config_1_cmd *req1;
4431 struct hclge_fd_tcam_config_2_cmd *req2;
4432 struct hclge_fd_tcam_config_3_cmd *req3;
4433 struct hclge_desc desc[3];
4436 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4437 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4438 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4439 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4440 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4442 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4443 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4444 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4446 req1->stage = stage;
4447 req1->xy_sel = sel_x ? 1 : 0;
4448 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4449 req1->index = cpu_to_le32(loc);
4450 req1->entry_vld = sel_x ? is_add : 0;
4453 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4454 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4455 sizeof(req2->tcam_data));
4456 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4457 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
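/* The TCAM key is wider than a single command descriptor, so it is split
 * across the three requests back to back: req1 carries the first
 * sizeof(req1->tcam_data) bytes of the key, req2 the next chunk and req3
 * the tail. The exact per-descriptor byte counts are whatever the command
 * structs define; together they must cover MAX_KEY_BYTES.
 */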
4460 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4462 dev_err(&hdev->pdev->dev,
4463 "config tcam key fail, ret=%d\n",
4469 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4470 struct hclge_fd_ad_data *action)
4472 struct hclge_fd_ad_config_cmd *req;
4473 struct hclge_desc desc;
4477 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4479 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4480 req->index = cpu_to_le32(loc);
4483 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4484 action->write_rule_id_to_bd);
4485 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4488 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4489 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4490 action->forward_to_direct_queue);
4491 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4493 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4494 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4495 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4496 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4497 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4498 action->next_input_key);
4500 req->ad_data = cpu_to_le64(ad_data);
4501 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4503 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
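/* Background for hclge_fd_convert_tuple() below: a ternary CAM stores an
 * x/y bit pair per key bit so it can encode match-0, match-1 and
 * don't-care. Assuming the usual convention (a sketch; the calc_x()/
 * calc_y() macros define the exact encoding for this hardware):
 *
 *	data  mask  ->  x  y   meaning
 *	  0     1       1  0   match 0
 *	  1     1       0  1   match 1
 *	  -     0       0  0   don't care
 */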
4508 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4509 struct hclge_fd_rule *rule)
4511 u16 tmp_x_s, tmp_y_s;
4512 u32 tmp_x_l, tmp_y_l;
4515 if (rule->unused_tuple & tuple_bit)
4518 switch (tuple_bit) {
4521 case BIT(INNER_DST_MAC):
4522 for (i = 0; i < 6; i++) {
4523 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4524 rule->tuples_mask.dst_mac[i]);
4525 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4526 rule->tuples_mask.dst_mac[i]);
4530 case BIT(INNER_SRC_MAC):
4531 for (i = 0; i < 6; i++) {
4532 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4533 rule->tuples_mask.src_mac[i]);
4534 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4535 rule->tuples_mask.src_mac[i]);
4539 case BIT(INNER_VLAN_TAG_FST):
4540 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4541 rule->tuples_mask.vlan_tag1);
4542 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4543 rule->tuples_mask.vlan_tag1);
4544 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4545 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4548 case BIT(INNER_ETH_TYPE):
4549 calc_x(tmp_x_s, rule->tuples.ether_proto,
4550 rule->tuples_mask.ether_proto);
4551 calc_y(tmp_y_s, rule->tuples.ether_proto,
4552 rule->tuples_mask.ether_proto);
4553 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4554 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4557 case BIT(INNER_IP_TOS):
4558 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4559 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4562 case BIT(INNER_IP_PROTO):
4563 calc_x(*key_x, rule->tuples.ip_proto,
4564 rule->tuples_mask.ip_proto);
4565 calc_y(*key_y, rule->tuples.ip_proto,
4566 rule->tuples_mask.ip_proto);
4569 case BIT(INNER_SRC_IP):
4570 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4571 rule->tuples_mask.src_ip[3]);
4572 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4573 rule->tuples_mask.src_ip[3]);
4574 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4575 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4578 case BIT(INNER_DST_IP):
4579 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4580 rule->tuples_mask.dst_ip[3]);
4581 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4582 rule->tuples_mask.dst_ip[3]);
4583 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4584 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4587 case BIT(INNER_SRC_PORT):
4588 calc_x(tmp_x_s, rule->tuples.src_port,
4589 rule->tuples_mask.src_port);
4590 calc_y(tmp_y_s, rule->tuples.src_port,
4591 rule->tuples_mask.src_port);
4592 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4593 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4596 case BIT(INNER_DST_PORT):
4597 calc_x(tmp_x_s, rule->tuples.dst_port,
4598 rule->tuples_mask.dst_port);
4599 calc_y(tmp_y_s, rule->tuples.dst_port,
4600 rule->tuples_mask.dst_port);
4601 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4602 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4610 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4611 u8 vf_id, u8 network_port_id)
4613 u32 port_number = 0;
4615 if (port_type == HOST_PORT) {
4616 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4618 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4620 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4622 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4623 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4624 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
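/* Layout sketch for the port_number built above (the exact field widths
 * come from the HCLGE_*_M/S macros; the picture below is an assumption):
 *
 *	HOST_PORT:    | PORT_TYPE = 1 | VF_ID | PF_ID |
 *	NETWORK_PORT: | PORT_TYPE = 0 | NETWORK_PORT_ID |
 *
 * e.g. hclge_get_port_number(HOST_PORT, 0, vf_id, 0) yields the value the
 * DST_VPORT meta data tuple matches on below.
 */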
4630 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4631 __le32 *key_x, __le32 *key_y,
4632 struct hclge_fd_rule *rule)
4634 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4635 u8 cur_pos = 0, tuple_size, shift_bits;
4638 for (i = 0; i < MAX_META_DATA; i++) {
4639 tuple_size = meta_data_key_info[i].key_length;
4640 tuple_bit = key_cfg->meta_data_active & BIT(i);
4642 switch (tuple_bit) {
4643 case BIT(ROCE_TYPE):
4644 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4645 cur_pos += tuple_size;
4647 case BIT(DST_VPORT):
4648 port_number = hclge_get_port_number(HOST_PORT, 0,
4650 hnae3_set_field(meta_data,
4651 GENMASK(cur_pos + tuple_size - 1, cur_pos),
4652 cur_pos, port_number);
4653 cur_pos += tuple_size;
4660 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4661 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4662 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4664 *key_x = cpu_to_le32(tmp_x << shift_bits);
4665 *key_y = cpu_to_le32(tmp_y << shift_bits);
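/* Worked example for the shift above (widths are illustrative): if the
 * active meta data fields consume cur_pos = 12 bits, shift_bits is
 * 32 - 12 = 20, so those 12 bits land in the most significant bits
 * [31:20] of the meta data word, consistent with the key layout described
 * in the comment below.
 */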
4668 /* A complete key consists of a meta data key and a tuple key.
4669 * The meta data key is stored in the MSB region and the tuple key in
4670 * the LSB region; unused bits are filled with 0.
4672 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4673 struct hclge_fd_rule *rule)
4675 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4676 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4677 u8 *cur_key_x, *cur_key_y;
4678 int i, ret, tuple_size;
4679 u8 meta_data_region;
4681 memset(key_x, 0, sizeof(key_x));
4682 memset(key_y, 0, sizeof(key_y));
4686 for (i = 0; i < MAX_TUPLE; i++) {
4690 tuple_size = tuple_key_info[i].key_length / 8;
4691 check_tuple = key_cfg->tuple_active & BIT(i);
4693 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4696 cur_key_x += tuple_size;
4697 cur_key_y += tuple_size;
4701 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4702 MAX_META_DATA_LENGTH / 8;
4704 hclge_fd_convert_meta_data(key_cfg,
4705 (__le32 *)(key_x + meta_data_region),
4706 (__le32 *)(key_y + meta_data_region),
4709 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4712 dev_err(&hdev->pdev->dev,
4713 "fd key_y config fail, loc=%d, ret=%d\n",
4714 rule->location, ret);
4718 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4721 dev_err(&hdev->pdev->dev,
4722 "fd key_x config fail, loc=%d, ret=%d\n",
4723 rule->location, ret);
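/* Two hedged notes on hclge_config_key() above:
 *
 * 1. Write order: key_y is programmed first with the entry still invalid
 *    and key_x second with entry_vld set (see hclge_fd_tcam_config()), so
 *    the rule only becomes visible to hardware once both halves are in
 *    place.
 *
 * 2. Byte budget, assuming MAX_KEY_LENGTH = 400 and
 *    MAX_META_DATA_LENGTH = 32: the key buffer is 400 / 8 = 50 bytes and
 *    meta_data_region starts at byte 50 - 32 / 8 = 46.
 */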
4727 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4728 struct hclge_fd_rule *rule)
4730 struct hclge_fd_ad_data ad_data;
4732 ad_data.ad_id = rule->location;
4734 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4735 ad_data.drop_packet = true;
4736 ad_data.forward_to_direct_queue = false;
4737 ad_data.queue_id = 0;
4739 ad_data.drop_packet = false;
4740 ad_data.forward_to_direct_queue = true;
4741 ad_data.queue_id = rule->queue_id;
4744 ad_data.use_counter = false;
4745 ad_data.counter_id = 0;
4747 ad_data.use_next_stage = false;
4748 ad_data.next_input_key = 0;
4750 ad_data.write_rule_id_to_bd = true;
4751 ad_data.rule_id = rule->location;
4753 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
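/* hclge_fd_check_spec() below validates an ethtool flow spec and collects
 * in *unused the tuple bits the user left unspecified; those bits are
 * later treated as don't-care by hclge_fd_convert_tuple(). For example, a
 * TCP/IPv4 rule with no source port set gets BIT(INNER_SRC_PORT) added to
 * the unused mask.
 */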
4756 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4757 struct ethtool_rx_flow_spec *fs, u32 *unused)
4759 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4760 struct ethtool_usrip4_spec *usr_ip4_spec;
4761 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4762 struct ethtool_usrip6_spec *usr_ip6_spec;
4763 struct ethhdr *ether_spec;
4765 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4768 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4771 if ((fs->flow_type & FLOW_EXT) &&
4772 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4773 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4777 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4781 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4782 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4784 if (!tcp_ip4_spec->ip4src)
4785 *unused |= BIT(INNER_SRC_IP);
4787 if (!tcp_ip4_spec->ip4dst)
4788 *unused |= BIT(INNER_DST_IP);
4790 if (!tcp_ip4_spec->psrc)
4791 *unused |= BIT(INNER_SRC_PORT);
4793 if (!tcp_ip4_spec->pdst)
4794 *unused |= BIT(INNER_DST_PORT);
4796 if (!tcp_ip4_spec->tos)
4797 *unused |= BIT(INNER_IP_TOS);
4801 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4802 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4803 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4805 if (!usr_ip4_spec->ip4src)
4806 *unused |= BIT(INNER_SRC_IP);
4808 if (!usr_ip4_spec->ip4dst)
4809 *unused |= BIT(INNER_DST_IP);
4811 if (!usr_ip4_spec->tos)
4812 *unused |= BIT(INNER_IP_TOS);
4814 if (!usr_ip4_spec->proto)
4815 *unused |= BIT(INNER_IP_PROTO);
4817 if (usr_ip4_spec->l4_4_bytes)
4820 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4827 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4828 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4831 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4832 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4833 *unused |= BIT(INNER_SRC_IP);
4835 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4836 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4837 *unused |= BIT(INNER_DST_IP);
4839 if (!tcp_ip6_spec->psrc)
4840 *unused |= BIT(INNER_SRC_PORT);
4842 if (!tcp_ip6_spec->pdst)
4843 *unused |= BIT(INNER_DST_PORT);
4845 if (tcp_ip6_spec->tclass)
4849 case IPV6_USER_FLOW:
4850 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4851 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4852 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4853 BIT(INNER_DST_PORT);
4855 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4856 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4857 *unused |= BIT(INNER_SRC_IP);
4859 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4860 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4861 *unused |= BIT(INNER_DST_IP);
4863 if (!usr_ip6_spec->l4_proto)
4864 *unused |= BIT(INNER_IP_PROTO);
4866 if (usr_ip6_spec->tclass)
4869 if (usr_ip6_spec->l4_4_bytes)
4874 ether_spec = &fs->h_u.ether_spec;
4875 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4876 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4877 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4879 if (is_zero_ether_addr(ether_spec->h_source))
4880 *unused |= BIT(INNER_SRC_MAC);
4882 if (is_zero_ether_addr(ether_spec->h_dest))
4883 *unused |= BIT(INNER_DST_MAC);
4885 if (!ether_spec->h_proto)
4886 *unused |= BIT(INNER_ETH_TYPE);
4893 if (fs->flow_type & FLOW_EXT) {
4894 if (fs->h_ext.vlan_etype)
4896 if (!fs->h_ext.vlan_tci)
4897 *unused |= BIT(INNER_VLAN_TAG_FST);
4899 if (fs->m_ext.vlan_tci) {
4900 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4904 *unused |= BIT(INNER_VLAN_TAG_FST);
4907 if (fs->flow_type & FLOW_MAC_EXT) {
4908 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4911 if (is_zero_ether_addr(fs->h_ext.h_dest))
4912 *unused |= BIT(INNER_DST_MAC);
4914 *unused &= ~(BIT(INNER_DST_MAC));
4920 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4922 struct hclge_fd_rule *rule = NULL;
4923 struct hlist_node *node2;
4925 spin_lock_bh(&hdev->fd_rule_lock);
4926 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4927 if (rule->location >= location)
4931 spin_unlock_bh(&hdev->fd_rule_lock);
4933 return rule && rule->location == location;
4936 /* the caller must hold fd_rule_lock */
4937 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4938 struct hclge_fd_rule *new_rule,
4942 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4943 struct hlist_node *node2;
4945 if (is_add && !new_rule)
4948 hlist_for_each_entry_safe(rule, node2,
4949 &hdev->fd_rule_list, rule_node) {
4950 if (rule->location >= location)
4955 if (rule && rule->location == location) {
4956 hlist_del(&rule->rule_node);
4958 hdev->hclge_fd_rule_num--;
4961 if (!hdev->hclge_fd_rule_num)
4962 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4963 clear_bit(location, hdev->fd_bmap);
4967 } else if (!is_add) {
4968 dev_err(&hdev->pdev->dev,
4969 "delete fail, rule %d is inexistent\n",
4974 INIT_HLIST_NODE(&new_rule->rule_node);
4977 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4979 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4981 set_bit(location, hdev->fd_bmap);
4982 hdev->hclge_fd_rule_num++;
4983 hdev->fd_active_type = new_rule->rule_type;
4988 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4989 struct ethtool_rx_flow_spec *fs,
4990 struct hclge_fd_rule *rule)
4992 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4994 switch (flow_type) {
4998 rule->tuples.src_ip[3] =
4999 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5000 rule->tuples_mask.src_ip[3] =
5001 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5003 rule->tuples.dst_ip[3] =
5004 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5005 rule->tuples_mask.dst_ip[3] =
5006 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5008 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5009 rule->tuples_mask.src_port =
5010 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5012 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5013 rule->tuples_mask.dst_port =
5014 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5016 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5017 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5019 rule->tuples.ether_proto = ETH_P_IP;
5020 rule->tuples_mask.ether_proto = 0xFFFF;
5024 rule->tuples.src_ip[3] =
5025 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5026 rule->tuples_mask.src_ip[3] =
5027 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5029 rule->tuples.dst_ip[3] =
5030 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5031 rule->tuples_mask.dst_ip[3] =
5032 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5034 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5035 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5037 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5038 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5040 rule->tuples.ether_proto = ETH_P_IP;
5041 rule->tuples_mask.ether_proto = 0xFFFF;
5047 be32_to_cpu_array(rule->tuples.src_ip,
5048 fs->h_u.tcp_ip6_spec.ip6src, 4);
5049 be32_to_cpu_array(rule->tuples_mask.src_ip,
5050 fs->m_u.tcp_ip6_spec.ip6src, 4);
5052 be32_to_cpu_array(rule->tuples.dst_ip,
5053 fs->h_u.tcp_ip6_spec.ip6dst, 4);
5054 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5055 fs->m_u.tcp_ip6_spec.ip6dst, 4);
5057 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5058 rule->tuples_mask.src_port =
5059 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5061 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5062 rule->tuples_mask.dst_port =
5063 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5065 rule->tuples.ether_proto = ETH_P_IPV6;
5066 rule->tuples_mask.ether_proto = 0xFFFF;
5069 case IPV6_USER_FLOW:
5070 be32_to_cpu_array(rule->tuples.src_ip,
5071 fs->h_u.usr_ip6_spec.ip6src, 4);
5072 be32_to_cpu_array(rule->tuples_mask.src_ip,
5073 fs->m_u.usr_ip6_spec.ip6src, 4);
5075 be32_to_cpu_array(rule->tuples.dst_ip,
5076 fs->h_u.usr_ip6_spec.ip6dst, 4);
5077 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5078 fs->m_u.usr_ip6_spec.ip6dst, 4);
5080 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5081 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5083 rule->tuples.ether_proto = ETH_P_IPV6;
5084 rule->tuples_mask.ether_proto = 0xFFFF;
5088 ether_addr_copy(rule->tuples.src_mac,
5089 fs->h_u.ether_spec.h_source);
5090 ether_addr_copy(rule->tuples_mask.src_mac,
5091 fs->m_u.ether_spec.h_source);
5093 ether_addr_copy(rule->tuples.dst_mac,
5094 fs->h_u.ether_spec.h_dest);
5095 ether_addr_copy(rule->tuples_mask.dst_mac,
5096 fs->m_u.ether_spec.h_dest);
5098 rule->tuples.ether_proto =
5099 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5100 rule->tuples_mask.ether_proto =
5101 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5108 switch (flow_type) {
5111 rule->tuples.ip_proto = IPPROTO_SCTP;
5112 rule->tuples_mask.ip_proto = 0xFF;
5116 rule->tuples.ip_proto = IPPROTO_TCP;
5117 rule->tuples_mask.ip_proto = 0xFF;
5121 rule->tuples.ip_proto = IPPROTO_UDP;
5122 rule->tuples_mask.ip_proto = 0xFF;
5128 if (fs->flow_type & FLOW_EXT) {
5129 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5130 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5133 if (fs->flow_type & FLOW_MAC_EXT) {
5134 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5135 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5141 /* the caller must hold fd_rule_lock */
5142 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5143 struct hclge_fd_rule *rule)
5148 dev_err(&hdev->pdev->dev,
5149 "The flow director rule is NULL\n");
5153 /* this never fails here, so there is no need to check the return value */
5154 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5156 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5160 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5167 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5171 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5172 struct ethtool_rxnfc *cmd)
5174 struct hclge_vport *vport = hclge_get_vport(handle);
5175 struct hclge_dev *hdev = vport->back;
5176 u16 dst_vport_id = 0, q_index = 0;
5177 struct ethtool_rx_flow_spec *fs;
5178 struct hclge_fd_rule *rule;
5183 if (!hnae3_dev_fd_supported(hdev))
5187 dev_warn(&hdev->pdev->dev,
5188 "Please enable flow director first\n");
5192 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5194 ret = hclge_fd_check_spec(hdev, fs, &unused);
5196 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5200 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5201 action = HCLGE_FD_ACTION_DROP_PACKET;
5203 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5204 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
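/* Per the ethtool UAPI, ring_cookie packs both targets into one u64: the
 * queue index lives in the low 32 bits and the VF index above bit 32
 * (ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF), i.e. cookie = ((u64)vf << 32) | ring.
 * A vf of 0 selects the PF itself, as the dst_vport_id choice below shows.
 */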
5207 if (vf > hdev->num_req_vfs) {
5208 dev_err(&hdev->pdev->dev,
5209 "Error: vf id (%d) > max vf num (%d)\n",
5210 vf, hdev->num_req_vfs);
5214 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5215 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5218 dev_err(&hdev->pdev->dev,
5219 "Error: queue id (%d) > max tqp num (%d)\n",
5224 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5228 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5232 ret = hclge_fd_get_tuple(hdev, fs, rule);
5238 rule->flow_type = fs->flow_type;
5240 rule->location = fs->location;
5241 rule->unused_tuple = unused;
5242 rule->vf_id = dst_vport_id;
5243 rule->queue_id = q_index;
5244 rule->action = action;
5245 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5247 /* To avoid rule conflicts, we need to clear all arfs rules when the
5248 * user configures rules via ethtool
5250 hclge_clear_arfs_rules(handle);
5252 spin_lock_bh(&hdev->fd_rule_lock);
5253 ret = hclge_fd_config_rule(hdev, rule);
5255 spin_unlock_bh(&hdev->fd_rule_lock);
5260 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5261 struct ethtool_rxnfc *cmd)
5263 struct hclge_vport *vport = hclge_get_vport(handle);
5264 struct hclge_dev *hdev = vport->back;
5265 struct ethtool_rx_flow_spec *fs;
5268 if (!hnae3_dev_fd_supported(hdev))
5271 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5273 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5276 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5277 dev_err(&hdev->pdev->dev,
5278 "Delete fail, rule %d is inexistent\n",
5283 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5284 fs->location, NULL, false);
5288 spin_lock_bh(&hdev->fd_rule_lock);
5289 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5291 spin_unlock_bh(&hdev->fd_rule_lock);
5296 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5299 struct hclge_vport *vport = hclge_get_vport(handle);
5300 struct hclge_dev *hdev = vport->back;
5301 struct hclge_fd_rule *rule;
5302 struct hlist_node *node;
5305 if (!hnae3_dev_fd_supported(hdev))
5308 spin_lock_bh(&hdev->fd_rule_lock);
5309 for_each_set_bit(location, hdev->fd_bmap,
5310 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5311 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5315 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5317 hlist_del(&rule->rule_node);
5320 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5321 hdev->hclge_fd_rule_num = 0;
5322 bitmap_zero(hdev->fd_bmap,
5323 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5326 spin_unlock_bh(&hdev->fd_rule_lock);
5329 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5331 struct hclge_vport *vport = hclge_get_vport(handle);
5332 struct hclge_dev *hdev = vport->back;
5333 struct hclge_fd_rule *rule;
5334 struct hlist_node *node;
5337 /* Return ok here, because reset error handling will check this
5338 return value. If error is returned here, the reset process will fail.
5341 if (!hnae3_dev_fd_supported(hdev))
5344 /* if fd is disabled, it should not be restored during reset */
5348 spin_lock_bh(&hdev->fd_rule_lock);
5349 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5350 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5352 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5355 dev_warn(&hdev->pdev->dev,
5356 "Restore rule %d failed, remove it\n",
5358 clear_bit(rule->location, hdev->fd_bmap);
5359 hlist_del(&rule->rule_node);
5361 hdev->hclge_fd_rule_num--;
5365 if (hdev->hclge_fd_rule_num)
5366 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5368 spin_unlock_bh(&hdev->fd_rule_lock);
5373 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5374 struct ethtool_rxnfc *cmd)
5376 struct hclge_vport *vport = hclge_get_vport(handle);
5377 struct hclge_dev *hdev = vport->back;
5379 if (!hnae3_dev_fd_supported(hdev))
5382 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5383 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5388 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5389 struct ethtool_rxnfc *cmd)
5391 struct hclge_vport *vport = hclge_get_vport(handle);
5392 struct hclge_fd_rule *rule = NULL;
5393 struct hclge_dev *hdev = vport->back;
5394 struct ethtool_rx_flow_spec *fs;
5395 struct hlist_node *node2;
5397 if (!hnae3_dev_fd_supported(hdev))
5400 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5402 spin_lock_bh(&hdev->fd_rule_lock);
5404 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5405 if (rule->location >= fs->location)
5409 if (!rule || fs->location != rule->location) {
5410 spin_unlock_bh(&hdev->fd_rule_lock);
5415 fs->flow_type = rule->flow_type;
5416 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5420 fs->h_u.tcp_ip4_spec.ip4src =
5421 cpu_to_be32(rule->tuples.src_ip[3]);
5422 fs->m_u.tcp_ip4_spec.ip4src =
5423 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5424 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5426 fs->h_u.tcp_ip4_spec.ip4dst =
5427 cpu_to_be32(rule->tuples.dst_ip[3]);
5428 fs->m_u.tcp_ip4_spec.ip4dst =
5429 rule->unused_tuple & BIT(INNER_DST_IP) ?
5430 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5432 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5433 fs->m_u.tcp_ip4_spec.psrc =
5434 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5435 0 : cpu_to_be16(rule->tuples_mask.src_port);
5437 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5438 fs->m_u.tcp_ip4_spec.pdst =
5439 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5440 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5442 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5443 fs->m_u.tcp_ip4_spec.tos =
5444 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5445 0 : rule->tuples_mask.ip_tos;
5449 fs->h_u.usr_ip4_spec.ip4src =
5450 cpu_to_be32(rule->tuples.src_ip[3]);
5451 fs->m_u.usr_ip4_spec.ip4src =
5452 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5453 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5455 fs->h_u.usr_ip4_spec.ip4dst =
5456 cpu_to_be32(rule->tuples.dst_ip[3]);
5457 fs->m_u.usr_ip4_spec.ip4dst =
5458 rule->unused_tuple & BIT(INNER_DST_IP) ?
5459 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5461 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5462 fs->m_u.usr_ip4_spec.tos =
5463 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5464 0 : rule->tuples_mask.ip_tos;
5466 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5467 fs->m_u.usr_ip4_spec.proto =
5468 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5469 0 : rule->tuples_mask.ip_proto;
5471 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5477 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5478 rule->tuples.src_ip, 4);
5479 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5480 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5482 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5483 rule->tuples_mask.src_ip, 4);
5485 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5486 rule->tuples.dst_ip, 4);
5487 if (rule->unused_tuple & BIT(INNER_DST_IP))
5488 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5490 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5491 rule->tuples_mask.dst_ip, 4);
5493 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5494 fs->m_u.tcp_ip6_spec.psrc =
5495 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5496 0 : cpu_to_be16(rule->tuples_mask.src_port);
5498 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5499 fs->m_u.tcp_ip6_spec.pdst =
5500 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5501 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5504 case IPV6_USER_FLOW:
5505 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5506 rule->tuples.src_ip, 4);
5507 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5508 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5510 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5511 rule->tuples_mask.src_ip, 4);
5513 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5514 rule->tuples.dst_ip, 4);
5515 if (rule->unused_tuple & BIT(INNER_DST_IP))
5516 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5518 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5519 rule->tuples_mask.dst_ip, 4);
5521 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5522 fs->m_u.usr_ip6_spec.l4_proto =
5523 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5524 0 : rule->tuples_mask.ip_proto;
5528 ether_addr_copy(fs->h_u.ether_spec.h_source,
5529 rule->tuples.src_mac);
5530 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5531 eth_zero_addr(fs->m_u.ether_spec.h_source);
5533 ether_addr_copy(fs->m_u.ether_spec.h_source,
5534 rule->tuples_mask.src_mac);
5536 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5537 rule->tuples.dst_mac);
5538 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5539 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5541 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5542 rule->tuples_mask.dst_mac);
5544 fs->h_u.ether_spec.h_proto =
5545 cpu_to_be16(rule->tuples.ether_proto);
5546 fs->m_u.ether_spec.h_proto =
5547 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5548 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5552 spin_unlock_bh(&hdev->fd_rule_lock);
5556 if (fs->flow_type & FLOW_EXT) {
5557 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5558 fs->m_ext.vlan_tci =
5559 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5560 cpu_to_be16(VLAN_VID_MASK) :
5561 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5564 if (fs->flow_type & FLOW_MAC_EXT) {
5565 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5566 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5567 eth_zero_addr(fs->m_ext.h_dest);
5569 ether_addr_copy(fs->m_ext.h_dest,
5570 rule->tuples_mask.dst_mac);
5573 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5574 fs->ring_cookie = RX_CLS_FLOW_DISC;
5578 fs->ring_cookie = rule->queue_id;
5579 vf_id = rule->vf_id;
5580 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5581 fs->ring_cookie |= vf_id;
5584 spin_unlock_bh(&hdev->fd_rule_lock);
5589 static int hclge_get_all_rules(struct hnae3_handle *handle,
5590 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5592 struct hclge_vport *vport = hclge_get_vport(handle);
5593 struct hclge_dev *hdev = vport->back;
5594 struct hclge_fd_rule *rule;
5595 struct hlist_node *node2;
5598 if (!hnae3_dev_fd_supported(hdev))
5601 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5603 spin_lock_bh(&hdev->fd_rule_lock);
5604 hlist_for_each_entry_safe(rule, node2,
5605 &hdev->fd_rule_list, rule_node) {
5606 if (cnt == cmd->rule_cnt) {
5607 spin_unlock_bh(&hdev->fd_rule_lock);
5611 rule_locs[cnt] = rule->location;
5615 spin_unlock_bh(&hdev->fd_rule_lock);
5617 cmd->rule_cnt = cnt;
5622 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5623 struct hclge_fd_rule_tuples *tuples)
5625 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5626 tuples->ip_proto = fkeys->basic.ip_proto;
5627 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5629 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5630 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5631 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5633 memcpy(tuples->src_ip,
5634 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5635 sizeof(tuples->src_ip));
5636 memcpy(tuples->dst_ip,
5637 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5638 sizeof(tuples->dst_ip));
5642 /* traverse all rules, check whether an existing rule has the same tuples */
5643 static struct hclge_fd_rule *
5644 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5645 const struct hclge_fd_rule_tuples *tuples)
5647 struct hclge_fd_rule *rule = NULL;
5648 struct hlist_node *node;
5650 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5651 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5658 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5659 struct hclge_fd_rule *rule)
5661 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5662 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5663 BIT(INNER_SRC_PORT);
5666 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5667 if (tuples->ether_proto == ETH_P_IP) {
5668 if (tuples->ip_proto == IPPROTO_TCP)
5669 rule->flow_type = TCP_V4_FLOW;
5671 rule->flow_type = UDP_V4_FLOW;
5673 if (tuples->ip_proto == IPPROTO_TCP)
5674 rule->flow_type = TCP_V6_FLOW;
5676 rule->flow_type = UDP_V6_FLOW;
5678 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5679 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5682 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5683 u16 flow_id, struct flow_keys *fkeys)
5685 #ifdef CONFIG_RFS_ACCEL
5686 struct hclge_vport *vport = hclge_get_vport(handle);
5687 struct hclge_fd_rule_tuples new_tuples;
5688 struct hclge_dev *hdev = vport->back;
5689 struct hclge_fd_rule *rule;
5694 if (!hnae3_dev_fd_supported(hdev))
5697 memset(&new_tuples, 0, sizeof(new_tuples));
5698 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5700 spin_lock_bh(&hdev->fd_rule_lock);
5702 /* when an fd rule added by the user already exists,
5703 * arfs should not work
5705 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5706 spin_unlock_bh(&hdev->fd_rule_lock);
5711 /* check whether a flow director filter exists for this flow:
5712 * if not, create a new filter for it;
5713 * if a filter exists with a different queue id, modify the filter;
5714 * if a filter exists with the same queue id, do nothing
5716 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5718 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5719 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5720 spin_unlock_bh(&hdev->fd_rule_lock);
5725 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5727 spin_unlock_bh(&hdev->fd_rule_lock);
5732 set_bit(bit_id, hdev->fd_bmap);
5733 rule->location = bit_id;
5734 rule->flow_id = flow_id;
5735 rule->queue_id = queue_id;
5736 hclge_fd_build_arfs_rule(&new_tuples, rule);
5737 ret = hclge_fd_config_rule(hdev, rule);
5739 spin_unlock_bh(&hdev->fd_rule_lock);
5744 return rule->location;
5747 spin_unlock_bh(&hdev->fd_rule_lock);
5749 if (rule->queue_id == queue_id)
5750 return rule->location;
5752 tmp_queue_id = rule->queue_id;
5753 rule->queue_id = queue_id;
5754 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5756 rule->queue_id = tmp_queue_id;
5760 return rule->location;
5764 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5766 #ifdef CONFIG_RFS_ACCEL
5767 struct hnae3_handle *handle = &hdev->vport[0].nic;
5768 struct hclge_fd_rule *rule;
5769 struct hlist_node *node;
5770 HLIST_HEAD(del_list);
5772 spin_lock_bh(&hdev->fd_rule_lock);
5773 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5774 spin_unlock_bh(&hdev->fd_rule_lock);
5777 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5778 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5779 rule->flow_id, rule->location)) {
5780 hlist_del_init(&rule->rule_node);
5781 hlist_add_head(&rule->rule_node, &del_list);
5782 hdev->hclge_fd_rule_num--;
5783 clear_bit(rule->location, hdev->fd_bmap);
5786 spin_unlock_bh(&hdev->fd_rule_lock);
5788 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5789 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5790 rule->location, NULL, false);
5796 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5798 #ifdef CONFIG_RFS_ACCEL
5799 struct hclge_vport *vport = hclge_get_vport(handle);
5800 struct hclge_dev *hdev = vport->back;
5802 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5803 hclge_del_all_fd_entries(handle, true);
5807 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5809 struct hclge_vport *vport = hclge_get_vport(handle);
5810 struct hclge_dev *hdev = vport->back;
5812 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5813 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5816 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5818 struct hclge_vport *vport = hclge_get_vport(handle);
5819 struct hclge_dev *hdev = vport->back;
5821 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5824 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5826 struct hclge_vport *vport = hclge_get_vport(handle);
5827 struct hclge_dev *hdev = vport->back;
5829 return hdev->rst_stats.hw_reset_done_cnt;
5832 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5834 struct hclge_vport *vport = hclge_get_vport(handle);
5835 struct hclge_dev *hdev = vport->back;
5838 hdev->fd_en = enable;
5839 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5841 hclge_del_all_fd_entries(handle, clear);
5843 hclge_restore_fd_entries(handle);
5846 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5848 struct hclge_desc desc;
5849 struct hclge_config_mac_mode_cmd *req =
5850 (struct hclge_config_mac_mode_cmd *)desc.data;
5854 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5855 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5856 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5857 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5858 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5859 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5860 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5861 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5862 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5863 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5864 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5865 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5866 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5867 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5868 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5869 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5871 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5873 dev_err(&hdev->pdev->dev,
5874 "mac enable fail, ret =%d.\n", ret);
5877 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5879 struct hclge_config_mac_mode_cmd *req;
5880 struct hclge_desc desc;
5884 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5885 /* 1 Read out the MAC mode config first */
5886 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5887 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5889 dev_err(&hdev->pdev->dev,
5890 "mac loopback get fail, ret =%d.\n", ret);
5894 /* 2 Then setup the loopback flag */
5895 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5896 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5897 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5898 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5900 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5902 /* 3 Config mac work mode with loopback flag
5903 * and its original configuration parameters
5905 hclge_cmd_reuse_desc(&desc, false);
5906 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5908 dev_err(&hdev->pdev->dev,
5909 "mac loopback set fail, ret =%d.\n", ret);
5913 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5914 enum hnae3_loop loop_mode)
5916 #define HCLGE_SERDES_RETRY_MS 10
5917 #define HCLGE_SERDES_RETRY_NUM 100
5919 #define HCLGE_MAC_LINK_STATUS_MS 10
5920 #define HCLGE_MAC_LINK_STATUS_NUM 100
5921 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5922 #define HCLGE_MAC_LINK_STATUS_UP 1
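/* Polling budget implied by the constants above: both the serdes "done"
 * poll and the MAC link-status poll retry up to 100 times with a 10 ms
 * sleep, i.e. roughly a one second ceiling each.
 */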
5924 struct hclge_serdes_lb_cmd *req;
5925 struct hclge_desc desc;
5926 int mac_link_ret = 0;
5930 req = (struct hclge_serdes_lb_cmd *)desc.data;
5931 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5933 switch (loop_mode) {
5934 case HNAE3_LOOP_SERIAL_SERDES:
5935 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5937 case HNAE3_LOOP_PARALLEL_SERDES:
5938 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5941 dev_err(&hdev->pdev->dev,
5942 "unsupported serdes loopback mode %d\n", loop_mode);
5947 req->enable = loop_mode_b;
5948 req->mask = loop_mode_b;
5949 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5951 req->mask = loop_mode_b;
5952 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5955 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5957 dev_err(&hdev->pdev->dev,
5958 "serdes loopback set fail, ret = %d\n", ret);
5963 msleep(HCLGE_SERDES_RETRY_MS);
5964 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5966 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5968 dev_err(&hdev->pdev->dev,
5969 "serdes loopback get, ret = %d\n", ret);
5972 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5973 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5975 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5976 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5978 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5979 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5983 hclge_cfg_mac_mode(hdev, en);
5987 /* serdes internal loopback, independent of the network cable. */
5988 msleep(HCLGE_MAC_LINK_STATUS_MS);
5989 ret = hclge_get_mac_link_status(hdev);
5990 if (ret == mac_link_ret)
5992 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5994 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5999 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
6000 int stream_id, bool enable)
6002 struct hclge_desc desc;
6003 struct hclge_cfg_com_tqp_queue_cmd *req =
6004 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6007 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6008 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6009 req->stream_id = cpu_to_le16(stream_id);
6010 req->enable |= enable << HCLGE_TQP_ENABLE_B;
6012 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6014 dev_err(&hdev->pdev->dev,
6015 "Tqp enable fail, status =%d.\n", ret);
6019 static int hclge_set_loopback(struct hnae3_handle *handle,
6020 enum hnae3_loop loop_mode, bool en)
6022 struct hclge_vport *vport = hclge_get_vport(handle);
6023 struct hnae3_knic_private_info *kinfo;
6024 struct hclge_dev *hdev = vport->back;
6027 switch (loop_mode) {
6028 case HNAE3_LOOP_APP:
6029 ret = hclge_set_app_loopback(hdev, en);
6031 case HNAE3_LOOP_SERIAL_SERDES:
6032 case HNAE3_LOOP_PARALLEL_SERDES:
6033 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6037 dev_err(&hdev->pdev->dev,
6038 "loop_mode %d is not supported\n", loop_mode);
6045 kinfo = &vport->nic.kinfo;
6046 for (i = 0; i < kinfo->num_tqps; i++) {
6047 ret = hclge_tqp_enable(hdev, i, 0, en);
6055 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6057 struct hclge_vport *vport = hclge_get_vport(handle);
6058 struct hnae3_knic_private_info *kinfo;
6059 struct hnae3_queue *queue;
6060 struct hclge_tqp *tqp;
6063 kinfo = &vport->nic.kinfo;
6064 for (i = 0; i < kinfo->num_tqps; i++) {
6065 queue = handle->kinfo.tqp[i];
6066 tqp = container_of(queue, struct hclge_tqp, q);
6067 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6071 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6073 struct hclge_vport *vport = hclge_get_vport(handle);
6074 struct hclge_dev *hdev = vport->back;
6077 mod_timer(&hdev->service_timer, jiffies + HZ);
6079 del_timer_sync(&hdev->service_timer);
6080 cancel_work_sync(&hdev->service_task);
6081 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6085 static int hclge_ae_start(struct hnae3_handle *handle)
6087 struct hclge_vport *vport = hclge_get_vport(handle);
6088 struct hclge_dev *hdev = vport->back;
6091 hclge_cfg_mac_mode(hdev, true);
6092 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6093 hdev->hw.mac.link = 0;
6095 /* reset tqp stats */
6096 hclge_reset_tqp_stats(handle);
6098 hclge_mac_start_phy(hdev);
6103 static void hclge_ae_stop(struct hnae3_handle *handle)
6105 struct hclge_vport *vport = hclge_get_vport(handle);
6106 struct hclge_dev *hdev = vport->back;
6109 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6111 hclge_clear_arfs_rules(handle);
6113 /* If it is not PF reset, the firmware will disable the MAC,
6114 * so it only needs to stop the PHY here.
6116 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6117 hdev->reset_type != HNAE3_FUNC_RESET) {
6118 hclge_mac_stop_phy(hdev);
6122 for (i = 0; i < handle->kinfo.num_tqps; i++)
6123 hclge_reset_tqp(handle, i);
6126 hclge_cfg_mac_mode(hdev, false);
6128 hclge_mac_stop_phy(hdev);
6130 /* reset tqp stats */
6131 hclge_reset_tqp_stats(handle);
6132 hclge_update_link_status(hdev);
6135 int hclge_vport_start(struct hclge_vport *vport)
6137 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6138 vport->last_active_jiffies = jiffies;
6142 void hclge_vport_stop(struct hclge_vport *vport)
6144 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6147 static int hclge_client_start(struct hnae3_handle *handle)
6149 struct hclge_vport *vport = hclge_get_vport(handle);
6151 return hclge_vport_start(vport);
6154 static void hclge_client_stop(struct hnae3_handle *handle)
6156 struct hclge_vport *vport = hclge_get_vport(handle);
6158 hclge_vport_stop(vport);
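/* hclge_get_mac_vlan_cmd_status() below translates the firmware's
 * per-opcode response codes into errnos. A summary as implemented (derived
 * from the code, not from a firmware spec): a nonzero cmdq_resp is -EIO;
 * otherwise
 *
 *	ADD:    0 or 1 -> 0,  2 -> -ENOSPC (uc overflow),
 *	        3 -> -ENOSPC (mc overflow)
 *	REMOVE: 0 -> 0,  1 -> -ENOENT (entry missing)
 *	LOOKUP: 0 -> 0,  1 -> -ENOENT (miss)
 *	unknown op -> -EINVAL, undefined code -> -EIO
 */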
6161 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6162 u16 cmdq_resp, u8 resp_code,
6163 enum hclge_mac_vlan_tbl_opcode op)
6165 struct hclge_dev *hdev = vport->back;
6166 int return_status = -EIO;
6169 dev_err(&hdev->pdev->dev,
6170 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6175 if (op == HCLGE_MAC_VLAN_ADD) {
6176 if (!resp_code || resp_code == 1) {
6178 } else if (resp_code == 2) {
6179 return_status = -ENOSPC;
6180 dev_err(&hdev->pdev->dev,
6181 "add mac addr failed for uc_overflow.\n");
6182 } else if (resp_code == 3) {
6183 return_status = -ENOSPC;
6184 dev_err(&hdev->pdev->dev,
6185 "add mac addr failed for mc_overflow.\n");
6187 dev_err(&hdev->pdev->dev,
6188 "add mac addr failed for undefined, code=%d.\n",
6191 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6194 } else if (resp_code == 1) {
6195 return_status = -ENOENT;
6196 dev_dbg(&hdev->pdev->dev,
6197 "remove mac addr failed for miss.\n");
6199 dev_err(&hdev->pdev->dev,
6200 "remove mac addr failed for undefined, code=%d.\n",
6203 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6206 } else if (resp_code == 1) {
6207 return_status = -ENOENT;
6208 dev_dbg(&hdev->pdev->dev,
6209 "lookup mac addr failed for miss.\n");
6211 dev_err(&hdev->pdev->dev,
6212 "lookup mac addr failed for undefined, code=%d.\n",
6216 return_status = -EINVAL;
6217 dev_err(&hdev->pdev->dev,
6218 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6222 return return_status;
6225 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6230 if (vfid > 255 || vfid < 0)
6233 if (vfid <= 191) {
6234 word_num = vfid / 32;
6235 bit_num = vfid % 32;
6237 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6239 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6241 word_num = (vfid - 192) / 32;
6242 bit_num = vfid % 32;
6244 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6246 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
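/* Worked example for the vfid bitmap above: the 256 function ids span two
 * descriptors, vfid 0..191 in desc[1] (six 32-bit words) and vfid 192..255
 * in desc[2]. E.g. vfid = 200 lands in desc[2], word (200 - 192) / 32 = 0,
 * bit 200 % 32 = 8.
 */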
6252 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6254 #define HCLGE_DESC_NUMBER 3
6255 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6258 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6259 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6260 if (desc[i].data[j])
6266 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6267 const u8 *addr, bool is_mc)
6269 const unsigned char *mac_addr = addr;
6270 u32 high_val = (mac_addr[3] << 24) | (mac_addr[2] << 16) |
6271 (mac_addr[1] << 8) | mac_addr[0];
6272 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
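/* Packing example (illustrative): for the address 00:11:22:33:44:55,
 * high_val = 0x33221100 and low_val = 0x5544, i.e. the MAC bytes are laid
 * out little-endian across the two table fields.
 */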
6274 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6276 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6277 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6280 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6281 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6284 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6285 struct hclge_mac_vlan_tbl_entry_cmd *req)
6287 struct hclge_dev *hdev = vport->back;
6288 struct hclge_desc desc;
6293 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6295 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6297 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6299 dev_err(&hdev->pdev->dev,
6300 "del mac addr failed for cmd_send, ret =%d.\n",
6304 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6305 retval = le16_to_cpu(desc.retval);
6307 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6308 HCLGE_MAC_VLAN_REMOVE);
6311 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6312 struct hclge_mac_vlan_tbl_entry_cmd *req,
6313 struct hclge_desc *desc,
6316 struct hclge_dev *hdev = vport->back;
6321 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6323 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6324 memcpy(desc[0].data,
6326 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6327 hclge_cmd_setup_basic_desc(&desc[1],
6328 HCLGE_OPC_MAC_VLAN_ADD,
6330 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6331 hclge_cmd_setup_basic_desc(&desc[2],
6332 HCLGE_OPC_MAC_VLAN_ADD,
6334 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6336 memcpy(desc[0].data,
6338 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6339 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6342 dev_err(&hdev->pdev->dev,
6343 "lookup mac addr failed for cmd_send, ret =%d.\n",
6347 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6348 retval = le16_to_cpu(desc[0].retval);
6350 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6351 HCLGE_MAC_VLAN_LKUP);
6354 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6355 struct hclge_mac_vlan_tbl_entry_cmd *req,
6356 struct hclge_desc *mc_desc)
6358 struct hclge_dev *hdev = vport->back;
6365 struct hclge_desc desc;
6367 hclge_cmd_setup_basic_desc(&desc,
6368 HCLGE_OPC_MAC_VLAN_ADD,
6370 memcpy(desc.data, req,
6371 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6372 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6373 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6374 retval = le16_to_cpu(desc.retval);
6376 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6378 HCLGE_MAC_VLAN_ADD);
6380 hclge_cmd_reuse_desc(&mc_desc[0], false);
6381 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6382 hclge_cmd_reuse_desc(&mc_desc[1], false);
6383 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6384 hclge_cmd_reuse_desc(&mc_desc[2], false);
6385 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6386 memcpy(mc_desc[0].data, req,
6387 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6388 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6389 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6390 retval = le16_to_cpu(mc_desc[0].retval);
6392 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6394 HCLGE_MAC_VLAN_ADD);
6398 dev_err(&hdev->pdev->dev,
6399 "add mac addr failed for cmd_send, ret =%d.\n",
6407 static int hclge_init_umv_space(struct hclge_dev *hdev)
6409 u16 allocated_size = 0;
6412 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6417 if (allocated_size < hdev->wanted_umv_size)
6418 dev_warn(&hdev->pdev->dev,
6419 "Alloc umv space failed, want %d, get %d\n",
6420 hdev->wanted_umv_size, allocated_size);
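/* Quota model sketch for the arithmetic below: the allocated UMV space is
 * split into num_req_vfs + 2 equal private quotas (PF, each VF, plus one
 * extra slot that effectively seeds the shared pool), and the division
 * remainder is folded into the shared pool too. E.g. max_umv_size = 256
 * with 6 VFs gives priv_umv_size = 256 / 8 = 32 and share_umv_size =
 * 32 + 256 % 8 = 32.
 */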
6422 mutex_init(&hdev->umv_mutex);
6423 hdev->max_umv_size = allocated_size;
6424 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6425 hdev->share_umv_size = hdev->priv_umv_size +
6426 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6431 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6435 if (hdev->max_umv_size > 0) {
6436 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6440 hdev->max_umv_size = 0;
6442 mutex_destroy(&hdev->umv_mutex);
6447 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6448 u16 *allocated_size, bool is_alloc)
6450 struct hclge_umv_spc_alc_cmd *req;
6451 struct hclge_desc desc;
6454 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6455 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6456 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6457 req->space_size = cpu_to_le32(space_size);
6459 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6461 dev_err(&hdev->pdev->dev,
6462 "%s umv space failed for cmd_send, ret =%d\n",
6463 is_alloc ? "allocate" : "free", ret);
6467 if (is_alloc && allocated_size)
6468 *allocated_size = le32_to_cpu(desc.data[1]);
6473 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6475 struct hclge_vport *vport;
6478 for (i = 0; i < hdev->num_alloc_vport; i++) {
6479 vport = &hdev->vport[i];
6480 vport->used_umv_num = 0;
6483 mutex_lock(&hdev->umv_mutex);
6484 hdev->share_umv_size = hdev->priv_umv_size +
6485 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6486 mutex_unlock(&hdev->umv_mutex);
6489 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6491 struct hclge_dev *hdev = vport->back;
6494 mutex_lock(&hdev->umv_mutex);
6495 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6496 hdev->share_umv_size == 0);
6497 mutex_unlock(&hdev->umv_mutex);
6502 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6504 struct hclge_dev *hdev = vport->back;
6506 mutex_lock(&hdev->umv_mutex);
6508 if (vport->used_umv_num > hdev->priv_umv_size)
6509 hdev->share_umv_size++;
6511 if (vport->used_umv_num > 0)
6512 vport->used_umv_num--;
6514 if (vport->used_umv_num >= hdev->priv_umv_size &&
6515 hdev->share_umv_size > 0)
6516 hdev->share_umv_size--;
6517 vport->used_umv_num++;
6519 mutex_unlock(&hdev->umv_mutex);
6522 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6523 const unsigned char *addr)
6525 struct hclge_vport *vport = hclge_get_vport(handle);
6527 return hclge_add_uc_addr_common(vport, addr);
6530 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6531 const unsigned char *addr)
6533 struct hclge_dev *hdev = vport->back;
6534 struct hclge_mac_vlan_tbl_entry_cmd req;
6535 struct hclge_desc desc;
6536 u16 egress_port = 0;
6539 /* mac addr check */
6540 if (is_zero_ether_addr(addr) ||
6541 is_broadcast_ether_addr(addr) ||
6542 is_multicast_ether_addr(addr)) {
6543 dev_err(&hdev->pdev->dev,
6544 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6546 is_zero_ether_addr(addr),
6547 is_broadcast_ether_addr(addr),
6548 is_multicast_ether_addr(addr));
6552 memset(&req, 0, sizeof(req));
6554 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6555 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6557 req.egress_port = cpu_to_le16(egress_port);
6559 hclge_prepare_mac_addr(&req, addr, false);
6561 /* Lookup the mac address in the mac_vlan table, and add
6562 * it if the entry does not exist. Duplicate unicast entries
6563 * are not allowed in the mac vlan table.
6565 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6566 if (ret == -ENOENT) {
6567 if (!hclge_is_umv_space_full(vport)) {
6568 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6570 hclge_update_umv_space(vport, false);
6574 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6575 hdev->priv_umv_size);
6580 /* check if we just hit the duplicate */
6582 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6583 vport->vport_id, addr);
6587 dev_err(&hdev->pdev->dev,
6588 "PF failed to add unicast entry(%pM) in the MAC table\n",
6594 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6595 const unsigned char *addr)
6597 struct hclge_vport *vport = hclge_get_vport(handle);
6599 return hclge_rm_uc_addr_common(vport, addr);
6602 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6603 const unsigned char *addr)
6605 struct hclge_dev *hdev = vport->back;
6606 struct hclge_mac_vlan_tbl_entry_cmd req;
6609 /* mac addr check */
6610 if (is_zero_ether_addr(addr) ||
6611 is_broadcast_ether_addr(addr) ||
6612 is_multicast_ether_addr(addr)) {
6613 dev_dbg(&hdev->pdev->dev,
6614 "Remove mac err! invalid mac:%pM.\n",
6619 memset(&req, 0, sizeof(req));
6620 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6621 hclge_prepare_mac_addr(&req, addr, false);
6622 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6624 hclge_update_umv_space(vport, true);
6629 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6630 const unsigned char *addr)
6632 struct hclge_vport *vport = hclge_get_vport(handle);
6634 return hclge_add_mc_addr_common(vport, addr);
6637 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6638 const unsigned char *addr)
6640 struct hclge_dev *hdev = vport->back;
6641 struct hclge_mac_vlan_tbl_entry_cmd req;
6642 struct hclge_desc desc[3];
6645 /* mac addr check */
6646 if (!is_multicast_ether_addr(addr)) {
6647 dev_err(&hdev->pdev->dev,
6648 "Add mc mac err! invalid mac:%pM.\n",
6652 memset(&req, 0, sizeof(req));
6653 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6654 hclge_prepare_mac_addr(&req, addr, true);
6655 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6657 /* This mac addr exists, update the VFID for it */
6658 hclge_update_desc_vfid(desc, vport->vport_id, false);
6659 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6661 /* This mac addr does not exist, add a new entry for it */
6662 memset(desc[0].data, 0, sizeof(desc[0].data));
6663 memset(desc[1].data, 0, sizeof(desc[1].data));
6664 memset(desc[2].data, 0, sizeof(desc[2].data));
6665 hclge_update_desc_vfid(desc, vport->vport_id, false);
6666 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6669 if (status == -ENOSPC)
6670 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfid is zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfid is zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* Maybe this mac address is in the mta table, but it cannot be
		 * deleted here because an entry of mta represents an address
		 * range rather than a specific address. The delete action for
		 * all entries will take effect in update_mta_status called by
		 * hns3_nic_set_rx_mode.
		 */
		status = 0;
	}

	return status;
}
void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg;
	struct list_head *list;

	if (!vport->vport_id)
		return;

	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
	if (!mac_cfg)
		return;

	mac_cfg->hd_tbl_status = true;
	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_add_tail(&mac_cfg->node, list);
}

void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			      bool is_write_tbl,
			      enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;
	bool uc_flag, mc_flag;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
			if (uc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_uc_addr_common(vport, mac_addr);

			if (mc_flag && mac_cfg->hd_tbl_status)
				hclge_rm_mc_addr_common(vport, mac_addr);

			list_del(&mac_cfg->node);
			kfree(mac_cfg);
			break;
		}
	}
}
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
	struct list_head *list;

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
	       &vport->uc_mac_list : &vport->mc_mac_list;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);

		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);

		mac_cfg->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&mac_cfg->node);
			kfree(mac_cfg);
		}
	}
}

void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport_mac_addr_cfg *mac, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}

		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
			list_del(&mac->node);
			kfree(mac);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}
static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status = 0;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%d.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}

static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}
static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			new_addr);
		return -EINVAL;
	}

	if ((!is_first || is_kdump_kernel()) &&
	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
		dev_warn(&hdev->pdev->dev,
			 "remove old uc mac address fail.\n");

	ret = hclge_add_uc_addr(handle, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add uc mac address fail, ret =%d.\n",
			ret);

		if (!is_first &&
		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
			dev_err(&hdev->pdev->dev,
				"restore uc mac address fail.\n");

		return -EIO;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure mac pause address fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);

	return 0;
}
static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hdev->hw.mac.phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}

static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en ? fe_type : 0;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);

	return ret;
}

#define HCLGE_FILTER_TYPE_VF		0
#define HCLGE_FILTER_TYPE_PORT		1
#define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
#define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
#define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
#define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
#define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
#define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->pdev->revision >= 0x21) {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS, enable, 0);
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					   HCLGE_FILTER_FE_INGRESS, enable, 0);
	} else {
		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
					   0);
	}

	if (enable)
		handle->netdev_flags |= HNAE3_VLAN_FLTR;
	else
		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
				    bool is_kill, u16 vlan, u8 qos,
				    __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);
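	/* Worked example (illustrative only): for vfid 10,
	 * vf_byte_off = 10 / 8 = 1 and vf_byte_val = 1 << (10 % 8) = 0x04,
	 * so bit 2 of the second bitmap byte selects this function.
	 */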
	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req0->resp_code)
			return 0;

		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
			dev_warn(&hdev->pdev->dev,
				 "vlan %d filter is not in vf vlan table\n",
				 vlan);
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);
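	/* Worked example (illustrative only): for vlan_id 1000,
	 * vlan_offset_160 = 1000 / 160 = 6, vlan_offset_byte =
	 * (1000 % 160) / 8 = 5 and vlan_offset_byte_val =
	 * 1 << (1000 % 8) = 0x01, i.e. bit 0 of byte 5 in
	 * 160-vlan window number 6.
	 */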
	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);

	return ret;
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id, u8 qos,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
				       0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %d vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	/* vlan 0 may be added twice when 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return 0;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Add port vlan failed, vport %d is already in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_err(&hdev->pdev->dev,
			"Delete port vlan failed, vport %d is not in vlan %d\n",
			vport_id, vlan_id);
		return -EINVAL;
	}
	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}

static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	req->vf_bitmap[req->vf_offset] =
		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		vport->txvlan_cfg.accept_tag1 = false;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20); newer revisions support them,
	 * and these two fields cannot be configured by the user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE	0x8100

	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			vport = &hdev->vport[i];
			ret = hclge_set_vlan_filter_ctrl(hdev,
							 HCLGE_FILTER_TYPE_VF,
							 HCLGE_FILTER_FE_EGRESS,
							 true,
							 vport->vport_id);
			if (ret)
				return ret;
		}

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS, true,
						 0);
		if (ret)
			return ret;
	} else {
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS_V1_B,
						 true, 0);
		if (ret)
			return ret;
	}

	handle->netdev_flags |= HNAE3_VLAN_FLTR;

	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;

	ret = hclge_set_vlan_protocol_type(hdev);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		u16 vlan_tag;

		vport = &hdev->vport[i];
		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;

		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
		if (ret)
			return ret;
	}

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool writen_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan;

	/* vlan 0 is reserved */
	if (!vlan_id)
		return;

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return;

	vlan->hd_tbl_status = writen_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, 0, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	return 0;
}

static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id, 0,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id, 0,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
}

void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_cfg_mutex);
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	mutex_unlock(&hdev->vport_cfg_mutex);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
	}
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						new_info->qos, false);
	}

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       old_info->qos, true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	struct hclge_dev *hdev = vport->back;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
	if (ret)
		return ret;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(vlan_info->vlan_proto),
					       vport->vport_id,
					       vlan_info->vlan_tag,
					       vlan_info->qos, false);
		if (ret)
			return ret;

		/* remove old VLAN tag */
		ret = hclge_set_vlan_filter_hw(hdev,
					       htons(old_vlan_info->vlan_proto),
					       vport->vport_id,
					       old_vlan_info->vlan_tag,
					       old_vlan_info->qos, true);
		if (ret)
			return ret;

		goto update;
	}

	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
					       old_vlan_info);
	if (ret)
		return ret;

	/* update state only when disable/enable port based VLAN */
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

update:
	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;

	return 0;
}
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_ENABLE;
	} else {
		if (!vlan)
			return HNAE3_PORT_BASE_VLAN_DISABLE;
		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
		else
			return HNAE3_PORT_BASE_VLAN_MODIFY;
	}
}
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	/* qos is a 3-bit value, so it cannot be bigger than 7 */
	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vport = &hdev->vport[vfid];
	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	/* update port based VLAN for PF */
	if (!vfid) {
		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);

		return ret;
	}

	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);
	} else {
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							vport->vport_id, state,
							vlan, qos,
							ntohs(proto));
		return ret;
	}
}
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool writen_to_tbl = false;
	int ret = 0;

	/* When port based VLAN is enabled, we use the port based VLAN as the
	 * VLAN filter entry. In this case, we don't update the VLAN filter
	 * table when the user adds a new VLAN or removes an existing VLAN;
	 * we just update the vport VLAN list. The VLAN ids in the VLAN list
	 * will not be written to the VLAN filter table until port based VLAN
	 * is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, 0, is_kill);
		writen_to_tbl = true;
	}

	if (!ret) {
		if (is_kill)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
		else
			hclge_add_vport_vlan_table(vport, vlan_id,
						   writen_to_tbl);
	}

	return ret;
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}

int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret = 0;

	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
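	/* Worked example (illustrative only): for an MTU of 1500, the frame
	 * budget is 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2 * 4 (two
	 * VLAN tags) = 1526 bytes.
	 */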
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > HCLGE_MAC_MAX_FRAME)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be no smaller than any VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send reset tqp cmd fail, ret = %d\n", ret);
		return ret;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return ret;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Deassert the soft reset fail, ret = %d\n", ret);

	return ret;
}

void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_gid);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}
static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n", ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!auto_neg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	return hclge_restart_autoneg(handle);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
	dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "Number of vmdq vports: %d\n", hdev->num_vmdq_vport);
	dev_info(dev, "Number of VFs for this PF: %d\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");

	dev_info(dev, "PF info end.\n");
}
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			if (netif_msg_drv(&hdev->vport->nic))
				hclge_info_show(hdev);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(client, ae_dev, 1);
			}

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	mutex_init(&hdev->vport_cfg_mutex);
	spin_lock_init(&hdev->fd_rule_lock);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on core/global reset.
	 */
	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
		return ret;
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_config_mac_tnl_int(hdev, false);
	hclge_hw_error_set_state(hdev, false);
	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_cfg_mutex);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
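	/* Worked example (illustrative only): for an rss_size of 24,
	 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so each TC is
	 * programmed with a size field of 5 (i.e. 2^5 = 32 queues).
	 */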
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitialize the RSS indirection table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;
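	/* The identity-modulo fill above simply cycles through the queues,
	 * e.g. with an rss_size of 8 the HCLGE_RSS_IND_TBL_SIZE entries
	 * repeat 0,1,...,7 so flows hash evenly across the active queues.
	 */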
	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
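	/* The "+ 2" above accounts for the two header words of the first
	 * descriptor, which carry no register data: the first descriptor
	 * contributes only HCLGE_32_BIT_REG_RTN_DATANUM - 2 values (see the
	 * "n" adjustment in the loop below), while subsequent descriptors
	 * are consumed whole.
	 */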
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
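	/* As in the 32 bit variant, the "+ 1" accounts for the single
	 * non-register word of the first descriptor
	 * ("n = HCLGE_64_BIT_REG_RTN_DATANUM - 1" in the loop below).
	 */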
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
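/* One dump line therefore holds REG_NUM_PER_LINE (4) u32 values, i.e.
 * REG_LEN_PER_LINE = 16 bytes; each register block below is padded with
 * SEPARATOR_VALUE words so it always ends on a whole line.
 */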
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
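	/* The "+ 1" per block mirrors the separator padding in
	 * hclge_get_regs(): a block of reg_um registers occupies
	 * floor(reg_um / 4) full lines plus one line of remainder and/or
	 * SEPARATOR_VALUE padding.
	 */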
	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF register values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common register values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
9046 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9048 struct hclge_set_led_state_cmd *req;
9049 struct hclge_desc desc;
9052 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9054 req = (struct hclge_set_led_state_cmd *)desc.data;
9055 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9056 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9058 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9060 dev_err(&hdev->pdev->dev,
9061 "Send set led state cmd error, ret =%d\n", ret);
9066 enum hclge_led_status {
9069 HCLGE_LED_NO_CHANGE = 0xFF,
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
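/* Copy the MAC's supported/advertising link modes out as ethtool
 * link-mode bitmaps; both arrays hold
 * BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS) longs, matching the
 * loop bound below.
 */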
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
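/* PF-side implementation of the hnae3 hardware-abstraction interface;
 * client drivers (e.g. the hns3 ENET driver) invoke these operations
 * through their hnae3 handle instead of calling into hclge directly.
 */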
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
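/* Module entry points: registering the algorithm with the hnae3
 * framework lets devices matching ae_algo_pci_tbl bind to this PF
 * backend; unregistering detaches them on module unload.
 */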
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);