// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
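/* Each entry of g_mac_stats_string below pairs an ethtool counter name
 * with its byte offset inside struct hclge_mac_stats, so dumping the
 * statistics is a plain table walk using HCLGE_STATS_READ() above.
 */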
#define HCLGE_BUF_SIZE_UNIT	256

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_RST_STATUS_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
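/* The 40-byte array above appears to be the widely used default Toeplitz
 * RSS hash key (the same constant shows up in several other NIC drivers);
 * it seeds the hardware RSS hash so flow distribution matches common
 * expectations out of the box.
 */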
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
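/* Note on the math above: the firmware reports reg_num counters in total;
 * the first descriptor carries three of them alongside the command head
 * (hence the "reg_num - 3"), and every following descriptor is
 * reinterpreted whole as four __le64 values, so the remainder is divided
 * by four and rounded up.
 */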
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
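/* The RX/TX queries above write the queue index (masked to 9 bits) into
 * desc.data[0] and read the packet count back from desc.data[1]. The "+="
 * accumulation suggests the hardware value is a short-lived delta rather
 * than a running total, making the software copy in tqp_stats the
 * authoritative counter; this is an observation from the code, not a
 * documented guarantee.
 */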
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp contributes one TX and one RX packet counter */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
	 * phy: only support when phy device exist on board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
				     u64 *rx_cnt)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
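/* hclge_query_function_status() polls because the PF may still be
 * finishing a function reset when the driver loads: up to six attempts
 * with a 1-2 ms sleep between tries before the state is parsed.
 */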
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}
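/* Vector accounting sketch for the RoCE-capable branch above: the NIC
 * vectors occupy slots [0, roce_base_msix_offset) and the RoCE vectors
 * follow them, so the total MSI budget is simply the offset plus
 * num_roce_msi, matching the layout comment in the code.
 */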
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
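/* Firmware speed codes decoded above (and mirrored, in the opposite
 * direction, by hclge_cfg_mac_speed_dup_hw() below): 0=1G, 1=10G, 2=25G,
 * 3=40G, 4=50G, 5=100G, 6=10M, 7=100M.
 */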
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
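/* FEC summary for the switch above: 10G/40G advertise BaseR only;
 * 25G/50G advertise RS while the ability mask also records BaseR;
 * 100G is RS only; every other speed reports no FEC capability.
 */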
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
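/* MAC address recovery above: the low 32 bits come from param[2] and the
 * top 16 bits from a field of param[3]; the "(high << 31) << 1" is a
 * shift by 32 split in two, presumably written that way to avoid any
 * risk of an undefined shift-by-width if the operand type ever changed.
 */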
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* length is in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently does not support non-contiguous TCs */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
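/* Both the min and max MSS writes above use the HCLGE_TSO_MSS_MIN_M/_S
 * mask pair. That looks odd but appears intentional: tso_mss_min and
 * tso_mss_max are separate __le16 words with the same internal layout,
 * so a single mask definition serves both fields.
 */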
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps,
				       hdev->num_tx_desc, hdev->num_rx_desc);
		if (ret)
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
	} else {
		hclge_unic_setup(vport, num_tqps);
		ret = 0;
	}

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	u32 i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		if (tc_num)
			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
		else
			hi_thrd = shared_buf - hdev->dv_buf_size;

		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / 2;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
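/* Shared-buffer sizing above, in words: whatever RX space remains after
 * the private buffers becomes the shared pool. On DCB-capable hardware
 * each TC then gets a high threshold of roughly its equal share of that
 * pool (after reserving dv_buf_size), floored at two MPS-sized frames,
 * with the low threshold trailing by half an MPS; everything is aligned
 * to HCLGE_BUF_SIZE_UNIT.
 */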
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : 256;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
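/* The calculation above degrades gracefully: try generous per-TC
 * watermarks first, then the reduced ones, then give up the private
 * buffers of non-PFC TCs, and finally those of PFC TCs, before conceding
 * that the packet buffer cannot satisfy the configuration.
 */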
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
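/* Multi-descriptor commands such as the one above are chained by the
 * NEXT flag: every descriptor except the last sets HCLGE_CMD_FLAG_NEXT so
 * the firmware treats the pair as one request. The same pattern repeats
 * in hclge_common_thrd_config() below.
 */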
static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}
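/* hclge_check_speed_dup() encodes the hardware rule that half duplex is
 * only meaningful at 10M/100M; any faster speed is silently coerced to
 * full duplex before the firmware command is built.
 */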
2167 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2170 struct hclge_config_mac_speed_dup_cmd *req;
2171 struct hclge_desc desc;
2174 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2176 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2178 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2181 case HCLGE_MAC_SPEED_10M:
2182 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2183 HCLGE_CFG_SPEED_S, 6);
2185 case HCLGE_MAC_SPEED_100M:
2186 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2187 HCLGE_CFG_SPEED_S, 7);
2189 case HCLGE_MAC_SPEED_1G:
2190 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2191 HCLGE_CFG_SPEED_S, 0);
2193 case HCLGE_MAC_SPEED_10G:
2194 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2195 HCLGE_CFG_SPEED_S, 1);
2197 case HCLGE_MAC_SPEED_25G:
2198 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2199 HCLGE_CFG_SPEED_S, 2);
2201 case HCLGE_MAC_SPEED_40G:
2202 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2203 HCLGE_CFG_SPEED_S, 3);
2205 case HCLGE_MAC_SPEED_50G:
2206 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2207 HCLGE_CFG_SPEED_S, 4);
2209 case HCLGE_MAC_SPEED_100G:
2210 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2211 HCLGE_CFG_SPEED_S, 5);
2214 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2218 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2221 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2223 dev_err(&hdev->pdev->dev,
2224 "mac speed/duplex config cmd failed %d.\n", ret);
2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2235 duplex = hclge_check_speed_dup(duplex, speed);
2236 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2239 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2243 hdev->hw.mac.speed = speed;
2244 hdev->hw.mac.duplex = duplex;
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2252 struct hclge_vport *vport = hclge_get_vport(handle);
2253 struct hclge_dev *hdev = vport->back;
2255 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2260 struct hclge_config_auto_neg_cmd *req;
2261 struct hclge_desc desc;
2265 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2267 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2271 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2273 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2281 struct hclge_vport *vport = hclge_get_vport(handle);
2282 struct hclge_dev *hdev = vport->back;
2284 if (!hdev->hw.mac.support_autoneg) {
2286 dev_err(&hdev->pdev->dev,
2287 "autoneg is not supported by current port\n");
2294 return hclge_set_autoneg_en(hdev, enable);
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2299 struct hclge_vport *vport = hclge_get_vport(handle);
2300 struct hclge_dev *hdev = vport->back;
2301 struct phy_device *phydev = hdev->hw.mac.phydev;
2304 return phydev->autoneg;
2306 return hdev->hw.mac.autoneg;
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2311 struct hclge_vport *vport = hclge_get_vport(handle);
2312 struct hclge_dev *hdev = vport->back;
2315 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2317 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2320 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2325 struct hclge_config_fec_cmd *req;
2326 struct hclge_desc desc;
2329 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2331 req = (struct hclge_config_fec_cmd *)desc.data;
2332 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334 if (fec_mode & BIT(HNAE3_FEC_RS))
2335 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337 if (fec_mode & BIT(HNAE3_FEC_BASER))
2338 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2341 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2343 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2350 struct hclge_vport *vport = hclge_get_vport(handle);
2351 struct hclge_dev *hdev = vport->back;
2352 struct hclge_mac *mac = &hdev->hw.mac;
2355 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2360 ret = hclge_set_fec_hw(hdev, fec_mode);
2364 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2371 struct hclge_vport *vport = hclge_get_vport(handle);
2372 struct hclge_dev *hdev = vport->back;
2373 struct hclge_mac *mac = &hdev->hw.mac;
2376 *fec_ability = mac->fec_ability;
2378 *fec_mode = mac->fec_mode;
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2383 struct hclge_mac *mac = &hdev->hw.mac;
2386 hdev->support_sfp_query = true;
2387 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389 hdev->hw.mac.duplex);
2391 dev_err(&hdev->pdev->dev,
2392 "Config mac speed dup fail ret=%d\n", ret);
2398 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2401 dev_err(&hdev->pdev->dev,
2402 "Fec mode init fail, ret = %d\n", ret);
2407 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2409 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2413 ret = hclge_buffer_alloc(hdev);
2415 dev_err(&hdev->pdev->dev,
2416 "allocate buffer fail, ret=%d\n", ret);
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2423 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425 schedule_work(&hdev->mbx_service_task);
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2430 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2431 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2432 schedule_work(&hdev->rst_service_task);
2435 static void hclge_task_schedule(struct hclge_dev *hdev)
2437 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2438 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2439 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2440 (void)schedule_work(&hdev->service_task);
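/* Scheduling guards used by the three helpers above: mailbox work is
 * skipped while the command queue is disabled, reset work while the
 * device is being removed, and periodic service work while the device
 * is down or being removed. The test_and_set_bit() calls make each
 * schedule request idempotent.
 */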
2443 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2445 struct hclge_link_status_cmd *req;
2446 struct hclge_desc desc;
2450 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2451 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2453 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2458 req = (struct hclge_link_status_cmd *)desc.data;
2459 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2461 return !!link_status;
2464 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2469 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2472 mac_state = hclge_get_mac_link_status(hdev);
2474 if (hdev->hw.mac.phydev) {
2475 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2476 link_stat = mac_state &
2477 hdev->hw.mac.phydev->link;
2482 link_stat = mac_state;
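/* Link status sketch: with a PHY attached and in PHY_RUNNING state the
 * reported link is the AND of the MAC link and the PHY link; without a
 * PHY (e.g. a fiber port) the MAC status alone is used.
 */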
2488 static void hclge_update_link_status(struct hclge_dev *hdev)
2490 struct hnae3_client *rclient = hdev->roce_client;
2491 struct hnae3_client *client = hdev->nic_client;
2492 struct hnae3_handle *rhandle;
2493 struct hnae3_handle *handle;
2499 state = hclge_get_mac_phy_link(hdev);
2500 if (state != hdev->hw.mac.link) {
2501 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2502 handle = &hdev->vport[i].nic;
2503 client->ops->link_status_change(handle, state);
2504 hclge_config_mac_tnl_int(hdev, state);
2505 rhandle = &hdev->vport[i].roce;
2506 if (rclient && rclient->ops->link_status_change)
2507 rclient->ops->link_status_change(rhandle,
2510 hdev->hw.mac.link = state;
2514 static void hclge_update_port_capability(struct hclge_mac *mac)
2516 /* update fec ability by speed */
2517 hclge_convert_setting_fec(mac);
2519 /* firmware cannot identify the backplane type; the media type
2520 * read from the configuration can help deal with it
2522 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2523 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2524 mac->module_type = HNAE3_MODULE_TYPE_KR;
2525 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2526 mac->module_type = HNAE3_MODULE_TYPE_TP;
2528 if (mac->support_autoneg) {
2529 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2530 linkmode_copy(mac->advertising, mac->supported);
2532 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2534 linkmode_zero(mac->advertising);
2538 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2540 struct hclge_sfp_info_cmd *resp = NULL;
2541 struct hclge_desc desc;
2544 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2545 resp = (struct hclge_sfp_info_cmd *)desc.data;
2546 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2547 if (ret == -EOPNOTSUPP) {
2548 dev_warn(&hdev->pdev->dev,
2549 "IMP do not support get SFP speed %d\n", ret);
2552 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2556 *speed = le32_to_cpu(resp->speed);
2561 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2563 struct hclge_sfp_info_cmd *resp;
2564 struct hclge_desc desc;
2567 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2568 resp = (struct hclge_sfp_info_cmd *)desc.data;
2570 resp->query_type = QUERY_ACTIVE_SPEED;
2572 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2573 if (ret == -EOPNOTSUPP) {
2574 dev_warn(&hdev->pdev->dev,
2575 "IMP does not support get SFP info %d\n", ret);
2578 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2582 mac->speed = le32_to_cpu(resp->speed);
2583 /* if resp->speed_ability is 0, it means the firmware is an old
2584 * version that does not report these params, so do not update them
2586 if (resp->speed_ability) {
2587 mac->module_type = le32_to_cpu(resp->module_type);
2588 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2589 mac->autoneg = resp->autoneg;
2590 mac->support_autoneg = resp->autoneg_ability;
2591 if (!resp->active_fec)
2594 mac->fec_mode = BIT(resp->active_fec);
2596 mac->speed_type = QUERY_SFP_SPEED;
2602 static int hclge_update_port_info(struct hclge_dev *hdev)
2604 struct hclge_mac *mac = &hdev->hw.mac;
2605 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2608 /* get the port info from SFP cmd if not copper port */
2609 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2612 /* if IMP does not support get SFP/qSFP info, return directly */
2613 if (!hdev->support_sfp_query)
2616 if (hdev->pdev->revision >= 0x21)
2617 ret = hclge_get_sfp_info(hdev, mac);
2619 ret = hclge_get_sfp_speed(hdev, &speed);
2621 if (ret == -EOPNOTSUPP) {
2622 hdev->support_sfp_query = false;
2628 if (hdev->pdev->revision >= 0x21) {
2629 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2630 hclge_update_port_capability(mac);
2633 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2636 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2637 return 0; /* do nothing if no SFP */
2639 /* must config full duplex for SFP */
2640 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2644 static int hclge_get_status(struct hnae3_handle *handle)
2646 struct hclge_vport *vport = hclge_get_vport(handle);
2647 struct hclge_dev *hdev = vport->back;
2649 hclge_update_link_status(hdev);
2651 return hdev->hw.mac.link;
2654 static void hclge_service_timer(struct timer_list *t)
2656 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2658 mod_timer(&hdev->service_timer, jiffies + HZ);
2659 hdev->hw_stats.stats_timer++;
2660 hdev->fd_arfs_expire_timer++;
2661 hclge_task_schedule(hdev);
2664 static void hclge_service_complete(struct hclge_dev *hdev)
2666 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2668 /* Flush memory before next watchdog */
2669 smp_mb__before_atomic();
2670 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2673 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2675 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2677 /* fetch the events from their corresponding regs */
2678 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2679 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2680 msix_src_reg = hclge_read_dev(&hdev->hw,
2681 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2683 /* Assumption: If by any chance reset and mailbox events are reported
2684 * together then we will only process reset event in this go and will
2685 * defer the processing of the mailbox events. Since we have not
2686 * cleared the RX CMDQ event this time, we will receive another
2687 * interrupt from H/W just for the mailbox.
2690 /* check for vector0 reset event sources */
2691 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2692 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2693 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2694 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2695 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2696 hdev->rst_stats.imp_rst_cnt++;
2697 return HCLGE_VECTOR0_EVENT_RST;
2700 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2701 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2702 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2703 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2704 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2705 hdev->rst_stats.global_rst_cnt++;
2706 return HCLGE_VECTOR0_EVENT_RST;
2709 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2710 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2711 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2712 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2713 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2714 hdev->rst_stats.core_rst_cnt++;
2715 return HCLGE_VECTOR0_EVENT_RST;
2718 /* check for vector0 msix event source */
2719 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2720 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2722 return HCLGE_VECTOR0_EVENT_ERR;
2725 /* check for vector0 mailbox(=CMDQ RX) event source */
2726 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2727 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2728 *clearval = cmdq_src_reg;
2729 return HCLGE_VECTOR0_EVENT_MBX;
2732 /* print other vector0 event source */
2733 dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2734 cmdq_src_reg, msix_src_reg);
2735 return HCLGE_VECTOR0_EVENT_OTHER;
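/* Event priority implemented above, highest first: IMP reset > global
 * reset > core reset > MSI-X (hardware error) > mailbox. Only the
 * first pending source is reported per call; per the assumption note
 * above, unhandled sources stay uncleared so the interrupt fires again.
 */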
2738 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2741 switch (event_type) {
2742 case HCLGE_VECTOR0_EVENT_RST:
2743 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2745 case HCLGE_VECTOR0_EVENT_MBX:
2746 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2753 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2755 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2756 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2757 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2758 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2759 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2762 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2764 writel(enable ? 1 : 0, vector->addr);
2767 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2769 struct hclge_dev *hdev = data;
2773 hclge_enable_vector(&hdev->misc_vector, false);
2774 event_cause = hclge_check_event_cause(hdev, &clearval);
2776 /* vector 0 interrupt is shared with reset and mailbox source events.*/
2777 switch (event_cause) {
2778 case HCLGE_VECTOR0_EVENT_ERR:
2779 /* we do not know what type of reset is required now. This could
2780 * only be decided after we fetch the type of errors which
2781 * caused this event. Therefore, we will do below for now:
2782 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2783 * have a deferred type of reset to be used.
2784 * 2. Schedule the reset service task.
2785 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2786 * will fetch the correct type of reset. This would be done
2787 * by first decoding the types of errors.
2789 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2791 case HCLGE_VECTOR0_EVENT_RST:
2792 hclge_reset_task_schedule(hdev);
2794 case HCLGE_VECTOR0_EVENT_MBX:
2795 /* If we are here then,
2796 * 1. Either we are not handling any mbx task and we are not scheduled, or
2799 * 2. We could be handling a mbx task but nothing more is scheduled.
2801 * In both cases, we should schedule mbx task as there are more
2802 * mbx messages reported by this interrupt.
2804 hclge_mbx_task_schedule(hdev);
2807 dev_warn(&hdev->pdev->dev,
2808 "received unknown or unhandled event of vector0\n");
2812 /* clear the source of interrupt if it is not caused by reset */
2813 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2814 hclge_clear_event_cause(hdev, event_cause, clearval);
2815 hclge_enable_vector(&hdev->misc_vector, true);
2821 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2823 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2824 dev_warn(&hdev->pdev->dev,
2825 "vector(vector_id %d) has been freed.\n", vector_id);
2829 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2830 hdev->num_msi_left += 1;
2831 hdev->num_msi_used -= 1;
2834 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2836 struct hclge_misc_vector *vector = &hdev->misc_vector;
2838 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2840 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2841 hdev->vector_status[0] = 0;
2843 hdev->num_msi_left -= 1;
2844 hdev->num_msi_used += 1;
2847 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2851 hclge_get_misc_vector(hdev);
2853 /* this would be explicitly freed in the end */
2854 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2855 0, "hclge_misc", hdev);
2857 hclge_free_vector(hdev, 0);
2858 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2859 hdev->misc_vector.vector_irq);
2865 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2867 free_irq(hdev->misc_vector.vector_irq, hdev);
2868 hclge_free_vector(hdev, 0);
2871 int hclge_notify_client(struct hclge_dev *hdev,
2872 enum hnae3_reset_notify_type type)
2874 struct hnae3_client *client = hdev->nic_client;
2877 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
2881 if (!client->ops->reset_notify)
2884 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2885 struct hnae3_handle *handle = &hdev->vport[i].nic;
2888 ret = client->ops->reset_notify(handle, type);
2890 dev_err(&hdev->pdev->dev,
2891 "notify nic client failed %d(%d)\n", type, ret);
2899 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2900 enum hnae3_reset_notify_type type)
2902 struct hnae3_client *client = hdev->roce_client;
2906 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
2910 if (!client->ops->reset_notify)
2913 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2914 struct hnae3_handle *handle = &hdev->vport[i].roce;
2916 ret = client->ops->reset_notify(handle, type);
2918 dev_err(&hdev->pdev->dev,
2919 "notify roce client failed %d(%d)",
2928 static int hclge_reset_wait(struct hclge_dev *hdev)
2930 #define HCLGE_RESET_WAIT_MS 100
2931 #define HCLGE_RESET_WAIT_CNT 200
2932 u32 val, reg, reg_bit;
2935 switch (hdev->reset_type) {
2936 case HNAE3_IMP_RESET:
2937 reg = HCLGE_GLOBAL_RESET_REG;
2938 reg_bit = HCLGE_IMP_RESET_BIT;
2940 case HNAE3_GLOBAL_RESET:
2941 reg = HCLGE_GLOBAL_RESET_REG;
2942 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2944 case HNAE3_CORE_RESET:
2945 reg = HCLGE_GLOBAL_RESET_REG;
2946 reg_bit = HCLGE_CORE_RESET_BIT;
2948 case HNAE3_FUNC_RESET:
2949 reg = HCLGE_FUN_RST_ING;
2950 reg_bit = HCLGE_FUN_RST_ING_B;
2952 case HNAE3_FLR_RESET:
2955 dev_err(&hdev->pdev->dev,
2956 "Wait for unsupported reset type: %d\n",
2961 if (hdev->reset_type == HNAE3_FLR_RESET) {
2962 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2963 cnt++ < HCLGE_RESET_WAIT_CNT)
2964 msleep(HCLGE_RESET_WAIT_MS);
2966 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2967 dev_err(&hdev->pdev->dev,
2968 "flr wait timeout: %d\n", cnt);
2975 val = hclge_read_dev(&hdev->hw, reg);
2976 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2977 msleep(HCLGE_RESET_WAIT_MS);
2978 val = hclge_read_dev(&hdev->hw, reg);
2982 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2983 dev_warn(&hdev->pdev->dev,
2984 "Wait for reset timeout: %d\n", hdev->reset_type);
2991 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2993 struct hclge_vf_rst_cmd *req;
2994 struct hclge_desc desc;
2996 req = (struct hclge_vf_rst_cmd *)desc.data;
2997 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2998 req->dest_vfid = func_id;
3003 return hclge_cmd_send(&hdev->hw, &desc, 1);
3006 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3010 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3011 struct hclge_vport *vport = &hdev->vport[i];
3014 /* Send cmd to set/clear VF's FUNC_RST_ING */
3015 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3017 dev_err(&hdev->pdev->dev,
3018 "set vf(%d) rst failed %d!\n",
3019 vport->vport_id, ret);
3023 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3026 /* Inform VF to process the reset.
3027 * hclge_inform_reset_assert_to_vf may fail if VF
3028 * driver is not loaded.
3030 ret = hclge_inform_reset_assert_to_vf(vport);
3032 dev_warn(&hdev->pdev->dev,
3033 "inform reset to vf(%d) failed %d!\n",
3034 vport->vport_id, ret);
3040 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3042 struct hclge_desc desc;
3043 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3046 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3047 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3048 req->fun_reset_vfid = func_id;
3050 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3052 dev_err(&hdev->pdev->dev,
3053 "send function reset cmd fail, status =%d\n", ret);
3058 static void hclge_do_reset(struct hclge_dev *hdev)
3060 struct hnae3_handle *handle = &hdev->vport[0].nic;
3061 struct pci_dev *pdev = hdev->pdev;
3064 if (hclge_get_hw_reset_stat(handle)) {
3065 dev_info(&pdev->dev, "Hardware reset not finish\n");
3066 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3067 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3068 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3072 switch (hdev->reset_type) {
3073 case HNAE3_GLOBAL_RESET:
3074 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3075 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3076 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3077 dev_info(&pdev->dev, "Global Reset requested\n");
3079 case HNAE3_CORE_RESET:
3080 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3081 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
3082 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3083 dev_info(&pdev->dev, "Core Reset requested\n");
3085 case HNAE3_FUNC_RESET:
3086 dev_info(&pdev->dev, "PF Reset requested\n");
3087 /* schedule again to check later */
3088 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3089 hclge_reset_task_schedule(hdev);
3091 case HNAE3_FLR_RESET:
3092 dev_info(&pdev->dev, "FLR requested\n");
3093 /* schedule again to check later */
3094 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3095 hclge_reset_task_schedule(hdev);
3098 dev_warn(&pdev->dev,
3099 "Unsupported reset type: %d\n", hdev->reset_type);
3104 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3105 unsigned long *addr)
3107 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3109 /* first, resolve any unknown reset type to the known type(s) */
3110 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3111 /* we will intentionally ignore any errors from this function
3112 * as we will end up in *some* reset request in any case
3114 hclge_handle_hw_msix_error(hdev, addr);
3115 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3116 /* We deferred the clearing of the error event which caused the
3117 * interrupt, since it was not possible to do that in
3118 * interrupt context (and this is the reason we introduced
3119 * new UNKNOWN reset type). Now, the errors have been
3120 * handled and cleared in hardware we can safely enable
3121 * interrupts. This is an exception to the norm.
3123 hclge_enable_vector(&hdev->misc_vector, true);
3126 /* return the highest priority reset level amongst all */
3127 if (test_bit(HNAE3_IMP_RESET, addr)) {
3128 rst_level = HNAE3_IMP_RESET;
3129 clear_bit(HNAE3_IMP_RESET, addr);
3130 clear_bit(HNAE3_GLOBAL_RESET, addr);
3131 clear_bit(HNAE3_CORE_RESET, addr);
3132 clear_bit(HNAE3_FUNC_RESET, addr);
3133 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3134 rst_level = HNAE3_GLOBAL_RESET;
3135 clear_bit(HNAE3_GLOBAL_RESET, addr);
3136 clear_bit(HNAE3_CORE_RESET, addr);
3137 clear_bit(HNAE3_FUNC_RESET, addr);
3138 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3139 rst_level = HNAE3_CORE_RESET;
3140 clear_bit(HNAE3_CORE_RESET, addr);
3141 clear_bit(HNAE3_FUNC_RESET, addr);
3142 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3143 rst_level = HNAE3_FUNC_RESET;
3144 clear_bit(HNAE3_FUNC_RESET, addr);
3145 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3146 rst_level = HNAE3_FLR_RESET;
3147 clear_bit(HNAE3_FLR_RESET, addr);
3150 if (hdev->reset_type != HNAE3_NONE_RESET &&
3151 rst_level < hdev->reset_type)
3152 return HNAE3_NONE_RESET;
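/* Example (illustrative): if HNAE3_GLOBAL_RESET and HNAE3_FUNC_RESET
 * are both pending in *addr, HNAE3_GLOBAL_RESET is returned and the
 * function-reset bit is cleared too, since the higher-level reset
 * subsumes the lower one.
 */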
3157 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3161 switch (hdev->reset_type) {
3162 case HNAE3_IMP_RESET:
3163 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3165 case HNAE3_GLOBAL_RESET:
3166 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3168 case HNAE3_CORE_RESET:
3169 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3178 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3179 hclge_enable_vector(&hdev->misc_vector, true);
3182 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3186 switch (hdev->reset_type) {
3187 case HNAE3_FUNC_RESET:
3189 case HNAE3_FLR_RESET:
3190 ret = hclge_set_all_vf_rst(hdev, true);
3199 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3201 #define HCLGE_RESET_SYNC_TIME 100
3206 switch (hdev->reset_type) {
3207 case HNAE3_FUNC_RESET:
3208 /* There is no mechanism for PF to know if VF has stopped IO
3209 * for now, just wait 100 ms for VF to stop IO
3211 msleep(HCLGE_RESET_SYNC_TIME);
3212 ret = hclge_func_reset_cmd(hdev, 0);
3214 dev_err(&hdev->pdev->dev,
3215 "asserting function reset fail %d!\n", ret);
3219 /* After performing PF reset, it is not necessary to do the
3220 * mailbox handling or send any command to firmware, because
3221 * any mailbox handling or command to firmware is only valid
3222 * after hclge_cmd_init is called.
3224 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3225 hdev->rst_stats.pf_rst_cnt++;
3227 case HNAE3_FLR_RESET:
3228 /* There is no mechanism for PF to know if VF has stopped IO
3229 * for now, just wait 100 ms for VF to stop IO
3231 msleep(HCLGE_RESET_SYNC_TIME);
3232 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3233 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3234 hdev->rst_stats.flr_rst_cnt++;
3236 case HNAE3_IMP_RESET:
3237 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3238 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3239 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3245 /* inform hardware that preparatory work is done */
3246 msleep(HCLGE_RESET_SYNC_TIME);
3247 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3248 HCLGE_NIC_CMQ_ENABLE);
3249 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3254 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3256 #define MAX_RESET_FAIL_CNT 5
3257 #define RESET_UPGRADE_DELAY_SEC 10
3259 if (hdev->reset_pending) {
3260 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3261 hdev->reset_pending);
3263 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3264 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3265 BIT(HCLGE_IMP_RESET_BIT))) {
3266 dev_info(&hdev->pdev->dev,
3267 "reset failed because IMP Reset is pending\n");
3268 hclge_clear_reset_cause(hdev);
3270 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3271 hdev->reset_fail_cnt++;
3273 set_bit(hdev->reset_type, &hdev->reset_pending);
3274 dev_info(&hdev->pdev->dev,
3275 "re-schedule to wait for hw reset done\n");
3279 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3280 hclge_clear_reset_cause(hdev);
3281 mod_timer(&hdev->reset_timer,
3282 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3287 hclge_clear_reset_cause(hdev);
3288 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3292 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3296 switch (hdev->reset_type) {
3297 case HNAE3_FUNC_RESET:
3299 case HNAE3_FLR_RESET:
3300 ret = hclge_set_all_vf_rst(hdev, false);
3309 static void hclge_reset(struct hclge_dev *hdev)
3311 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3312 bool is_timeout = false;
3315 /* Initialize ae_dev reset status as well, in case enet layer wants to
3316 * know if device is undergoing reset
3318 ae_dev->reset_type = hdev->reset_type;
3319 hdev->rst_stats.reset_cnt++;
3320 /* perform reset of the stack & ae device for a client */
3321 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3325 ret = hclge_reset_prepare_down(hdev);
3330 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3332 goto err_reset_lock;
3336 ret = hclge_reset_prepare_wait(hdev);
3340 if (hclge_reset_wait(hdev)) {
3345 hdev->rst_stats.hw_reset_done_cnt++;
3347 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3352 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3354 goto err_reset_lock;
3356 ret = hclge_reset_ae_dev(hdev->ae_dev);
3358 goto err_reset_lock;
3360 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3362 goto err_reset_lock;
3364 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3366 goto err_reset_lock;
3368 hclge_clear_reset_cause(hdev);
3370 ret = hclge_reset_prepare_up(hdev);
3372 goto err_reset_lock;
3374 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3376 goto err_reset_lock;
3380 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3384 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3388 hdev->last_reset_time = jiffies;
3389 hdev->reset_fail_cnt = 0;
3390 hdev->rst_stats.reset_done_cnt++;
3391 ae_dev->reset_type = HNAE3_NONE_RESET;
3392 del_timer(&hdev->reset_timer);
3399 if (hclge_reset_err_handle(hdev, is_timeout))
3400 hclge_reset_task_schedule(hdev);
3403 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3405 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3406 struct hclge_dev *hdev = ae_dev->priv;
3408 /* We might end up getting called broadly because of 2 below cases:
3409 * 1. Recoverable error was conveyed through APEI and only way to bring
3410 * normalcy is to reset.
3411 * 2. A new reset request from the stack due to timeout
3413 * For the first case, the error event might not have an ae handle
3414 * available. Check if this is a new reset request and we are not here
3415 * just because the last reset attempt failed and the watchdog hit us
3416 * again. We will know this if the last reset request did not occur very
3417 * recently (watchdog timer = 5*HZ; check after a sufficiently large time, say 4*5*HZ).
3418 * In case of new request we reset the "reset level" to PF reset.
3419 * And if it is a repeat reset request of the most recent one then we
3420 * want to make sure we throttle the reset request. Therefore, we will
3421 * not allow it again before 3*HZ jiffies have elapsed.
3424 handle = &hdev->vport[0].nic;
3426 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3428 else if (hdev->default_reset_request)
3430 hclge_get_reset_level(hdev,
3431 &hdev->default_reset_request);
3432 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3433 hdev->reset_level = HNAE3_FUNC_RESET;
3435 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
3438 /* request reset & schedule reset task */
3439 set_bit(hdev->reset_level, &hdev->reset_request);
3440 hclge_reset_task_schedule(hdev);
3442 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3443 hdev->reset_level++;
3446 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3447 enum hnae3_reset_type rst_type)
3449 struct hclge_dev *hdev = ae_dev->priv;
3451 set_bit(rst_type, &hdev->default_reset_request);
3454 static void hclge_reset_timer(struct timer_list *t)
3456 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3458 dev_info(&hdev->pdev->dev,
3459 "triggering global reset in reset timer\n");
3460 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3461 hclge_reset_event(hdev->pdev, NULL);
3464 static void hclge_reset_subtask(struct hclge_dev *hdev)
3466 /* check if there is any ongoing reset in the hardware. This status can
3467 * be checked from reset_pending. If there is one, we need to wait for
3468 * hardware to complete reset.
3469 * a. If we are able to figure out in reasonable time that hardware
3470 * has fully reset, then we can proceed with driver, client
3472 * b. else, we can come back later to check this status so re-sched
3475 hdev->last_reset_time = jiffies;
3476 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3477 if (hdev->reset_type != HNAE3_NONE_RESET)
3480 /* check if we got any *new* reset requests to be honored */
3481 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3482 if (hdev->reset_type != HNAE3_NONE_RESET)
3483 hclge_do_reset(hdev);
3485 hdev->reset_type = HNAE3_NONE_RESET;
3488 static void hclge_reset_service_task(struct work_struct *work)
3490 struct hclge_dev *hdev =
3491 container_of(work, struct hclge_dev, rst_service_task);
3493 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3496 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3498 hclge_reset_subtask(hdev);
3500 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3503 static void hclge_mailbox_service_task(struct work_struct *work)
3505 struct hclge_dev *hdev =
3506 container_of(work, struct hclge_dev, mbx_service_task);
3508 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3511 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3513 hclge_mbx_handler(hdev);
3515 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3518 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3522 /* start from vport 1; the PF (vport 0) is always alive */
3523 for (i = 1; i < hdev->num_alloc_vport; i++) {
3524 struct hclge_vport *vport = &hdev->vport[i];
3526 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3527 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3529 /* If vf is not alive, set to default value */
3530 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3531 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3535 static void hclge_service_task(struct work_struct *work)
3537 struct hclge_dev *hdev =
3538 container_of(work, struct hclge_dev, service_task);
3540 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3541 hclge_update_stats_for_all(hdev);
3542 hdev->hw_stats.stats_timer = 0;
3545 hclge_update_port_info(hdev);
3546 hclge_update_link_status(hdev);
3547 hclge_update_vport_alive(hdev);
3548 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3549 hclge_rfs_filter_expire(hdev);
3550 hdev->fd_arfs_expire_timer = 0;
3552 hclge_service_complete(hdev);
3555 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3557 /* VF handle has no client */
3558 if (!handle->client)
3559 return container_of(handle, struct hclge_vport, nic);
3560 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3561 return container_of(handle, struct hclge_vport, roce);
3563 return container_of(handle, struct hclge_vport, nic);
3566 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3567 struct hnae3_vector_info *vector_info)
3569 struct hclge_vport *vport = hclge_get_vport(handle);
3570 struct hnae3_vector_info *vector = vector_info;
3571 struct hclge_dev *hdev = vport->back;
3575 vector_num = min(hdev->num_msi_left, vector_num);
3577 for (j = 0; j < vector_num; j++) {
3578 for (i = 1; i < hdev->num_msi; i++) {
3579 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3580 vector->vector = pci_irq_vector(hdev->pdev, i);
3581 vector->io_addr = hdev->hw.io_base +
3582 HCLGE_VECTOR_REG_BASE +
3583 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3584 vport->vport_id *
3585 HCLGE_VECTOR_VF_OFFSET;
3586 hdev->vector_status[i] = vport->vport_id;
3587 hdev->vector_irq[i] = vector->vector;
3596 hdev->num_msi_left -= alloc;
3597 hdev->num_msi_used += alloc;
3602 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3606 for (i = 0; i < hdev->num_msi; i++)
3607 if (vector == hdev->vector_irq[i])
3613 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3615 struct hclge_vport *vport = hclge_get_vport(handle);
3616 struct hclge_dev *hdev = vport->back;
3619 vector_id = hclge_get_vector_index(hdev, vector);
3620 if (vector_id < 0) {
3621 dev_err(&hdev->pdev->dev,
3622 "Get vector index fail. vector_id =%d\n", vector_id);
3626 hclge_free_vector(hdev, vector_id);
3631 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3633 return HCLGE_RSS_KEY_SIZE;
3636 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3638 return HCLGE_RSS_IND_TBL_SIZE;
3641 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3642 const u8 hfunc, const u8 *key)
3644 struct hclge_rss_config_cmd *req;
3645 struct hclge_desc desc;
3650 req = (struct hclge_rss_config_cmd *)desc.data;
3652 for (key_offset = 0; key_offset < 3; key_offset++) {
3653 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3656 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3657 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3659 if (key_offset == 2)
3661 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3663 key_size = HCLGE_RSS_HASH_KEY_NUM;
3665 memcpy(req->hash_key,
3666 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3668 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3670 dev_err(&hdev->pdev->dev,
3671 "Configure RSS config fail, status = %d\n",
3679 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3681 struct hclge_rss_indirection_table_cmd *req;
3682 struct hclge_desc desc;
3686 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3688 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3689 hclge_cmd_setup_basic_desc
3690 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3692 req->start_table_index =
3693 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3694 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3696 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3697 req->rss_result[j] =
3698 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3700 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3702 dev_err(&hdev->pdev->dev,
3703 "Configure rss indir table fail,status = %d\n",
3711 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3712 u16 *tc_size, u16 *tc_offset)
3714 struct hclge_rss_tc_mode_cmd *req;
3715 struct hclge_desc desc;
3719 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3720 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3722 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3725 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3726 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3727 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3728 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3729 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3731 req->rss_tc_mode[i] = cpu_to_le16(mode);
3734 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3736 dev_err(&hdev->pdev->dev,
3737 "Configure rss tc mode fail, status = %d\n", ret);
3742 static void hclge_get_rss_type(struct hclge_vport *vport)
3744 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3745 vport->rss_tuple_sets.ipv4_udp_en ||
3746 vport->rss_tuple_sets.ipv4_sctp_en ||
3747 vport->rss_tuple_sets.ipv6_tcp_en ||
3748 vport->rss_tuple_sets.ipv6_udp_en ||
3749 vport->rss_tuple_sets.ipv6_sctp_en)
3750 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3751 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3752 vport->rss_tuple_sets.ipv6_fragment_en)
3753 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3755 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3758 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3760 struct hclge_rss_input_tuple_cmd *req;
3761 struct hclge_desc desc;
3764 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3766 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3768 /* Get the tuple cfg from pf */
3769 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3770 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3771 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3772 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3773 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3774 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3775 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3776 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3777 hclge_get_rss_type(&hdev->vport[0]);
3778 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3780 dev_err(&hdev->pdev->dev,
3781 "Configure rss input fail, status = %d\n", ret);
3785 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3788 struct hclge_vport *vport = hclge_get_vport(handle);
3791 /* Get hash algorithm */
3793 switch (vport->rss_algo) {
3794 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3795 *hfunc = ETH_RSS_HASH_TOP;
3797 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3798 *hfunc = ETH_RSS_HASH_XOR;
3801 *hfunc = ETH_RSS_HASH_UNKNOWN;
3806 /* Get the RSS Key required by the user */
3808 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3810 /* Get indirect table */
3812 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3813 indir[i] = vport->rss_indirection_tbl[i];
3818 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3819 const u8 *key, const u8 hfunc)
3821 struct hclge_vport *vport = hclge_get_vport(handle);
3822 struct hclge_dev *hdev = vport->back;
3826 /* Set the RSS Hash Key if specified by the user */
3829 case ETH_RSS_HASH_TOP:
3830 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3832 case ETH_RSS_HASH_XOR:
3833 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3835 case ETH_RSS_HASH_NO_CHANGE:
3836 hash_algo = vport->rss_algo;
3842 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3846 /* Update the shadow RSS key with the user specified key */
3847 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3848 vport->rss_algo = hash_algo;
3851 /* Update the shadow RSS table with user specified qids */
3852 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3853 vport->rss_indirection_tbl[i] = indir[i];
3855 /* Update the hardware */
3856 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3859 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3861 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3863 if (nfc->data & RXH_L4_B_2_3)
3864 hash_sets |= HCLGE_D_PORT_BIT;
3866 hash_sets &= ~HCLGE_D_PORT_BIT;
3868 if (nfc->data & RXH_IP_SRC)
3869 hash_sets |= HCLGE_S_IP_BIT;
3871 hash_sets &= ~HCLGE_S_IP_BIT;
3873 if (nfc->data & RXH_IP_DST)
3874 hash_sets |= HCLGE_D_IP_BIT;
3876 hash_sets &= ~HCLGE_D_IP_BIT;
3878 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3879 hash_sets |= HCLGE_V_TAG_BIT;
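/* Example (illustrative): for SCTP_V4_FLOW with nfc->data =
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 this returns
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT | HCLGE_V_TAG_BIT.
 */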
3884 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3885 struct ethtool_rxnfc *nfc)
3887 struct hclge_vport *vport = hclge_get_vport(handle);
3888 struct hclge_dev *hdev = vport->back;
3889 struct hclge_rss_input_tuple_cmd *req;
3890 struct hclge_desc desc;
3894 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3895 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3898 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3899 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3901 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3902 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3903 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3904 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3905 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3906 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3907 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3908 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3910 tuple_sets = hclge_get_rss_hash_bits(nfc);
3911 switch (nfc->flow_type) {
3913 req->ipv4_tcp_en = tuple_sets;
3916 req->ipv6_tcp_en = tuple_sets;
3919 req->ipv4_udp_en = tuple_sets;
3922 req->ipv6_udp_en = tuple_sets;
3925 req->ipv4_sctp_en = tuple_sets;
3928 if ((nfc->data & RXH_L4_B_0_1) ||
3929 (nfc->data & RXH_L4_B_2_3))
3932 req->ipv6_sctp_en = tuple_sets;
3935 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3938 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3944 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3946 dev_err(&hdev->pdev->dev,
3947 "Set rss tuple fail, status = %d\n", ret);
3951 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3952 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3953 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3954 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3955 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3956 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3957 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3958 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3959 hclge_get_rss_type(vport);
3963 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3964 struct ethtool_rxnfc *nfc)
3966 struct hclge_vport *vport = hclge_get_vport(handle);
3971 switch (nfc->flow_type) {
3973 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3976 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3979 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3982 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3985 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3988 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3992 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4001 if (tuple_sets & HCLGE_D_PORT_BIT)
4002 nfc->data |= RXH_L4_B_2_3;
4003 if (tuple_sets & HCLGE_S_PORT_BIT)
4004 nfc->data |= RXH_L4_B_0_1;
4005 if (tuple_sets & HCLGE_D_IP_BIT)
4006 nfc->data |= RXH_IP_DST;
4007 if (tuple_sets & HCLGE_S_IP_BIT)
4008 nfc->data |= RXH_IP_SRC;
4013 static int hclge_get_tc_size(struct hnae3_handle *handle)
4015 struct hclge_vport *vport = hclge_get_vport(handle);
4016 struct hclge_dev *hdev = vport->back;
4018 return hdev->rss_size_max;
4021 int hclge_rss_init_hw(struct hclge_dev *hdev)
4023 struct hclge_vport *vport = hdev->vport;
4024 u8 *rss_indir = vport[0].rss_indirection_tbl;
4025 u16 rss_size = vport[0].alloc_rss_size;
4026 u8 *key = vport[0].rss_hash_key;
4027 u8 hfunc = vport[0].rss_algo;
4028 u16 tc_offset[HCLGE_MAX_TC_NUM];
4029 u16 tc_valid[HCLGE_MAX_TC_NUM];
4030 u16 tc_size[HCLGE_MAX_TC_NUM];
4034 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4038 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4042 ret = hclge_set_rss_input_tuple(hdev);
4046 /* Each TC has the same queue size; the tc_size set to hardware is
4047 * the log2 of rss_size rounded up to a power of two, and the actual
4048 * queue size is limited by the indirection table.
4050 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4051 dev_err(&hdev->pdev->dev,
4052 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4057 roundup_size = roundup_pow_of_two(rss_size);
4058 roundup_size = ilog2(roundup_size);
4060 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4063 if (!(hdev->hw_tc_map & BIT(i)))
4067 tc_size[i] = roundup_size;
4068 tc_offset[i] = rss_size * i;
4071 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
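/* Worked example (illustrative): with rss_size = 24,
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so tc_size[i] = 5 and
 * tc_offset[i] = 24 * i for each TC set in hw_tc_map.
 */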
4074 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4076 struct hclge_vport *vport = hdev->vport;
4079 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4080 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4081 vport[j].rss_indirection_tbl[i] =
4082 i % vport[j].alloc_rss_size;
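/* Example (illustrative): with alloc_rss_size = 16 the table becomes
 * 0, 1, ..., 15, 0, 1, ..., spreading flows round-robin across the
 * vport's RSS queues.
 */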
4086 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4088 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4089 struct hclge_vport *vport = hdev->vport;
4091 if (hdev->pdev->revision >= 0x21)
4092 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4094 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4095 vport[i].rss_tuple_sets.ipv4_tcp_en =
4096 HCLGE_RSS_INPUT_TUPLE_OTHER;
4097 vport[i].rss_tuple_sets.ipv4_udp_en =
4098 HCLGE_RSS_INPUT_TUPLE_OTHER;
4099 vport[i].rss_tuple_sets.ipv4_sctp_en =
4100 HCLGE_RSS_INPUT_TUPLE_SCTP;
4101 vport[i].rss_tuple_sets.ipv4_fragment_en =
4102 HCLGE_RSS_INPUT_TUPLE_OTHER;
4103 vport[i].rss_tuple_sets.ipv6_tcp_en =
4104 HCLGE_RSS_INPUT_TUPLE_OTHER;
4105 vport[i].rss_tuple_sets.ipv6_udp_en =
4106 HCLGE_RSS_INPUT_TUPLE_OTHER;
4107 vport[i].rss_tuple_sets.ipv6_sctp_en =
4108 HCLGE_RSS_INPUT_TUPLE_SCTP;
4109 vport[i].rss_tuple_sets.ipv6_fragment_en =
4110 HCLGE_RSS_INPUT_TUPLE_OTHER;
4112 vport[i].rss_algo = rss_algo;
4114 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4115 HCLGE_RSS_KEY_SIZE);
4118 hclge_rss_indir_init_cfg(hdev);
4121 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4122 int vector_id, bool en,
4123 struct hnae3_ring_chain_node *ring_chain)
4125 struct hclge_dev *hdev = vport->back;
4126 struct hnae3_ring_chain_node *node;
4127 struct hclge_desc desc;
4128 struct hclge_ctrl_vector_chain_cmd *req
4129 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4130 enum hclge_cmd_status status;
4131 enum hclge_opcode_type op;
4132 u16 tqp_type_and_id;
4135 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4136 hclge_cmd_setup_basic_desc(&desc, op, false);
4137 req->int_vector_id = vector_id;
4140 for (node = ring_chain; node; node = node->next) {
4141 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4142 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4144 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4145 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4146 HCLGE_TQP_ID_S, node->tqp_index);
4147 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4149 hnae3_get_field(node->int_gl_idx,
4150 HNAE3_RING_GL_IDX_M,
4151 HNAE3_RING_GL_IDX_S));
4152 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4153 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4154 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4155 req->vfid = vport->vport_id;
4157 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4159 dev_err(&hdev->pdev->dev,
4160 "Map TQP fail, status is %d.\n",
4166 hclge_cmd_setup_basic_desc(&desc,
4169 req->int_vector_id = vector_id;
4174 req->int_cause_num = i;
4175 req->vfid = vport->vport_id;
4176 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4178 dev_err(&hdev->pdev->dev,
4179 "Map TQP fail, status is %d.\n", status);
4187 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4189 struct hnae3_ring_chain_node *ring_chain)
4191 struct hclge_vport *vport = hclge_get_vport(handle);
4192 struct hclge_dev *hdev = vport->back;
4195 vector_id = hclge_get_vector_index(hdev, vector);
4196 if (vector_id < 0) {
4197 dev_err(&hdev->pdev->dev,
4198 "Get vector index fail. vector_id =%d\n", vector_id);
4202 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4205 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4207 struct hnae3_ring_chain_node *ring_chain)
4209 struct hclge_vport *vport = hclge_get_vport(handle);
4210 struct hclge_dev *hdev = vport->back;
4213 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4216 vector_id = hclge_get_vector_index(hdev, vector);
4217 if (vector_id < 0) {
4218 dev_err(&handle->pdev->dev,
4219 "Get vector index fail. ret =%d\n", vector_id);
4223 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4225 dev_err(&handle->pdev->dev,
4226 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4233 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4234 struct hclge_promisc_param *param)
4236 struct hclge_promisc_cfg_cmd *req;
4237 struct hclge_desc desc;
4240 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4242 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4243 req->vf_id = param->vf_id;
4245 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4246 * pdev revision(0x20); newer revisions support them. The value of
4247 * these two fields will not cause an error when the driver sends the
4248 * command to firmware in revision(0x20).
4250 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4251 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4253 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4255 dev_err(&hdev->pdev->dev,
4256 "Set promisc mode fail, status is %d.\n", ret);
4261 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4262 bool en_mc, bool en_bc, int vport_id)
4267 memset(param, 0, sizeof(struct hclge_promisc_param));
4269 param->enable = HCLGE_PROMISC_EN_UC;
4271 param->enable |= HCLGE_PROMISC_EN_MC;
4273 param->enable |= HCLGE_PROMISC_EN_BC;
4274 param->vf_id = vport_id;
4277 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4280 struct hclge_vport *vport = hclge_get_vport(handle);
4281 struct hclge_dev *hdev = vport->back;
4282 struct hclge_promisc_param param;
4283 bool en_bc_pmc = true;
4285 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4286 * is always bypassed. So broadcast promisc should be disabled until
4287 * the user enables promisc mode
4289 if (handle->pdev->revision == 0x20)
4290 en_bc_pmc = !!(handle->netdev_flags & HNAE3_BPE);
4292 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4294 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4297 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4299 struct hclge_get_fd_mode_cmd *req;
4300 struct hclge_desc desc;
4303 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4305 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4307 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4309 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4313 *fd_mode = req->mode;
4318 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4319 u32 *stage1_entry_num,
4320 u32 *stage2_entry_num,
4321 u16 *stage1_counter_num,
4322 u16 *stage2_counter_num)
4324 struct hclge_get_fd_allocation_cmd *req;
4325 struct hclge_desc desc;
4328 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4330 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4332 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4334 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4339 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4340 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4341 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4342 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4347 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4349 struct hclge_set_fd_key_config_cmd *req;
4350 struct hclge_fd_key_cfg *stage;
4351 struct hclge_desc desc;
4354 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4356 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4357 stage = &hdev->fd_cfg.key_cfg[stage_num];
4358 req->stage = stage_num;
4359 req->key_select = stage->key_sel;
4360 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4361 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4362 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4363 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4364 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4365 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4367 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4369 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4374 static int hclge_init_fd_config(struct hclge_dev *hdev)
4376 #define LOW_2_WORDS 0x03
4377 struct hclge_fd_key_cfg *key_cfg;
4380 if (!hnae3_dev_fd_supported(hdev))
4383 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4387 switch (hdev->fd_cfg.fd_mode) {
4388 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4389 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4391 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4392 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4395 dev_err(&hdev->pdev->dev,
4396 "Unsupported flow director mode %d\n",
4397 hdev->fd_cfg.fd_mode);
4401 hdev->fd_cfg.proto_support =
4402 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4403 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4404 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4405 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4406 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4407 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4408 key_cfg->outer_sipv6_word_en = 0;
4409 key_cfg->outer_dipv6_word_en = 0;
4411 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4412 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4413 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4414 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4416 /* If the max 400-bit key is used, we can also support tuples for ether type */
4417 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4418 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4419 key_cfg->tuple_active |=
4420 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4423 /* roce_type is used to filter roce frames
4424 * dst_vport is used to specify the rule
4426 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4428 ret = hclge_get_fd_allocation(hdev,
4429 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4430 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4431 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4432 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4436 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}
static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);

	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[3],
		       rule->tuples_mask.src_ip[3]);
		calc_y(tmp_y_l, rule->tuples.src_ip[3],
		       rule->tuples_mask.src_ip[3]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
		       rule->tuples_mask.dst_ip[3]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
		       rule->tuples_mask.dst_ip[3]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}
static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}
static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}
/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region; unused bits are filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	int i, ret, tuple_size;
	u8 meta_data_region;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0 ; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->location, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->location, ret);
	return ret;
}
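/* Each TCAM entry at rule->location is paired with an action-data (AD)
 * entry at the same index: hclge_config_key() programs the match key,
 * and hclge_config_action() below fills in what to do on a hit (drop,
 * or forward to a specific queue).
 */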
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs, u32 *unused)
{
	struct ethtool_tcpip4_spec *tcp_ip4_spec;
	struct ethtool_usrip4_spec *usr_ip4_spec;
	struct ethtool_tcpip6_spec *tcp_ip6_spec;
	struct ethtool_usrip6_spec *usr_ip6_spec;
	struct ethhdr *ether_spec;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
		return -EOPNOTSUPP;

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

		if (!tcp_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip4_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip4_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (!tcp_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		break;
	case IP_USER_FLOW:
		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

		if (!usr_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		if (!usr_ip4_spec->proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip4_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
			return -EOPNOTSUPP;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS);

		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip6_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip6_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (tcp_ip6_spec->tclass)
			return -EOPNOTSUPP;

		break;
	case IPV6_USER_FLOW:
		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
			BIT(INNER_DST_PORT);

		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip6_spec->l4_proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip6_spec->tclass)
			return -EOPNOTSUPP;

		if (usr_ip6_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		break;
	case ETHER_FLOW:
		ether_spec = &fs->h_u.ether_spec;
		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

		if (is_zero_ether_addr(ether_spec->h_source))
			*unused |= BIT(INNER_SRC_MAC);

		if (is_zero_ether_addr(ether_spec->h_dest))
			*unused |= BIT(INNER_DST_MAC);

		if (!ether_spec->h_proto)
			*unused |= BIT(INNER_ETH_TYPE);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		if (fs->h_ext.vlan_etype)
			return -EOPNOTSUPP;
		if (!fs->h_ext.vlan_tci)
			*unused |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci) {
			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	} else {
		*unused |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
			return -EOPNOTSUPP;

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused |= BIT(INNER_DST_MAC);
		else
			*unused &= ~(BIT(INNER_DST_MAC));
	}

	return 0;
}
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return rule && rule->location == location;
}
/* Must be called with fd_rule_lock held */
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add) {
			if (!hdev->hclge_fd_rule_num)
				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
			clear_bit(location, hdev->fd_bmap);

			return 0;
		}
	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %d is inexistent\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	set_bit(location, hdev->fd_bmap);
	hdev->hclge_fd_rule_num++;
	hdev->fd_active_type = new_rule->rule_type;

	return 0;
}
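/* The rule list is kept sorted by rule->location, which is why this
 * helper, hclge_fd_rule_exist() and hclge_get_fd_rule_info() can all
 * stop at the first node whose location is >= the one searched for.
 */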
static int hclge_fd_get_tuple(struct hclge_dev *hdev,
			      struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		rule->tuples.src_ip[3] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[3] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

		rule->tuples.dst_ip[3] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[3] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IP_USER_FLOW:
		rule->tuples.src_ip[3] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[3] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

		rule->tuples.dst_ip[3] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[3] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.tcp_ip6_spec.ip6src, 4);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.tcp_ip6_spec.ip6src, 4);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.tcp_ip6_spec.ip6dst, 4);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.tcp_ip6_spec.ip6dst, 4);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.usr_ip6_spec.ip6src, 4);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.usr_ip6_spec.ip6src, 4);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.usr_ip6_spec.ip6dst, 4);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.usr_ip6_spec.ip6dst, 4);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case ETHER_FLOW:
		ether_addr_copy(rule->tuples.src_mac,
				fs->h_u.ether_spec.h_source);
		ether_addr_copy(rule->tuples_mask.src_mac,
				fs->m_u.ether_spec.h_source);

		ether_addr_copy(rule->tuples.dst_mac,
				fs->h_u.ether_spec.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac,
				fs->m_u.ether_spec.h_dest);

		rule->tuples.ether_proto =
				be16_to_cpu(fs->h_u.ether_spec.h_proto);
		rule->tuples_mask.ether_proto =
				be16_to_cpu(fs->m_u.ether_spec.h_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_SCTP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_TCP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_UDP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	default:
		break;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}
/* Must be called with fd_rule_lock held */
static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	if (!rule) {
		dev_err(&hdev->pdev->dev,
			"The flow director rule is NULL\n");
		return -EINVAL;
	}

	/* it will never fail here, so needn't check the return value */
	hclge_fd_update_rule_list(hdev, rule, rule->location, true);

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	return 0;

clear_rule:
	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
	return ret;
}
static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	if (!hdev->fd_en) {
		dev_warn(&hdev->pdev->dev,
			 "Please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
		return ret;
	}

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		u16 tqps;

		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%d) > max vf num (%d)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%d) > max tqp num (%d)\n",
				ring, tqps - 1);
			return -EINVAL;
		}

		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
		q_index = ring;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;

	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	/* to avoid rule conflict, when user configure rule by ethtool,
	 * we need to clear all arfs rules
	 */
	hclge_clear_arfs_rules(handle);

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}
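/* Note on the ring_cookie decoding above: ethtool packs both a VF id and
 * a ring id into fs->ring_cookie (see ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF);
 * vf == 0 means the PF itself, so the queue bound is checked against the
 * selected vport's alloc_tqps.
 */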
static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %d is inexistent\n",
			fs->location);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				   fs->location, NULL, false);
	if (ret)
		return ret;

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
				     bool clear_list)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	if (!hnae3_dev_fd_supported(hdev))
		return;

	spin_lock_bh(&hdev->fd_rule_lock);
	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}

	spin_unlock_bh(&hdev->fd_rule_lock);
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	/* Return ok here, because reset error handling will check this
	 * return value. If error is returned here, the reset process will
	 * fail.
	 */
	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	/* if fd is disabled, should not restore it when reset */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
		if (!ret)
			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);

		if (ret) {
			dev_warn(&hdev->pdev->dev,
				 "Restore rule %d failed, remove it\n",
				 rule->location);
			clear_bit(rule->location, hdev->fd_bmap);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	}

	if (hdev->hclge_fd_rule_num)
		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
				  struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule *rule = NULL;
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	struct hlist_node *node2;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	spin_lock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= fs->location)
			break;
	}

	if (!rule || fs->location != rule->location) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -ENOENT;
	}

	fs->flow_type = rule->flow_type;
	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		fs->h_u.tcp_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[3]);
		fs->m_u.tcp_ip4_spec.ip4src =
				rule->unused_tuple & BIT(INNER_SRC_IP) ?
				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);

		fs->h_u.tcp_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[3]);
		fs->m_u.tcp_ip4_spec.ip4dst =
				rule->unused_tuple & BIT(INNER_DST_IP) ?
				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);

		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip4_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip4_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.tcp_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		break;
	case IP_USER_FLOW:
		fs->h_u.usr_ip4_spec.ip4src =
				cpu_to_be32(rule->tuples.src_ip[3]);
		fs->m_u.usr_ip4_spec.ip4src =
				rule->unused_tuple & BIT(INNER_SRC_IP) ?
				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);

		fs->h_u.usr_ip4_spec.ip4dst =
				cpu_to_be32(rule->tuples.dst_ip[3]);
		fs->m_u.usr_ip4_spec.ip4dst =
				rule->unused_tuple & BIT(INNER_DST_IP) ?
				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);

		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
		fs->m_u.usr_ip4_spec.tos =
				rule->unused_tuple & BIT(INNER_IP_TOS) ?
				0 : rule->tuples_mask.ip_tos;

		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip4_spec.proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
				  rule->tuples.src_ip, 4);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, 4);

		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, 4);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
		else
			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, 4);

		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
		fs->m_u.tcp_ip6_spec.psrc =
				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.src_port);

		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
		fs->m_u.tcp_ip6_spec.pdst =
				rule->unused_tuple & BIT(INNER_DST_PORT) ?
				0 : cpu_to_be16(rule->tuples_mask.dst_port);

		break;
	case IPV6_USER_FLOW:
		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
				  rule->tuples.src_ip, 4);
		if (rule->unused_tuple & BIT(INNER_SRC_IP))
			memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
					  rule->tuples_mask.src_ip, 4);

		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
				  rule->tuples.dst_ip, 4);
		if (rule->unused_tuple & BIT(INNER_DST_IP))
			memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
		else
			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
					  rule->tuples_mask.dst_ip, 4);

		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
		fs->m_u.usr_ip6_spec.l4_proto =
				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
				0 : rule->tuples_mask.ip_proto;

		break;
	case ETHER_FLOW:
		ether_addr_copy(fs->h_u.ether_spec.h_source,
				rule->tuples.src_mac);
		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_source);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_source,
					rule->tuples_mask.src_mac);

		ether_addr_copy(fs->h_u.ether_spec.h_dest,
				rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_u.ether_spec.h_dest);
		else
			ether_addr_copy(fs->m_u.ether_spec.h_dest,
					rule->tuples_mask.dst_mac);

		fs->h_u.ether_spec.h_proto =
				cpu_to_be16(rule->tuples.ether_proto);
		fs->m_u.ether_spec.h_proto =
				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
				0 : cpu_to_be16(rule->tuples_mask.ether_proto);

		break;
	default:
		spin_unlock_bh(&hdev->fd_rule_lock);
		return -EOPNOTSUPP;
	}

	if (fs->flow_type & FLOW_EXT) {
		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
		fs->m_ext.vlan_tci =
				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
				cpu_to_be16(VLAN_VID_MASK) :
				cpu_to_be16(rule->tuples_mask.vlan_tag1);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
	}

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else {
		u64 vf_id;

		fs->ring_cookie = rule->queue_id;
		vf_id = rule->vf_id;
		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		fs->ring_cookie |= vf_id;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}
static int hclge_get_all_rules(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node2;
	int cnt = 0;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (cnt == cmd->rule_cnt) {
			spin_unlock_bh(&hdev->fd_rule_lock);
			return -EMSGSIZE;
		}

		rule_locs[cnt] = rule->location;
		cnt++;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	cmd->rule_cnt = cnt;

	return 0;
}
static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
				     struct hclge_fd_rule_tuples *tuples)
{
	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
	tuples->ip_proto = fkeys->basic.ip_proto;
	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);

	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
	} else {
		memcpy(tuples->src_ip,
		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
		       sizeof(tuples->src_ip));
		memcpy(tuples->dst_ip,
		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
		       sizeof(tuples->dst_ip));
	}
}
/* traverse all rules, check whether an existing rule has the same tuples */
static struct hclge_fd_rule *
hclge_fd_search_flow_keys(struct hclge_dev *hdev,
			  const struct hclge_fd_rule_tuples *tuples)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
			return rule;
	}

	return NULL;
}
static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
				     struct hclge_fd_rule *rule)
{
	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
			     BIT(INNER_SRC_PORT);
	rule->action = 0;
	rule->vf_id = 0;
	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
	if (tuples->ether_proto == ETH_P_IP) {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V4_FLOW;
		else
			rule->flow_type = UDP_V4_FLOW;
	} else {
		if (tuples->ip_proto == IPPROTO_TCP)
			rule->flow_type = TCP_V6_FLOW;
		else
			rule->flow_type = UDP_V6_FLOW;
	}
	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
}
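/* aRFS rules built above match on exact values: every byte of
 * tuples_mask is set to 0xFF, and only the fields aRFS cannot supply
 * (MAC addresses, VLAN tag, TOS and the source port) are marked unused.
 */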
static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
				      u16 flow_id, struct flow_keys *fkeys)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_fd_rule_tuples new_tuples;
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	u16 tmp_queue_id;
	u16 bit_id;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	memset(&new_tuples, 0, sizeof(new_tuples));
	hclge_fd_get_flow_tuples(fkeys, &new_tuples);

	spin_lock_bh(&hdev->fd_rule_lock);

	/* when an fd rule added by the user already exists,
	 * arfs should not work
	 */
	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);

		return -EOPNOTSUPP;
	}

	/* check whether a flow director filter exists for this flow,
	 * if not, create a new filter for it;
	 * if filter exist with different queue id, modify the filter;
	 * if filter exist with same queue id, do nothing
	 */
	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
	if (!rule) {
		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOSPC;
		}

		/* must not sleep: fd_rule_lock is a spinlock held in BH
		 * context, hence GFP_ATOMIC rather than GFP_KERNEL
		 */
		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
		if (!rule) {
			spin_unlock_bh(&hdev->fd_rule_lock);

			return -ENOMEM;
		}

		set_bit(bit_id, hdev->fd_bmap);
		rule->location = bit_id;
		rule->flow_id = flow_id;
		rule->queue_id = queue_id;
		hclge_fd_build_arfs_rule(&new_tuples, rule);
		ret = hclge_fd_config_rule(hdev, rule);

		spin_unlock_bh(&hdev->fd_rule_lock);

		if (ret)
			return ret;

		return rule->location;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	if (rule->queue_id == queue_id)
		return rule->location;

	tmp_queue_id = rule->queue_id;
	rule->queue_id = queue_id;
	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret) {
		rule->queue_id = tmp_queue_id;
		return ret;
	}

	return rule->location;
}
static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	HLIST_HEAD(del_list);

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
					rule->flow_id, rule->location)) {
			hlist_del_init(&rule->rule_node);
			hlist_add_head(&rule->rule_node, &del_list);
			hdev->hclge_fd_rule_num--;
			clear_bit(rule->location, hdev->fd_bmap);
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);

	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
				     rule->location, NULL, false);
		kfree(rule);
	}
#endif
}
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
		hclge_del_all_fd_entries(handle, true);
#endif
}
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
}

static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rst_stats.hw_reset_done_cnt;
}
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool clear;

	hdev->fd_en = enable;
	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
	if (!enable)
		hclge_del_all_fd_entries(handle, clear);
	else
		hclge_restore_fd_entries(handle);
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}
static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
				     enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100

#define HCLGE_MAC_LINK_STATUS_MS   10
#define HCLGE_MAC_LINK_STATUS_NUM  100
#define HCLGE_MAC_LINK_STATUS_DOWN 0
#define HCLGE_MAC_LINK_STATUS_UP   1

	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int mac_link_ret = 0;
	int ret, i = 0;
	u8 loop_mode_b;

	req = (struct hclge_serdes_lb_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	switch (loop_mode) {
	case HNAE3_LOOP_SERIAL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		break;
	case HNAE3_LOOP_PARALLEL_SERDES:
		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported serdes loopback mode %d\n", loop_mode);
		return -ENOTSUPP;
	}

	if (en) {
		req->enable = loop_mode_b;
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
	} else {
		req->mask = loop_mode_b;
		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get fail, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}

	hclge_cfg_mac_mode(hdev, en);

	i = 0;
	do {
		/* serdes Internal loopback, independent of the network cable.*/
		msleep(HCLGE_MAC_LINK_STATUS_MS);
		ret = hclge_get_mac_link_status(hdev);
		if (ret == mac_link_ret)
			return 0;
	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);

	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");

	return -EBUSY;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}
static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	switch (loop_mode) {
	case HNAE3_LOOP_APP:
		ret = hclge_set_app_loopback(hdev, en);
		break;
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo;
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	kinfo = &vport->nic.kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}
static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
	}
}
static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}
static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_clear_arfs_rules(handle);

	/* If it is not PF reset, the firmware will disable the MAC,
	 * so we only need to stop the PHY here.
	 */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
	    hdev->reset_type != HNAE3_FUNC_RESET) {
		hclge_mac_stop_phy(hdev);
		return;
	}

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclge_reset_tqp(handle, i);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	hclge_update_link_status(hdev);
}
int hclge_vport_start(struct hclge_vport *vport)
{
	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->last_active_jiffies = jiffies;

	return 0;
}

void hclge_vport_stop(struct hclge_vport *vport)
{
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
}

static int hclge_client_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_vport_start(vport);
}

static void hclge_client_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	hclge_vport_stop(vport);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EINVAL;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num  = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}
static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr, bool is_mc)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);

	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	if (is_mc) {
		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	}

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
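/* hclge_prepare_mac_addr() packs the six MAC bytes to match the hardware
 * MAC_VLAN entry layout: bytes 0-3 into mac_addr_hi32 (byte 0 in the low
 * bits) and bytes 4-5 into mac_addr_lo16.
 */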
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}
static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_init_umv_space(struct hclge_dev *hdev)
{
	u16 allocated_size = 0;
	int ret;

	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
				  true);
	if (ret)
		return ret;

	if (allocated_size < hdev->wanted_umv_size)
		dev_warn(&hdev->pdev->dev,
			 "Alloc umv space failed, want %d, get %d\n",
			 hdev->wanted_umv_size, allocated_size);

	mutex_init(&hdev->umv_mutex);
	hdev->max_umv_size = allocated_size;
	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);

	return 0;
}
static int hclge_uninit_umv_space(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->max_umv_size > 0) {
		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
					  false);
		if (ret)
			return ret;
		hdev->max_umv_size = 0;
	}
	mutex_destroy(&hdev->umv_mutex);

	return 0;
}
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc)
{
	struct hclge_umv_spc_alc_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
	hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
	req->space_size = cpu_to_le32(space_size);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"%s umv space failed for cmd_send, ret =%d\n",
			is_alloc ? "allocate" : "free", ret);
		return ret;
	}

	if (is_alloc && allocated_size)
		*allocated_size = le32_to_cpu(desc.data[1]);

	return 0;
}
static void hclge_reset_umv_space(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->used_umv_num = 0;
	}

	mutex_lock(&hdev->umv_mutex);
	hdev->share_umv_size = hdev->priv_umv_size +
			hdev->max_umv_size % (hdev->num_req_vfs + 2);
	mutex_unlock(&hdev->umv_mutex);
}
static bool hclge_is_umv_space_full(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	bool is_full;

	mutex_lock(&hdev->umv_mutex);
	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
		   hdev->share_umv_size == 0);
	mutex_unlock(&hdev->umv_mutex);

	return is_full;
}
static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
{
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->umv_mutex);
	if (is_free) {
		if (vport->used_umv_num > hdev->priv_umv_size)
			hdev->share_umv_size++;

		if (vport->used_umv_num > 0)
			vport->used_umv_num--;
	} else {
		if (vport->used_umv_num >= hdev->priv_umv_size &&
		    hdev->share_umv_size > 0)
			hdev->share_umv_size--;
		vport->used_umv_num++;
	}
	mutex_unlock(&hdev->umv_mutex);
}
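/* UMV (unicast MAC-VLAN) accounting: max_umv_size is split evenly into
 * num_req_vfs + 2 private quotas (priv_umv_size), and the remainder is
 * pooled as share_umv_size; as implemented above, a vport only draws
 * from the shared pool once its private quota is exhausted.
 */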
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}
int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr,
			 is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is inexistent. Repeated unicast entry
	 * is not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		if (!hclge_is_umv_space_full(vport)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			return ret;
		}

		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
			hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret) {
		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
			 vport->vport_id, addr);
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret)
		hclge_update_umv_space(vport, true);

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, update the VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	if (status == -ENOSPC)
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");

	return status;
}
6685 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6686 const unsigned char *addr)
6688 struct hclge_vport *vport = hclge_get_vport(handle);
6690 return hclge_rm_mc_addr_common(vport, addr);
6693 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6694 const unsigned char *addr)
6696 struct hclge_dev *hdev = vport->back;
6697 struct hclge_mac_vlan_tbl_entry_cmd req;
6698 enum hclge_cmd_status status;
6699 struct hclge_desc desc[3];
6701 /* mac addr check */
6702 if (!is_multicast_ether_addr(addr)) {
6703 dev_dbg(&hdev->pdev->dev,
6704 "Remove mc mac err! invalid mac:%pM.\n",
6709 memset(&req, 0, sizeof(req));
6710 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6711 hclge_prepare_mac_addr(&req, addr, true);
6712 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6714 /* This mac addr exists, remove this handle's VFID for it */
6715 hclge_update_desc_vfid(desc, vport->vport_id, true);
6717 if (hclge_is_all_function_id_zero(desc))
6718 /* All the vfids are zero, so delete this entry */
6719 status = hclge_remove_mac_vlan_tbl(vport, &req);
6721 /* Not all the vfids are zero, so just update the vfids */
6722 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
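/* Editor's annotation (not in the original source): a multicast entry is
 * shared between vports through a VFID bitmap, so removal works like a
 * reference count: this vport's bit is cleared first, and only when
 * hclge_is_all_function_id_zero() reports that no vport references the
 * entry any more is the entry itself removed; otherwise the entry is
 * rewritten with the updated bitmap.
 */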
6725 /* Maybe this mac address is in the mta table, but it cannot be
6726 * deleted here because an mta entry represents an address
6727 * range rather than a specific address. The delete action for
6728 * all entries will take effect in update_mta_status, called by
6729 * hns3_nic_set_rx_mode.
6730 */
6737 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6738 enum HCLGE_MAC_ADDR_TYPE mac_type)
6740 struct hclge_vport_mac_addr_cfg *mac_cfg;
6741 struct list_head *list;
6743 if (!vport->vport_id)
6746 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6750 mac_cfg->hd_tbl_status = true;
6751 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6753 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6754 &vport->uc_mac_list : &vport->mc_mac_list;
6756 list_add_tail(&mac_cfg->node, list);
6759 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6761 enum HCLGE_MAC_ADDR_TYPE mac_type)
6763 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6764 struct list_head *list;
6765 bool uc_flag, mc_flag;
6767 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6768 &vport->uc_mac_list : &vport->mc_mac_list;
6770 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6771 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6773 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6774 if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
6775 if (uc_flag && mac_cfg->hd_tbl_status)
6776 hclge_rm_uc_addr_common(vport, mac_addr);
6778 if (mc_flag && mac_cfg->hd_tbl_status)
6779 hclge_rm_mc_addr_common(vport, mac_addr);
6781 list_del(&mac_cfg->node);
6788 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6789 enum HCLGE_MAC_ADDR_TYPE mac_type)
6791 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6792 struct list_head *list;
6794 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6795 &vport->uc_mac_list : &vport->mc_mac_list;
6797 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6798 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6799 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6801 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6802 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6804 mac_cfg->hd_tbl_status = false;
6806 list_del(&mac_cfg->node);
6812 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6814 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6815 struct hclge_vport *vport;
6818 mutex_lock(&hdev->vport_cfg_mutex);
6819 for (i = 0; i < hdev->num_alloc_vport; i++) {
6820 vport = &hdev->vport[i];
6821 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6822 list_del(&mac->node);
6826 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6827 list_del(&mac->node);
6831 mutex_unlock(&hdev->vport_cfg_mutex);
6834 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6835 u16 cmdq_resp, u8 resp_code)
6837 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6838 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6839 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6840 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6845 dev_err(&hdev->pdev->dev,
6846 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6851 switch (resp_code) {
6852 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6853 case HCLGE_ETHERTYPE_ALREADY_ADD:
6856 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6857 dev_err(&hdev->pdev->dev,
6858 "add mac ethertype failed for manager table overflow.\n");
6859 return_status = -EIO;
6861 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6862 dev_err(&hdev->pdev->dev,
6863 "add mac ethertype failed for key conflict.\n");
6864 return_status = -EIO;
6867 dev_err(&hdev->pdev->dev,
6868 "add mac ethertype failed for undefined, code=%d.\n",
6870 return_status = -EIO;
6873 return return_status;
6876 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6877 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6879 struct hclge_desc desc;
6884 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6885 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6887 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6889 dev_err(&hdev->pdev->dev,
6890 "add mac ethertype failed for cmd_send, ret =%d.\n",
6895 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6896 retval = le16_to_cpu(desc.retval);
6898 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6901 static int init_mgr_tbl(struct hclge_dev *hdev)
6906 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6907 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6909 dev_err(&hdev->pdev->dev,
6910 "add mac ethertype failed, ret =%d.\n",
6919 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6921 struct hclge_vport *vport = hclge_get_vport(handle);
6922 struct hclge_dev *hdev = vport->back;
6924 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6927 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6930 const unsigned char *new_addr = (const unsigned char *)p;
6931 struct hclge_vport *vport = hclge_get_vport(handle);
6932 struct hclge_dev *hdev = vport->back;
6935 /* mac addr check */
6936 if (is_zero_ether_addr(new_addr) ||
6937 is_broadcast_ether_addr(new_addr) ||
6938 is_multicast_ether_addr(new_addr)) {
6939 dev_err(&hdev->pdev->dev,
6940 "Change uc mac err! invalid mac:%p.\n",
6945 if ((!is_first || is_kdump_kernel()) &&
6946 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6947 dev_warn(&hdev->pdev->dev,
6948 "remove old uc mac address fail.\n");
6950 ret = hclge_add_uc_addr(handle, new_addr);
6952 dev_err(&hdev->pdev->dev,
6953 "add uc mac address fail, ret =%d.\n",
6957 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6958 dev_err(&hdev->pdev->dev,
6959 "restore uc mac address fail.\n");
6964 ret = hclge_pause_addr_cfg(hdev, new_addr);
6966 dev_err(&hdev->pdev->dev,
6967 "configure mac pause address fail, ret =%d.\n",
6972 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6977 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6980 struct hclge_vport *vport = hclge_get_vport(handle);
6981 struct hclge_dev *hdev = vport->back;
6983 if (!hdev->hw.mac.phydev)
6986 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6989 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6990 u8 fe_type, bool filter_en, u8 vf_id)
6992 struct hclge_vlan_filter_ctrl_cmd *req;
6993 struct hclge_desc desc;
6996 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6998 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6999 req->vlan_type = vlan_type;
7000 req->vlan_fe = filter_en ? fe_type : 0;
7003 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7005 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7011 #define HCLGE_FILTER_TYPE_VF 0
7012 #define HCLGE_FILTER_TYPE_PORT 1
7013 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7014 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7015 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7016 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7017 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7018 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7019 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7020 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7021 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7023 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7025 struct hclge_vport *vport = hclge_get_vport(handle);
7026 struct hclge_dev *hdev = vport->back;
7028 if (hdev->pdev->revision >= 0x21) {
7029 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7030 HCLGE_FILTER_FE_EGRESS, enable, 0);
7031 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7032 HCLGE_FILTER_FE_INGRESS, enable, 0);
7034 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7035 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7039 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7041 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7044 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7045 bool is_kill, u16 vlan, u8 qos,
7048 #define HCLGE_MAX_VF_BYTES 16
7049 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7050 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7051 struct hclge_desc desc[2];
7056 hclge_cmd_setup_basic_desc(&desc[0],
7057 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7058 hclge_cmd_setup_basic_desc(&desc[1],
7059 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7061 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7063 vf_byte_off = vfid / 8;
7064 vf_byte_val = 1 << (vfid % 8);
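/* Editor's annotation (not in the original source): the VF bitmap holds
 * one bit per vfid, eight vfids per byte. Worked example:
 *   vfid = 10 -> vf_byte_off = 10 / 8 = 1, vf_byte_val = 1 << 2 = 0x04
 * Bytes 0..15 live in desc[0] (HCLGE_MAX_VF_BYTES = 16), so vfids below
 * 128 land in req0 and larger vfids spill into req1 below.
 */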
7066 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7067 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7069 req0->vlan_id = cpu_to_le16(vlan);
7070 req0->vlan_cfg = is_kill;
7072 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7073 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7075 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7077 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7079 dev_err(&hdev->pdev->dev,
7080 "Send vf vlan command fail, ret =%d.\n",
7086 #define HCLGE_VF_VLAN_NO_ENTRY 2
7087 if (!req0->resp_code || req0->resp_code == 1)
7090 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7091 dev_warn(&hdev->pdev->dev,
7092 "vf vlan table is full, vf vlan filter is disabled\n");
7096 dev_err(&hdev->pdev->dev,
7097 "Add vf vlan filter fail, ret =%d.\n",
7100 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7101 if (!req0->resp_code)
7104 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7105 dev_warn(&hdev->pdev->dev,
7106 "vlan %d filter is not in vf vlan table\n",
7111 dev_err(&hdev->pdev->dev,
7112 "Kill vf vlan filter fail, ret =%d.\n",
7119 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7120 u16 vlan_id, bool is_kill)
7122 struct hclge_vlan_filter_pf_cfg_cmd *req;
7123 struct hclge_desc desc;
7124 u8 vlan_offset_byte_val;
7125 u8 vlan_offset_byte;
7129 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7131 vlan_offset_160 = vlan_id / 160;
7132 vlan_offset_byte = (vlan_id % 160) / 8;
7133 vlan_offset_byte_val = 1 << (vlan_id % 8);
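/* Editor's annotation (not in the original source): each descriptor
 * bitmap covers 160 VLAN ids (20 bytes). Worked example for
 * vlan_id = 200:
 *   vlan_offset_160      = 200 / 160       = 1
 *   vlan_offset_byte     = (200 % 160) / 8 = 5
 *   vlan_offset_byte_val = 1 << (200 % 8)  = 0x01
 */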
7135 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7136 req->vlan_offset = vlan_offset_160;
7137 req->vlan_cfg = is_kill;
7138 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7140 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7142 dev_err(&hdev->pdev->dev,
7143 "port vlan command, send fail, ret =%d.\n", ret);
7147 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7148 u16 vport_id, u16 vlan_id, u8 qos,
7151 u16 vport_idx, vport_num = 0;
7154 if (is_kill && !vlan_id)
7157 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7160 dev_err(&hdev->pdev->dev,
7161 "Set %d vport vlan filter config fail, ret =%d.\n",
7166 /* vlan 0 may be added twice when 8021q module is enabled */
7167 if (!is_kill && !vlan_id &&
7168 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7171 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7172 dev_err(&hdev->pdev->dev,
7173 "Add port vlan failed, vport %d is already in vlan %d\n",
7179 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7180 dev_err(&hdev->pdev->dev,
7181 "Delete port vlan failed, vport %d is not in vlan %d\n",
7186 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7189 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7190 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7191 is_kill);
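/* Editor's annotation (not in the original source):
 * hdev->vlan_table[vlan_id] is a per-VLAN bitmap of member vports, used
 * above like a reference count: the port-level filter entry is only
 * written when the first vport joins the VLAN (vport_num == 1 after a
 * successful add) and only cleared when the last vport leaves
 * (vport_num == 0 after a kill).
 */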
7196 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7198 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7199 struct hclge_vport_vtag_tx_cfg_cmd *req;
7200 struct hclge_dev *hdev = vport->back;
7201 struct hclge_desc desc;
7204 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7206 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7207 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7208 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7209 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7210 vcfg->accept_tag1 ? 1 : 0);
7211 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7212 vcfg->accept_untag1 ? 1 : 0);
7213 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7214 vcfg->accept_tag2 ? 1 : 0);
7215 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7216 vcfg->accept_untag2 ? 1 : 0);
7217 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7218 vcfg->insert_tag1_en ? 1 : 0);
7219 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7220 vcfg->insert_tag2_en ? 1 : 0);
7221 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7223 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7224 req->vf_bitmap[req->vf_offset] =
7225 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7227 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7229 dev_err(&hdev->pdev->dev,
7230 "Send port txvlan cfg command fail, ret =%d\n",
7236 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7238 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7239 struct hclge_vport_vtag_rx_cfg_cmd *req;
7240 struct hclge_dev *hdev = vport->back;
7241 struct hclge_desc desc;
7244 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7246 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7247 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7248 vcfg->strip_tag1_en ? 1 : 0);
7249 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7250 vcfg->strip_tag2_en ? 1 : 0);
7251 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7252 vcfg->vlan1_vlan_prionly ? 1 : 0);
7253 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7254 vcfg->vlan2_vlan_prionly ? 1 : 0);
7256 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7257 req->vf_bitmap[req->vf_offset] =
7258 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7260 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7262 dev_err(&hdev->pdev->dev,
7263 "Send port rxvlan cfg command fail, ret =%d\n",
7269 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7270 u16 port_base_vlan_state,
7275 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7276 vport->txvlan_cfg.accept_tag1 = true;
7277 vport->txvlan_cfg.insert_tag1_en = false;
7278 vport->txvlan_cfg.default_tag1 = 0;
7280 vport->txvlan_cfg.accept_tag1 = false;
7281 vport->txvlan_cfg.insert_tag1_en = true;
7282 vport->txvlan_cfg.default_tag1 = vlan_tag;
7285 vport->txvlan_cfg.accept_untag1 = true;
7287 /* accept_tag2 and accept_untag2 are not supported on
7288 * pdev revision(0x20); newer revisions support them, but
7289 * these two fields cannot be configured by the user.
7290 */
7291 vport->txvlan_cfg.accept_tag2 = true;
7292 vport->txvlan_cfg.accept_untag2 = true;
7293 vport->txvlan_cfg.insert_tag2_en = false;
7294 vport->txvlan_cfg.default_tag2 = 0;
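/* Editor's annotation (not in the original source): the TX tag1 settings
 * above reduce to this table:
 *
 *   port based VLAN   accept_tag1   insert_tag1_en   default_tag1
 *   DISABLE           true          false            0
 *   otherwise         false         true             vlan_tag
 */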
7296 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7297 vport->rxvlan_cfg.strip_tag1_en = false;
7298 vport->rxvlan_cfg.strip_tag2_en =
7299 vport->rxvlan_cfg.rx_vlan_offload_en;
7301 vport->rxvlan_cfg.strip_tag1_en =
7302 vport->rxvlan_cfg.rx_vlan_offload_en;
7303 vport->rxvlan_cfg.strip_tag2_en = true;
7305 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7306 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
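/* Editor's annotation (not in the original source): the RX strip settings
 * above reduce to this table:
 *
 *   port based VLAN   strip_tag1_en         strip_tag2_en
 *   DISABLE           false                 rx_vlan_offload_en
 *   otherwise         rx_vlan_offload_en    true
 */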
7308 ret = hclge_set_vlan_tx_offload_cfg(vport);
7312 return hclge_set_vlan_rx_offload_cfg(vport);
7315 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7317 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7318 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7319 struct hclge_desc desc;
7322 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7323 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7324 rx_req->ot_fst_vlan_type =
7325 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7326 rx_req->ot_sec_vlan_type =
7327 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7328 rx_req->in_fst_vlan_type =
7329 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7330 rx_req->in_sec_vlan_type =
7331 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7333 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7335 dev_err(&hdev->pdev->dev,
7336 "Send rxvlan protocol type command fail, ret =%d\n",
7341 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7343 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7344 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7345 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7347 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7349 dev_err(&hdev->pdev->dev,
7350 "Send txvlan protocol type command fail, ret =%d\n",
7356 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7358 #define HCLGE_DEF_VLAN_TYPE 0x8100
7360 struct hnae3_handle *handle = &hdev->vport[0].nic;
7361 struct hclge_vport *vport;
7365 if (hdev->pdev->revision >= 0x21) {
7366 /* for revision 0x21, vf vlan filter is per function */
7367 for (i = 0; i < hdev->num_alloc_vport; i++) {
7368 vport = &hdev->vport[i];
7369 ret = hclge_set_vlan_filter_ctrl(hdev,
7370 HCLGE_FILTER_TYPE_VF,
7371 HCLGE_FILTER_FE_EGRESS,
7378 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7379 HCLGE_FILTER_FE_INGRESS, true,
7384 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7385 HCLGE_FILTER_FE_EGRESS_V1_B,
7391 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7393 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7394 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7395 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7396 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7397 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7398 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7400 ret = hclge_set_vlan_protocol_type(hdev);
7404 for (i = 0; i < hdev->num_alloc_vport; i++) {
7407 vport = &hdev->vport[i];
7408 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7410 ret = hclge_vlan_offload_cfg(vport,
7411 vport->port_base_vlan_cfg.state,
7417 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7420 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7423 struct hclge_vport_vlan_cfg *vlan;
7425 /* vlan 0 is reserved */
7429 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7433 vlan->hd_tbl_status = writen_to_tbl;
7434 vlan->vlan_id = vlan_id;
7436 list_add_tail(&vlan->node, &vport->vlan_list);
7439 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7441 struct hclge_vport_vlan_cfg *vlan, *tmp;
7442 struct hclge_dev *hdev = vport->back;
7445 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7446 if (!vlan->hd_tbl_status) {
7447 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7449 vlan->vlan_id, 0, false);
7451 dev_err(&hdev->pdev->dev,
7452 "restore vport vlan list failed, ret=%d\n",
7457 vlan->hd_tbl_status = true;
7463 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7466 struct hclge_vport_vlan_cfg *vlan, *tmp;
7467 struct hclge_dev *hdev = vport->back;
7469 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7470 if (vlan->vlan_id == vlan_id) {
7471 if (is_write_tbl && vlan->hd_tbl_status)
7472 hclge_set_vlan_filter_hw(hdev,
7478 list_del(&vlan->node);
7485 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7487 struct hclge_vport_vlan_cfg *vlan, *tmp;
7488 struct hclge_dev *hdev = vport->back;
7490 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7491 if (vlan->hd_tbl_status)
7492 hclge_set_vlan_filter_hw(hdev,
7498 vlan->hd_tbl_status = false;
7500 list_del(&vlan->node);
7506 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7508 struct hclge_vport_vlan_cfg *vlan, *tmp;
7509 struct hclge_vport *vport;
7512 mutex_lock(&hdev->vport_cfg_mutex);
7513 for (i = 0; i < hdev->num_alloc_vport; i++) {
7514 vport = &hdev->vport[i];
7515 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7516 list_del(&vlan->node);
7520 mutex_unlock(&hdev->vport_cfg_mutex);
7523 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7525 struct hclge_vport *vport = hclge_get_vport(handle);
7527 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7528 vport->rxvlan_cfg.strip_tag1_en = false;
7529 vport->rxvlan_cfg.strip_tag2_en = enable;
7531 vport->rxvlan_cfg.strip_tag1_en = enable;
7532 vport->rxvlan_cfg.strip_tag2_en = true;
7534 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7535 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7536 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7538 return hclge_set_vlan_rx_offload_cfg(vport);
7541 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7542 u16 port_base_vlan_state,
7543 struct hclge_vlan_info *new_info,
7544 struct hclge_vlan_info *old_info)
7546 struct hclge_dev *hdev = vport->back;
7549 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7550 hclge_rm_vport_all_vlan_table(vport, false);
7551 return hclge_set_vlan_filter_hw(hdev,
7552 htons(new_info->vlan_proto),
7555 new_info->qos, false);
7558 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7559 vport->vport_id, old_info->vlan_tag,
7560 old_info->qos, true);
7564 return hclge_add_vport_all_vlan_table(vport);
7567 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7568 struct hclge_vlan_info *vlan_info)
7570 struct hnae3_handle *nic = &vport->nic;
7571 struct hclge_vlan_info *old_vlan_info;
7572 struct hclge_dev *hdev = vport->back;
7575 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7577 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7581 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7582 /* add new VLAN tag */
7583 ret = hclge_set_vlan_filter_hw(hdev,
7584 htons(vlan_info->vlan_proto),
7586 vlan_info->vlan_tag,
7587 vlan_info->qos, false);
7591 /* remove old VLAN tag */
7592 ret = hclge_set_vlan_filter_hw(hdev,
7593 htons(old_vlan_info->vlan_proto),
7595 old_vlan_info->vlan_tag,
7596 old_vlan_info->qos, true);
7603 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7608 /* update state only when disable/enable port based VLAN */
7609 vport->port_base_vlan_cfg.state = state;
7610 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7611 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7613 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7616 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7617 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7618 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7623 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7624 enum hnae3_port_base_vlan_state state,
7627 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7629 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7631 return HNAE3_PORT_BASE_VLAN_ENABLE;
7634 return HNAE3_PORT_BASE_VLAN_DISABLE;
7635 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7636 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7638 return HNAE3_PORT_BASE_VLAN_MODIFY;
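/* Editor's annotation (not in the original source): the state transitions
 * computed above appear to reduce to this table:
 *
 *   current state   requested vlan            result
 *   DISABLE         0                         NOCHANGE
 *   DISABLE         non-zero                  ENABLE
 *   ENABLE          0                         DISABLE
 *   ENABLE          same as current tag       NOCHANGE
 *   ENABLE          different non-zero tag    MODIFY
 */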
7642 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7643 u16 vlan, u8 qos, __be16 proto)
7645 struct hclge_vport *vport = hclge_get_vport(handle);
7646 struct hclge_dev *hdev = vport->back;
7647 struct hclge_vlan_info vlan_info;
7651 if (hdev->pdev->revision == 0x20)
7654 /* qos is a 3-bit value, so it cannot be bigger than 7 */
7655 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7657 if (proto != htons(ETH_P_8021Q))
7658 return -EPROTONOSUPPORT;
7660 vport = &hdev->vport[vfid];
7661 state = hclge_get_port_base_vlan_state(vport,
7662 vport->port_base_vlan_cfg.state,
7664 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7667 vlan_info.vlan_tag = vlan;
7668 vlan_info.qos = qos;
7669 vlan_info.vlan_proto = ntohs(proto);
7671 /* update port based VLAN for PF */
7673 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7674 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7675 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7680 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7681 return hclge_update_port_base_vlan_cfg(vport, state,
7684 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7692 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7693 u16 vlan_id, bool is_kill)
7695 struct hclge_vport *vport = hclge_get_vport(handle);
7696 struct hclge_dev *hdev = vport->back;
7697 bool writen_to_tbl = false;
7700 /* When port based VLAN is enabled, we use the port based VLAN as the
7701 * VLAN filter entry. In this case, we don't update the VLAN filter
7702 * table when the user adds a new VLAN or removes an existing one; we
7703 * just update the vport VLAN list. The VLAN ids in the VLAN list won't
7704 * be written to the VLAN filter table until port based VLAN is disabled.
7705 */
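/* Editor's annotation (not in the original source): so there are two
 * paths below. With port based VLAN disabled, the request is written to
 * the hardware filter and writen_to_tbl records that fact; with it
 * enabled, only the vport VLAN list is touched, and hd_tbl_status stays
 * false until the port based VLAN is removed and the list is replayed by
 * hclge_add_vport_all_vlan_table().
 */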
7706 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7707 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7708 vlan_id, 0, is_kill);
7709 writen_to_tbl = true;
7716 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7718 hclge_add_vport_vlan_table(vport, vlan_id,
7724 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7726 struct hclge_config_max_frm_size_cmd *req;
7727 struct hclge_desc desc;
7729 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7731 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7732 req->max_frm_size = cpu_to_le16(new_mps);
7733 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7735 return hclge_cmd_send(&hdev->hw, &desc, 1);
7738 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7740 struct hclge_vport *vport = hclge_get_vport(handle);
7742 return hclge_set_vport_mtu(vport, new_mtu);
7745 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7747 struct hclge_dev *hdev = vport->back;
7748 int i, max_frm_size, ret = 0;
7750 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7751 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7752 max_frm_size > HCLGE_MAC_MAX_FRAME)
7755 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
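/* Editor's annotation (not in the original source): worked example of the
 * frame-size math above, with ETH_HLEN = 14, ETH_FCS_LEN = 4 and
 * VLAN_HLEN = 4:
 *   new_mtu = 1500 -> max_frm_size = 1500 + 14 + 4 + 2 * 4 = 1526
 * which is then raised to HCLGE_MAC_DEFAULT_FRAME if smaller.
 */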
7756 mutex_lock(&hdev->vport_lock);
7757 /* VF's mps must fit within hdev->mps */
7758 if (vport->vport_id && max_frm_size > hdev->mps) {
7759 mutex_unlock(&hdev->vport_lock);
7761 } else if (vport->vport_id) {
7762 vport->mps = max_frm_size;
7763 mutex_unlock(&hdev->vport_lock);
7767 /* PF's mps must be greater than VF's mps */
7768 for (i = 1; i < hdev->num_alloc_vport; i++)
7769 if (max_frm_size < hdev->vport[i].mps) {
7770 mutex_unlock(&hdev->vport_lock);
7774 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7776 ret = hclge_set_mac_mtu(hdev, max_frm_size);
7778 dev_err(&hdev->pdev->dev,
7779 "Change mtu fail, ret =%d\n", ret);
7783 hdev->mps = max_frm_size;
7784 vport->mps = max_frm_size;
7786 ret = hclge_buffer_alloc(hdev);
7788 dev_err(&hdev->pdev->dev,
7789 "Allocate buffer fail, ret =%d\n", ret);
7792 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7793 mutex_unlock(&hdev->vport_lock);
7797 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7800 struct hclge_reset_tqp_queue_cmd *req;
7801 struct hclge_desc desc;
7804 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7806 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7807 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7808 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7810 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7812 dev_err(&hdev->pdev->dev,
7813 "Send tqp reset cmd error, status =%d\n", ret);
7820 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7822 struct hclge_reset_tqp_queue_cmd *req;
7823 struct hclge_desc desc;
7826 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7828 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7829 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7831 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7833 dev_err(&hdev->pdev->dev,
7834 "Get reset status error, status =%d\n", ret);
7838 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7841 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7843 struct hnae3_queue *queue;
7844 struct hclge_tqp *tqp;
7846 queue = handle->kinfo.tqp[queue_id];
7847 tqp = container_of(queue, struct hclge_tqp, q);
7852 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7854 struct hclge_vport *vport = hclge_get_vport(handle);
7855 struct hclge_dev *hdev = vport->back;
7856 int reset_try_times = 0;
7861 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7863 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7865 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7869 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7871 dev_err(&hdev->pdev->dev,
7872 "Send reset tqp cmd fail, ret = %d\n", ret);
7876 reset_try_times = 0;
7877 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7878 /* Wait for tqp hw reset */
7880 reset_status = hclge_get_reset_status(hdev, queue_gid);
7885 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7886 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7890 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7892 dev_err(&hdev->pdev->dev,
7893 "Deassert the soft reset fail, ret = %d\n", ret);
7898 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7900 struct hclge_dev *hdev = vport->back;
7901 int reset_try_times = 0;
7906 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7908 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7910 dev_warn(&hdev->pdev->dev,
7911 "Send reset tqp cmd fail, ret = %d\n", ret);
7915 reset_try_times = 0;
7916 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7917 /* Wait for tqp hw reset */
7919 reset_status = hclge_get_reset_status(hdev, queue_gid);
7924 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7925 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7929 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7931 dev_warn(&hdev->pdev->dev,
7932 "Deassert the soft reset fail, ret = %d\n", ret);
7935 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7937 struct hclge_vport *vport = hclge_get_vport(handle);
7938 struct hclge_dev *hdev = vport->back;
7940 return hdev->fw_version;
7943 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7945 struct phy_device *phydev = hdev->hw.mac.phydev;
7950 phy_set_asym_pause(phydev, rx_en, tx_en);
7953 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7958 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7959 else if (rx_en && !tx_en)
7960 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7961 else if (!rx_en && tx_en)
7962 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7964 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7966 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7969 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7971 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7976 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7981 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7983 struct phy_device *phydev = hdev->hw.mac.phydev;
7984 u16 remote_advertising = 0;
7985 u16 local_advertising = 0;
7986 u32 rx_pause, tx_pause;
7989 if (!phydev->link || !phydev->autoneg)
7992 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7995 remote_advertising = LPA_PAUSE_CAP;
7997 if (phydev->asym_pause)
7998 remote_advertising |= LPA_PAUSE_ASYM;
8000 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8001 remote_advertising);
8002 tx_pause = flowctl & FLOW_CTRL_TX;
8003 rx_pause = flowctl & FLOW_CTRL_RX;
8005 if (phydev->duplex == HCLGE_MAC_HALF) {
8010 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8013 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8014 u32 *rx_en, u32 *tx_en)
8016 struct hclge_vport *vport = hclge_get_vport(handle);
8017 struct hclge_dev *hdev = vport->back;
8019 *auto_neg = hclge_get_autoneg(handle);
8021 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8027 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8030 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8033 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8042 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8043 u32 rx_en, u32 tx_en)
8045 struct hclge_vport *vport = hclge_get_vport(handle);
8046 struct hclge_dev *hdev = vport->back;
8047 struct phy_device *phydev = hdev->hw.mac.phydev;
8050 fc_autoneg = hclge_get_autoneg(handle);
8051 if (auto_neg != fc_autoneg) {
8052 dev_info(&hdev->pdev->dev,
8053 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8057 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8058 dev_info(&hdev->pdev->dev,
8059 "Priority flow control enabled. Cannot set link flow control.\n");
8063 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8066 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8069 return phy_start_aneg(phydev);
8071 if (hdev->pdev->revision == 0x20)
8074 return hclge_restart_autoneg(handle);
8077 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8078 u8 *auto_neg, u32 *speed, u8 *duplex)
8080 struct hclge_vport *vport = hclge_get_vport(handle);
8081 struct hclge_dev *hdev = vport->back;
8084 *speed = hdev->hw.mac.speed;
8086 *duplex = hdev->hw.mac.duplex;
8088 *auto_neg = hdev->hw.mac.autoneg;
8091 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8094 struct hclge_vport *vport = hclge_get_vport(handle);
8095 struct hclge_dev *hdev = vport->back;
8098 *media_type = hdev->hw.mac.media_type;
8101 *module_type = hdev->hw.mac.module_type;
8104 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8105 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8107 struct hclge_vport *vport = hclge_get_vport(handle);
8108 struct hclge_dev *hdev = vport->back;
8109 struct phy_device *phydev = hdev->hw.mac.phydev;
8110 int mdix_ctrl, mdix, retval, is_resolved;
8113 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8114 *tp_mdix = ETH_TP_MDI_INVALID;
8118 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8120 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8121 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8122 HCLGE_PHY_MDIX_CTRL_S);
8124 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8125 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8126 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8128 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8130 switch (mdix_ctrl) {
8132 *tp_mdix_ctrl = ETH_TP_MDI;
8135 *tp_mdix_ctrl = ETH_TP_MDI_X;
8138 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8141 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8146 *tp_mdix = ETH_TP_MDI_INVALID;
8148 *tp_mdix = ETH_TP_MDI_X;
8150 *tp_mdix = ETH_TP_MDI;
8153 static void hclge_info_show(struct hclge_dev *hdev)
8155 struct device *dev = &hdev->pdev->dev;
8157 dev_info(dev, "PF info begin:\n");
8159 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8160 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8161 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8162 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8163 dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8164 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8165 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8166 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8167 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8168 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8169 dev_info(dev, "This is %s PF\n",
8170 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8171 dev_info(dev, "DCB %s\n",
8172 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8173 dev_info(dev, "MQPRIO %s\n",
8174 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8176 dev_info(dev, "PF info end.\n");
8179 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8180 struct hclge_vport *vport)
8182 struct hnae3_client *client = vport->nic.client;
8183 struct hclge_dev *hdev = ae_dev->priv;
8186 ret = client->ops->init_instance(&vport->nic);
8190 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8191 hnae3_set_client_init_flag(client, ae_dev, 1);
8193 if (netif_msg_drv(&hdev->vport->nic))
8194 hclge_info_show(hdev);
8199 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8200 struct hclge_vport *vport)
8202 struct hnae3_client *client = vport->roce.client;
8203 struct hclge_dev *hdev = ae_dev->priv;
8206 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8210 client = hdev->roce_client;
8211 ret = hclge_init_roce_base_info(vport);
8215 ret = client->ops->init_instance(&vport->roce);
8219 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8220 hnae3_set_client_init_flag(client, ae_dev, 1);
8225 static int hclge_init_client_instance(struct hnae3_client *client,
8226 struct hnae3_ae_dev *ae_dev)
8228 struct hclge_dev *hdev = ae_dev->priv;
8229 struct hclge_vport *vport;
8232 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8233 vport = &hdev->vport[i];
8235 switch (client->type) {
8236 case HNAE3_CLIENT_KNIC:
8238 hdev->nic_client = client;
8239 vport->nic.client = client;
8240 ret = hclge_init_nic_client_instance(ae_dev, vport);
8244 ret = hclge_init_roce_client_instance(ae_dev, vport);
8249 case HNAE3_CLIENT_UNIC:
8250 hdev->nic_client = client;
8251 vport->nic.client = client;
8253 ret = client->ops->init_instance(&vport->nic);
8257 hnae3_set_client_init_flag(client, ae_dev, 1);
8260 case HNAE3_CLIENT_ROCE:
8261 if (hnae3_dev_roce_supported(hdev)) {
8262 hdev->roce_client = client;
8263 vport->roce.client = client;
8266 ret = hclge_init_roce_client_instance(ae_dev, vport);
8279 hdev->nic_client = NULL;
8280 vport->nic.client = NULL;
8283 hdev->roce_client = NULL;
8284 vport->roce.client = NULL;
8288 static void hclge_uninit_client_instance(struct hnae3_client *client,
8289 struct hnae3_ae_dev *ae_dev)
8291 struct hclge_dev *hdev = ae_dev->priv;
8292 struct hclge_vport *vport;
8295 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8296 vport = &hdev->vport[i];
8297 if (hdev->roce_client) {
8298 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8299 hdev->roce_client->ops->uninit_instance(&vport->roce,
8301 hdev->roce_client = NULL;
8302 vport->roce.client = NULL;
8304 if (client->type == HNAE3_CLIENT_ROCE)
8306 if (hdev->nic_client && client->ops->uninit_instance) {
8307 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8308 client->ops->uninit_instance(&vport->nic, 0);
8309 hdev->nic_client = NULL;
8310 vport->nic.client = NULL;
8315 static int hclge_pci_init(struct hclge_dev *hdev)
8317 struct pci_dev *pdev = hdev->pdev;
8318 struct hclge_hw *hw;
8321 ret = pci_enable_device(pdev);
8323 dev_err(&pdev->dev, "failed to enable PCI device\n");
8327 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8329 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8332 "can't set consistent PCI DMA");
8333 goto err_disable_device;
8335 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8338 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8340 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8341 goto err_disable_device;
8344 pci_set_master(pdev);
8346 hw->io_base = pcim_iomap(pdev, 2, 0);
8348 dev_err(&pdev->dev, "Can't map configuration register space\n");
8350 goto err_clr_master;
8353 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8357 pci_clear_master(pdev);
8358 pci_release_regions(pdev);
8360 pci_disable_device(pdev);
8365 static void hclge_pci_uninit(struct hclge_dev *hdev)
8367 struct pci_dev *pdev = hdev->pdev;
8369 pcim_iounmap(pdev, hdev->hw.io_base);
8370 pci_free_irq_vectors(pdev);
8371 pci_clear_master(pdev);
8372 pci_release_mem_regions(pdev);
8373 pci_disable_device(pdev);
8376 static void hclge_state_init(struct hclge_dev *hdev)
8378 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8379 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8380 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8381 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8382 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8383 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8386 static void hclge_state_uninit(struct hclge_dev *hdev)
8388 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8389 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8391 if (hdev->service_timer.function)
8392 del_timer_sync(&hdev->service_timer);
8393 if (hdev->reset_timer.function)
8394 del_timer_sync(&hdev->reset_timer);
8395 if (hdev->service_task.func)
8396 cancel_work_sync(&hdev->service_task);
8397 if (hdev->rst_service_task.func)
8398 cancel_work_sync(&hdev->rst_service_task);
8399 if (hdev->mbx_service_task.func)
8400 cancel_work_sync(&hdev->mbx_service_task);
8403 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8405 #define HCLGE_FLR_WAIT_MS 100
8406 #define HCLGE_FLR_WAIT_CNT 50
8407 struct hclge_dev *hdev = ae_dev->priv;
8410 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8411 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8412 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8413 hclge_reset_event(hdev->pdev, NULL);
8415 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8416 cnt++ < HCLGE_FLR_WAIT_CNT)
8417 msleep(HCLGE_FLR_WAIT_MS);
8419 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8420 dev_err(&hdev->pdev->dev,
8421 "flr wait down timeout: %d\n", cnt);
8424 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8426 struct hclge_dev *hdev = ae_dev->priv;
8428 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8431 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8433 struct pci_dev *pdev = ae_dev->pdev;
8434 struct hclge_dev *hdev;
8437 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8444 hdev->ae_dev = ae_dev;
8445 hdev->reset_type = HNAE3_NONE_RESET;
8446 hdev->reset_level = HNAE3_FUNC_RESET;
8447 ae_dev->priv = hdev;
8448 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8450 mutex_init(&hdev->vport_lock);
8451 mutex_init(&hdev->vport_cfg_mutex);
8452 spin_lock_init(&hdev->fd_rule_lock);
8454 ret = hclge_pci_init(hdev);
8456 dev_err(&pdev->dev, "PCI init failed\n");
8460 /* Firmware command queue initialize */
8461 ret = hclge_cmd_queue_init(hdev);
8463 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8464 goto err_pci_uninit;
8467 /* Firmware command initialize */
8468 ret = hclge_cmd_init(hdev);
8470 goto err_cmd_uninit;
8472 ret = hclge_get_cap(hdev);
8474 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8476 goto err_cmd_uninit;
8479 ret = hclge_configure(hdev);
8481 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8482 goto err_cmd_uninit;
8485 ret = hclge_init_msi(hdev);
8487 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8488 goto err_cmd_uninit;
8491 ret = hclge_misc_irq_init(hdev);
8494 "Misc IRQ(vector0) init error, ret = %d.\n",
8496 goto err_msi_uninit;
8499 ret = hclge_alloc_tqps(hdev);
8501 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8502 goto err_msi_irq_uninit;
8505 ret = hclge_alloc_vport(hdev);
8507 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8508 goto err_msi_irq_uninit;
8511 ret = hclge_map_tqp(hdev);
8513 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8514 goto err_msi_irq_uninit;
8517 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8518 ret = hclge_mac_mdio_config(hdev);
8520 dev_err(&hdev->pdev->dev,
8521 "mdio config fail ret=%d\n", ret);
8522 goto err_msi_irq_uninit;
8526 ret = hclge_init_umv_space(hdev);
8528 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8529 goto err_mdiobus_unreg;
8532 ret = hclge_mac_init(hdev);
8534 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8535 goto err_mdiobus_unreg;
8538 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8540 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8541 goto err_mdiobus_unreg;
8544 ret = hclge_config_gro(hdev, true);
8546 goto err_mdiobus_unreg;
8548 ret = hclge_init_vlan_config(hdev);
8550 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8551 goto err_mdiobus_unreg;
8554 ret = hclge_tm_schd_init(hdev);
8556 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8557 goto err_mdiobus_unreg;
8560 hclge_rss_init_cfg(hdev);
8561 ret = hclge_rss_init_hw(hdev);
8563 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8564 goto err_mdiobus_unreg;
8567 ret = init_mgr_tbl(hdev);
8569 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8570 goto err_mdiobus_unreg;
8573 ret = hclge_init_fd_config(hdev);
8576 "fd table init fail, ret=%d\n", ret);
8577 goto err_mdiobus_unreg;
8580 ret = hclge_hw_error_set_state(hdev, true);
8583 "fail(%d) to enable hw error interrupts\n", ret);
8584 goto err_mdiobus_unreg;
8587 INIT_KFIFO(hdev->mac_tnl_log);
8589 hclge_dcb_ops_set(hdev);
8591 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8592 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8593 INIT_WORK(&hdev->service_task, hclge_service_task);
8594 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8595 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8597 hclge_clear_all_event_cause(hdev);
8599 /* Enable MISC vector(vector0) */
8600 hclge_enable_vector(&hdev->misc_vector, true);
8602 hclge_state_init(hdev);
8603 hdev->last_reset_time = jiffies;
8605 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8609 if (hdev->hw.mac.phydev)
8610 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8612 hclge_misc_irq_uninit(hdev);
8614 pci_free_irq_vectors(pdev);
8616 hclge_cmd_uninit(hdev);
8618 pcim_iounmap(pdev, hdev->hw.io_base);
8619 pci_clear_master(pdev);
8620 pci_release_regions(pdev);
8621 pci_disable_device(pdev);
8626 static void hclge_stats_clear(struct hclge_dev *hdev)
8628 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8631 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8633 struct hclge_vport *vport = hdev->vport;
8636 for (i = 0; i < hdev->num_alloc_vport; i++) {
8637 hclge_vport_stop(vport);
8642 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8644 struct hclge_dev *hdev = ae_dev->priv;
8645 struct pci_dev *pdev = ae_dev->pdev;
8648 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8650 hclge_stats_clear(hdev);
8651 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8653 ret = hclge_cmd_init(hdev);
8655 dev_err(&pdev->dev, "Cmd queue init failed\n");
8659 ret = hclge_map_tqp(hdev);
8661 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8665 hclge_reset_umv_space(hdev);
8667 ret = hclge_mac_init(hdev);
8669 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8673 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8675 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8679 ret = hclge_config_gro(hdev, true);
8683 ret = hclge_init_vlan_config(hdev);
8685 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8689 ret = hclge_tm_init_hw(hdev, true);
8691 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8695 ret = hclge_rss_init_hw(hdev);
8697 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8701 ret = hclge_init_fd_config(hdev);
8704 "fd table init fail, ret=%d\n", ret);
8708 /* Re-enable the hw error interrupts because
8709 * they get disabled on core/global reset.
8710 */
8711 ret = hclge_hw_error_set_state(hdev, true);
8714 "fail(%d) to re-enable HNS hw error interrupts\n", ret);
8718 hclge_reset_vport_state(hdev);
8720 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8726 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8728 struct hclge_dev *hdev = ae_dev->priv;
8729 struct hclge_mac *mac = &hdev->hw.mac;
8731 hclge_state_uninit(hdev);
8734 mdiobus_unregister(mac->mdio_bus);
8736 hclge_uninit_umv_space(hdev);
8738 /* Disable MISC vector(vector0) */
8739 hclge_enable_vector(&hdev->misc_vector, false);
8740 synchronize_irq(hdev->misc_vector.vector_irq);
8742 hclge_config_mac_tnl_int(hdev, false);
8743 hclge_hw_error_set_state(hdev, false);
8744 hclge_cmd_uninit(hdev);
8745 hclge_misc_irq_uninit(hdev);
8746 hclge_pci_uninit(hdev);
8747 mutex_destroy(&hdev->vport_lock);
8748 hclge_uninit_vport_mac_table(hdev);
8749 hclge_uninit_vport_vlan_table(hdev);
8750 mutex_destroy(&hdev->vport_cfg_mutex);
8751 ae_dev->priv = NULL;
8754 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8756 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8757 struct hclge_vport *vport = hclge_get_vport(handle);
8758 struct hclge_dev *hdev = vport->back;
8760 return min_t(u32, hdev->rss_size_max,
8761 vport->alloc_tqps / kinfo->num_tc);
8764 static void hclge_get_channels(struct hnae3_handle *handle,
8765 struct ethtool_channels *ch)
8767 ch->max_combined = hclge_get_max_channels(handle);
8768 ch->other_count = 1;
8770 ch->combined_count = handle->kinfo.rss_size;
8773 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8774 u16 *alloc_tqps, u16 *max_rss_size)
8776 struct hclge_vport *vport = hclge_get_vport(handle);
8777 struct hclge_dev *hdev = vport->back;
8779 *alloc_tqps = vport->alloc_tqps;
8780 *max_rss_size = hdev->rss_size_max;
8783 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8784 bool rxfh_configured)
8786 struct hclge_vport *vport = hclge_get_vport(handle);
8787 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8788 struct hclge_dev *hdev = vport->back;
8789 int cur_rss_size = kinfo->rss_size;
8790 int cur_tqps = kinfo->num_tqps;
8791 u16 tc_offset[HCLGE_MAX_TC_NUM];
8792 u16 tc_valid[HCLGE_MAX_TC_NUM];
8793 u16 tc_size[HCLGE_MAX_TC_NUM];
8798 kinfo->req_rss_size = new_tqps_num;
8800 ret = hclge_tm_vport_map_update(hdev);
8802 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8806 roundup_size = roundup_pow_of_two(kinfo->rss_size);
8807 roundup_size = ilog2(roundup_size);
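/* Editor's annotation (not in the original source): the hardware takes
 * the per-TC RSS size as a power-of-two exponent. Worked example:
 *   kinfo->rss_size = 24 -> roundup_pow_of_two(24) = 32 -> ilog2(32) = 5
 */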
8808 /* Set the RSS TC mode according to the new RSS size */
8809 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8812 if (!(hdev->hw_tc_map & BIT(i)))
8816 tc_size[i] = roundup_size;
8817 tc_offset[i] = kinfo->rss_size * i;
8819 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8823 /* RSS indirection table has been configured by user */
8824 if (rxfh_configured)
8827 /* Reinitialize the RSS indirection table according to the new RSS size */
8828 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8832 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8833 rss_indir[i] = i % kinfo->rss_size;
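/* Editor's annotation (not in the original source): this fills the
 * indirection table round-robin. Worked example with rss_size = 16:
 *   rss_indir[] = 0, 1, ..., 15, 0, 1, ... for all table entries.
 */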
8835 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8837 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8844 dev_info(&hdev->pdev->dev,
8845 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
8846 cur_rss_size, kinfo->rss_size,
8847 cur_tqps, kinfo->rss_size * kinfo->num_tc);
8852 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8853 u32 *regs_num_64_bit)
8855 struct hclge_desc desc;
8859 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8860 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8862 dev_err(&hdev->pdev->dev,
8863 "Query register number cmd failed, ret = %d.\n", ret);
8867 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8868 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8870 total_num = *regs_num_32_bit + *regs_num_64_bit;
8877 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8880 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8882 struct hclge_desc *desc;
8883 u32 *reg_val = data;
8892 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
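/* Editor's annotation (not in the original source): judging from the
 * read loop below, the first descriptor returns
 * HCLGE_32_BIT_REG_RTN_DATANUM - 2 = 6 register words (its two header
 * words carry no data) and each following descriptor returns 8, hence
 * the "+ 2" above. Worked example:
 *   regs_num = 30 -> cmd_num = DIV_ROUND_UP(32, 8) = 4 -> 6 + 3 * 8 = 30
 */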
8893 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8897 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8898 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8900 dev_err(&hdev->pdev->dev,
8901 "Query 32 bit register cmd failed, ret = %d.\n", ret);
8906 for (i = 0; i < cmd_num; i++) {
8908 desc_data = (__le32 *)(&desc[i].data[0]);
8909 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8911 desc_data = (__le32 *)(&desc[i]);
8912 n = HCLGE_32_BIT_REG_RTN_DATANUM;
8914 for (k = 0; k < n; k++) {
8915 *reg_val++ = le32_to_cpu(*desc_data++);
8927 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8930 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8932 struct hclge_desc *desc;
8933 u64 *reg_val = data;
8942 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
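/* Editor's annotation (not in the original source): same layout as the
 * 32 bit query, in 64 bit units: the first descriptor returns
 * HCLGE_64_BIT_REG_RTN_DATANUM - 1 = 3 values and each following one
 * returns 4, hence the "+ 1" above. Worked example:
 *   regs_num = 15 -> cmd_num = DIV_ROUND_UP(16, 4) = 4 -> 3 + 3 * 4 = 15
 */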
8943 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8947 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8948 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8950 dev_err(&hdev->pdev->dev,
8951 "Query 64 bit register cmd failed, ret = %d.\n", ret);
8956 for (i = 0; i < cmd_num; i++) {
8958 desc_data = (__le64 *)(&desc[i].data[0]);
8959 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8961 desc_data = (__le64 *)(&desc[i]);
8962 n = HCLGE_64_BIT_REG_RTN_DATANUM;
8964 for (k = 0; k < n; k++) {
8965 *reg_val++ = le64_to_cpu(*desc_data++);
8977 #define MAX_SEPARATE_NUM 4
8978 #define SEPARATOR_VALUE 0xFFFFFFFF
8979 #define REG_NUM_PER_LINE 4
8980 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
8982 static int hclge_get_regs_len(struct hnae3_handle *handle)
8984 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8985 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8986 struct hclge_vport *vport = hclge_get_vport(handle);
8987 struct hclge_dev *hdev = vport->back;
8988 u32 regs_num_32_bit, regs_num_64_bit;
8991 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8993 dev_err(&hdev->pdev->dev,
8994 "Get register number failed, ret = %d.\n", ret);
8998 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8999 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9000 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9001 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
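/* Editor's annotation (not in the original source): each dumped line is
 * REG_NUM_PER_LINE = 4 u32s (16 bytes), and every register block is
 * padded with separator values up to a whole line, hence the "+ 1" per
 * block. Worked example for a hypothetical block of 14 registers:
 *   56 bytes / 16 + 1 = 4 lines; the dump emits 14 values plus
 *   4 - 14 % 4 = 2 separators = 16 u32s = 4 lines, matching.
 */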
9003 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9004 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9005 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9008 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9011 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9012 struct hclge_vport *vport = hclge_get_vport(handle);
9013 struct hclge_dev *hdev = vport->back;
9014 u32 regs_num_32_bit, regs_num_64_bit;
9015 int i, j, reg_um, separator_num;
9019 *version = hdev->fw_version;
9021 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9023 dev_err(&hdev->pdev->dev,
9024 "Get register number failed, ret = %d.\n", ret);
9028 /* fetching per-PF register values from the PF PCIe register space */
9029 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9030 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9031 for (i = 0; i < reg_um; i++)
9032 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9033 for (i = 0; i < separator_num; i++)
9034 *reg++ = SEPARATOR_VALUE;
9036 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9037 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9038 for (i = 0; i < reg_um; i++)
9039 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9040 for (i = 0; i < separator_num; i++)
9041 *reg++ = SEPARATOR_VALUE;
9043 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9044 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9045 for (j = 0; j < kinfo->num_tqps; j++) {
9046 for (i = 0; i < reg_um; i++)
9047 *reg++ = hclge_read_dev(&hdev->hw,
9048 ring_reg_addr_list[i] +
9050 for (i = 0; i < separator_num; i++)
9051 *reg++ = SEPARATOR_VALUE;
9054 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9055 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9056 for (j = 0; j < hdev->num_msi_used - 1; j++) {
9057 for (i = 0; i < reg_um; i++)
9058 *reg++ = hclge_read_dev(&hdev->hw,
9059 tqp_intr_reg_addr_list[i] +
9061 for (i = 0; i < separator_num; i++)
9062 *reg++ = SEPARATOR_VALUE;
9065 /* fetching PF common registers values from firmware */
9066 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9068 dev_err(&hdev->pdev->dev,
9069 "Get 32 bit register failed, ret = %d.\n", ret);
9073 reg += regs_num_32_bit;
9074 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9076 dev_err(&hdev->pdev->dev,
9077 "Get 64 bit register failed, ret = %d.\n", ret);
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
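
/* Typically reached via `ethtool -p <netdev>`: the ethtool core maps
 * that to ETHTOOL_ID_ACTIVE/ETHTOOL_ID_INACTIVE transitions.  Other
 * states (e.g. software-driven ETHTOOL_ID_ON/ETHTOOL_ID_OFF blinking)
 * are rejected with -EINVAL, since the firmware only exposes a plain
 * on/off locate LED here.
 */

/* Copy out the cached ethtool link-mode bitmaps (supported and
 * advertised) from the MAC state; both masks are
 * BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS) longs wide.
 */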
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
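
/* PF operations exported to the hnae3 framework.  Client drivers never
 * call into hclge directly; every entry point goes through this table,
 * which lets the same upper layer also bind to the VF backend (hclgevf)
 * through a different ops table.
 */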
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_pause_stats = hclge_get_mac_pause_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
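
/* Module init/exit only (un)register the algorithm with the hnae3
 * core; actual device setup happens when the core matches a PCI device
 * from ae_algo_pci_tbl against a registered ae_dev.
 */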
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}

module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);