1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <net/rtnetlink.h>
16 #include "hclge_cmd.h"
17 #include "hclge_dcb.h"
18 #include "hclge_main.h"
19 #include "hclge_mbx.h"
20 #include "hclge_mdio.h"
22 #include "hclge_err.h"
25 #define HCLGE_NAME "hclge"
26 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
27 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 #define HCLGE_BUF_SIZE_UNIT 256
31 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
32 static int hclge_init_vlan_config(struct hclge_dev *hdev);
33 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
34 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
35 u16 *allocated_size, bool is_alloc);
37 static struct hnae3_ae_algo ae_algo;
39 static const struct pci_device_id ae_algo_pci_tbl[] = {
40 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
41 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
47 /* required last entry */
51 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
53 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
54 HCLGE_CMDQ_TX_ADDR_H_REG,
55 HCLGE_CMDQ_TX_DEPTH_REG,
56 HCLGE_CMDQ_TX_TAIL_REG,
57 HCLGE_CMDQ_TX_HEAD_REG,
58 HCLGE_CMDQ_RX_ADDR_L_REG,
59 HCLGE_CMDQ_RX_ADDR_H_REG,
60 HCLGE_CMDQ_RX_DEPTH_REG,
61 HCLGE_CMDQ_RX_TAIL_REG,
62 HCLGE_CMDQ_RX_HEAD_REG,
63 HCLGE_VECTOR0_CMDQ_SRC_REG,
64 HCLGE_CMDQ_INTR_STS_REG,
65 HCLGE_CMDQ_INTR_EN_REG,
66 HCLGE_CMDQ_INTR_GEN_REG};
68 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
69 HCLGE_VECTOR0_OTER_EN_REG,
70 HCLGE_MISC_RESET_STS_REG,
71 HCLGE_MISC_VECTOR_INT_STS,
72 HCLGE_GLOBAL_RESET_REG,
76 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
77 HCLGE_RING_RX_ADDR_H_REG,
78 HCLGE_RING_RX_BD_NUM_REG,
79 HCLGE_RING_RX_BD_LENGTH_REG,
80 HCLGE_RING_RX_MERGE_EN_REG,
81 HCLGE_RING_RX_TAIL_REG,
82 HCLGE_RING_RX_HEAD_REG,
83 HCLGE_RING_RX_FBD_NUM_REG,
84 HCLGE_RING_RX_OFFSET_REG,
85 HCLGE_RING_RX_FBD_OFFSET_REG,
86 HCLGE_RING_RX_STASH_REG,
87 HCLGE_RING_RX_BD_ERR_REG,
88 HCLGE_RING_TX_ADDR_L_REG,
89 HCLGE_RING_TX_ADDR_H_REG,
90 HCLGE_RING_TX_BD_NUM_REG,
91 HCLGE_RING_TX_PRIORITY_REG,
93 HCLGE_RING_TX_MERGE_EN_REG,
94 HCLGE_RING_TX_TAIL_REG,
95 HCLGE_RING_TX_HEAD_REG,
96 HCLGE_RING_TX_FBD_NUM_REG,
97 HCLGE_RING_TX_OFFSET_REG,
98 HCLGE_RING_TX_EBD_NUM_REG,
99 HCLGE_RING_TX_EBD_OFFSET_REG,
100 HCLGE_RING_TX_BD_ERR_REG,
103 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
104 HCLGE_TQP_INTR_GL0_REG,
105 HCLGE_TQP_INTR_GL1_REG,
106 HCLGE_TQP_INTR_GL2_REG,
107 HCLGE_TQP_INTR_RL_REG};
109 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
111 "Serdes serial Loopback test",
112 "Serdes parallel Loopback test",
116 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
117 {"mac_tx_mac_pause_num",
118 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
119 {"mac_rx_mac_pause_num",
120 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
121 {"mac_tx_control_pkt_num",
122 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
123 {"mac_rx_control_pkt_num",
124 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
125 {"mac_tx_pfc_pkt_num",
126 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
127 {"mac_tx_pfc_pri0_pkt_num",
128 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
129 {"mac_tx_pfc_pri1_pkt_num",
130 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
131 {"mac_tx_pfc_pri2_pkt_num",
132 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
133 {"mac_tx_pfc_pri3_pkt_num",
134 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
135 {"mac_tx_pfc_pri4_pkt_num",
136 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
137 {"mac_tx_pfc_pri5_pkt_num",
138 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
139 {"mac_tx_pfc_pri6_pkt_num",
140 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
141 {"mac_tx_pfc_pri7_pkt_num",
142 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
143 {"mac_rx_pfc_pkt_num",
144 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
145 {"mac_rx_pfc_pri0_pkt_num",
146 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
147 {"mac_rx_pfc_pri1_pkt_num",
148 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
149 {"mac_rx_pfc_pri2_pkt_num",
150 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
151 {"mac_rx_pfc_pri3_pkt_num",
152 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
153 {"mac_rx_pfc_pri4_pkt_num",
154 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
155 {"mac_rx_pfc_pri5_pkt_num",
156 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
157 {"mac_rx_pfc_pri6_pkt_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
159 {"mac_rx_pfc_pri7_pkt_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
161 {"mac_tx_total_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
163 {"mac_tx_total_oct_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
165 {"mac_tx_good_pkt_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
167 {"mac_tx_bad_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
169 {"mac_tx_good_oct_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
171 {"mac_tx_bad_oct_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
173 {"mac_tx_uni_pkt_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
175 {"mac_tx_multi_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
177 {"mac_tx_broad_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
179 {"mac_tx_undersize_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
181 {"mac_tx_oversize_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
183 {"mac_tx_64_oct_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
185 {"mac_tx_65_127_oct_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
187 {"mac_tx_128_255_oct_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
189 {"mac_tx_256_511_oct_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
191 {"mac_tx_512_1023_oct_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
193 {"mac_tx_1024_1518_oct_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
195 {"mac_tx_1519_2047_oct_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
197 {"mac_tx_2048_4095_oct_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
199 {"mac_tx_4096_8191_oct_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
201 {"mac_tx_8192_9216_oct_pkt_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
203 {"mac_tx_9217_12287_oct_pkt_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
205 {"mac_tx_12288_16383_oct_pkt_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
207 {"mac_tx_1519_max_good_pkt_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
209 {"mac_tx_1519_max_bad_pkt_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
211 {"mac_rx_total_pkt_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
213 {"mac_rx_total_oct_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
215 {"mac_rx_good_pkt_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
217 {"mac_rx_bad_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
219 {"mac_rx_good_oct_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
221 {"mac_rx_bad_oct_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
223 {"mac_rx_uni_pkt_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
225 {"mac_rx_multi_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
227 {"mac_rx_broad_pkt_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
229 {"mac_rx_undersize_pkt_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
231 {"mac_rx_oversize_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
233 {"mac_rx_64_oct_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
235 {"mac_rx_65_127_oct_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
237 {"mac_rx_128_255_oct_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
239 {"mac_rx_256_511_oct_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
241 {"mac_rx_512_1023_oct_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
243 {"mac_rx_1024_1518_oct_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
245 {"mac_rx_1519_2047_oct_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
247 {"mac_rx_2048_4095_oct_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
249 {"mac_rx_4096_8191_oct_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
251 {"mac_rx_8192_9216_oct_pkt_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
253 {"mac_rx_9217_12287_oct_pkt_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
255 {"mac_rx_12288_16383_oct_pkt_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
257 {"mac_rx_1519_max_good_pkt_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
259 {"mac_rx_1519_max_bad_pkt_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
262 {"mac_tx_fragment_pkt_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
264 {"mac_tx_undermin_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
266 {"mac_tx_jabber_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
268 {"mac_tx_err_all_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
270 {"mac_tx_from_app_good_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
272 {"mac_tx_from_app_bad_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
274 {"mac_rx_fragment_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
276 {"mac_rx_undermin_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
278 {"mac_rx_jabber_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
280 {"mac_rx_fcs_err_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
282 {"mac_rx_send_app_good_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
284 {"mac_rx_send_app_bad_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
288 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
290 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
291 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
292 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
293 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
294 .i_port_bitmap = 0x1,
298 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
300 #define HCLGE_MAC_CMD_NUM 21
302 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
303 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
308 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
309 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
311 dev_err(&hdev->pdev->dev,
312 "Get MAC pkt stats fail, status = %d.\n", ret);
317 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
318 /* for special opcode 0032, only the first desc has the head */
319 if (unlikely(i == 0)) {
320 desc_data = (__le64 *)(&desc[i].data[0]);
321 n = HCLGE_RD_FIRST_STATS_NUM;
323 desc_data = (__le64 *)(&desc[i]);
324 n = HCLGE_RD_OTHER_STATS_NUM;
327 for (k = 0; k < n; k++) {
328 *data += le64_to_cpu(*desc_data);
337 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
339 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
340 struct hclge_desc *desc;
345 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
346 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
347 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
353 for (i = 0; i < desc_num; i++) {
354 /* for special opcode 0034, only the first desc has the head */
356 desc_data = (__le64 *)(&desc[i].data[0]);
357 n = HCLGE_RD_FIRST_STATS_NUM;
359 desc_data = (__le64 *)(&desc[i]);
360 n = HCLGE_RD_OTHER_STATS_NUM;
363 for (k = 0; k < n; k++) {
364 *data += le64_to_cpu(*desc_data);
375 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
377 struct hclge_desc desc;
382 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
383 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
387 desc_data = (__le32 *)(&desc.data[0]);
388 reg_num = le32_to_cpu(*desc_data);
390 *desc_num = 1 + ((reg_num - 3) >> 2) +
391 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
396 static int hclge_mac_update_stats(struct hclge_dev *hdev)
401 ret = hclge_mac_query_reg_num(hdev, &desc_num);
403 /* The firmware supports the new statistics acquisition method */
405 ret = hclge_mac_update_stats_complete(hdev, desc_num);
406 else if (ret == -EOPNOTSUPP)
407 ret = hclge_mac_update_stats_defective(hdev);
409 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
414 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
416 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
417 struct hclge_vport *vport = hclge_get_vport(handle);
418 struct hclge_dev *hdev = vport->back;
419 struct hnae3_queue *queue;
420 struct hclge_desc desc[1];
421 struct hclge_tqp *tqp;
424 for (i = 0; i < kinfo->num_tqps; i++) {
425 queue = handle->kinfo.tqp[i];
426 tqp = container_of(queue, struct hclge_tqp, q);
427 /* command : HCLGE_OPC_QUERY_IGU_STAT */
428 hclge_cmd_setup_basic_desc(&desc[0],
429 HCLGE_OPC_QUERY_RX_STATUS,
432 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
433 ret = hclge_cmd_send(&hdev->hw, desc, 1);
435 dev_err(&hdev->pdev->dev,
436 "Query tqp stat fail, status = %d,queue = %d\n",
440 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
441 le32_to_cpu(desc[0].data[1]);
444 for (i = 0; i < kinfo->num_tqps; i++) {
445 queue = handle->kinfo.tqp[i];
446 tqp = container_of(queue, struct hclge_tqp, q);
447 /* command : HCLGE_OPC_QUERY_IGU_STAT */
448 hclge_cmd_setup_basic_desc(&desc[0],
449 HCLGE_OPC_QUERY_TX_STATUS,
452 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
453 ret = hclge_cmd_send(&hdev->hw, desc, 1);
455 dev_err(&hdev->pdev->dev,
456 "Query tqp stat fail, status = %d,queue = %d\n",
460 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
461 le32_to_cpu(desc[0].data[1]);
467 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
469 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
470 struct hclge_tqp *tqp;
474 for (i = 0; i < kinfo->num_tqps; i++) {
475 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
476 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
479 for (i = 0; i < kinfo->num_tqps; i++) {
480 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
481 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
487 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
489 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
491 return kinfo->num_tqps * (2);
494 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
496 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
500 for (i = 0; i < kinfo->num_tqps; i++) {
501 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
502 struct hclge_tqp, q);
503 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
505 buff = buff + ETH_GSTRING_LEN;
508 for (i = 0; i < kinfo->num_tqps; i++) {
509 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
510 struct hclge_tqp, q);
511 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
513 buff = buff + ETH_GSTRING_LEN;
519 static u64 *hclge_comm_get_stats(void *comm_stats,
520 const struct hclge_comm_stats_str strs[],
526 for (i = 0; i < size; i++)
527 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
532 static u8 *hclge_comm_get_strings(u32 stringset,
533 const struct hclge_comm_stats_str strs[],
536 char *buff = (char *)data;
539 if (stringset != ETH_SS_STATS)
542 for (i = 0; i < size; i++) {
543 snprintf(buff, ETH_GSTRING_LEN,
545 buff = buff + ETH_GSTRING_LEN;
551 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
553 struct hnae3_handle *handle;
556 handle = &hdev->vport[0].nic;
557 if (handle->client) {
558 status = hclge_tqps_update_stats(handle);
560 dev_err(&hdev->pdev->dev,
561 "Update TQPS stats fail, status = %d.\n",
566 status = hclge_mac_update_stats(hdev);
568 dev_err(&hdev->pdev->dev,
569 "Update MAC stats fail, status = %d.\n", status);
572 static void hclge_update_stats(struct hnae3_handle *handle,
573 struct net_device_stats *net_stats)
575 struct hclge_vport *vport = hclge_get_vport(handle);
576 struct hclge_dev *hdev = vport->back;
579 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
582 status = hclge_mac_update_stats(hdev);
584 dev_err(&hdev->pdev->dev,
585 "Update MAC stats fail, status = %d.\n",
588 status = hclge_tqps_update_stats(handle);
590 dev_err(&hdev->pdev->dev,
591 "Update TQPS stats fail, status = %d.\n",
594 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
597 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
599 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
600 HNAE3_SUPPORT_PHY_LOOPBACK |\
601 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
602 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
604 struct hclge_vport *vport = hclge_get_vport(handle);
605 struct hclge_dev *hdev = vport->back;
608 /* Loopback test support rules:
609 * mac: only GE mode support
610 * serdes: all mac mode will support include GE/XGE/LGE/CGE
611 * phy: only support when phy device exist on board
613 if (stringset == ETH_SS_TEST) {
614 /* clear loopback bit flags at first */
615 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
616 if (hdev->pdev->revision >= 0x21 ||
617 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
618 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
619 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
621 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
625 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
626 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
627 } else if (stringset == ETH_SS_STATS) {
628 count = ARRAY_SIZE(g_mac_stats_string) +
629 hclge_tqps_get_sset_count(handle, stringset);
635 static void hclge_get_strings(struct hnae3_handle *handle,
639 u8 *p = (char *)data;
642 if (stringset == ETH_SS_STATS) {
643 size = ARRAY_SIZE(g_mac_stats_string);
644 p = hclge_comm_get_strings(stringset,
648 p = hclge_tqps_get_strings(handle, p);
649 } else if (stringset == ETH_SS_TEST) {
650 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
652 hns3_nic_test_strs[HNAE3_LOOP_APP],
654 p += ETH_GSTRING_LEN;
656 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
658 hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
660 p += ETH_GSTRING_LEN;
662 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
664 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
666 p += ETH_GSTRING_LEN;
668 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
670 hns3_nic_test_strs[HNAE3_LOOP_PHY],
672 p += ETH_GSTRING_LEN;
677 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
679 struct hclge_vport *vport = hclge_get_vport(handle);
680 struct hclge_dev *hdev = vport->back;
683 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
685 ARRAY_SIZE(g_mac_stats_string),
687 p = hclge_tqps_get_stats(handle, p);
690 static int hclge_parse_func_status(struct hclge_dev *hdev,
691 struct hclge_func_status_cmd *status)
693 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
696 /* Set the pf to main pf */
697 if (status->pf_state & HCLGE_PF_STATE_MAIN)
698 hdev->flag |= HCLGE_FLAG_MAIN;
700 hdev->flag &= ~HCLGE_FLAG_MAIN;
705 static int hclge_query_function_status(struct hclge_dev *hdev)
707 struct hclge_func_status_cmd *req;
708 struct hclge_desc desc;
712 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
713 req = (struct hclge_func_status_cmd *)desc.data;
716 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
718 dev_err(&hdev->pdev->dev,
719 "query function status failed %d.\n",
725 /* Check pf reset is done */
728 usleep_range(1000, 2000);
729 } while (timeout++ < 5);
731 ret = hclge_parse_func_status(hdev, req);
736 static int hclge_query_pf_resource(struct hclge_dev *hdev)
738 struct hclge_pf_res_cmd *req;
739 struct hclge_desc desc;
742 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
743 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
745 dev_err(&hdev->pdev->dev,
746 "query pf resource failed %d.\n", ret);
750 req = (struct hclge_pf_res_cmd *)desc.data;
751 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
752 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
754 if (req->tx_buf_size)
756 __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
758 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
760 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
762 if (req->dv_buf_size)
764 __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
766 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
768 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
770 if (hnae3_dev_roce_supported(hdev)) {
771 hdev->roce_base_msix_offset =
772 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
773 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
775 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
776 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
778 /* PF should have NIC vectors and Roce vectors,
779 * NIC vectors are queued before Roce vectors.
781 hdev->num_msi = hdev->num_roce_msi +
782 hdev->roce_base_msix_offset;
785 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
786 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
792 static int hclge_parse_speed(int speed_cmd, int *speed)
796 *speed = HCLGE_MAC_SPEED_10M;
799 *speed = HCLGE_MAC_SPEED_100M;
802 *speed = HCLGE_MAC_SPEED_1G;
805 *speed = HCLGE_MAC_SPEED_10G;
808 *speed = HCLGE_MAC_SPEED_25G;
811 *speed = HCLGE_MAC_SPEED_40G;
814 *speed = HCLGE_MAC_SPEED_50G;
817 *speed = HCLGE_MAC_SPEED_100G;
826 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
829 unsigned long *supported = hdev->hw.mac.supported;
831 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
832 set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
835 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
836 set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
839 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
840 set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
843 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
844 set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
847 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
848 set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
851 set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
852 set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
855 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
857 u8 media_type = hdev->hw.mac.media_type;
859 if (media_type != HNAE3_MEDIA_TYPE_FIBER)
862 hclge_parse_fiber_link_mode(hdev, speed_ability);
865 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
867 struct hclge_cfg_param_cmd *req;
868 u64 mac_addr_tmp_high;
872 req = (struct hclge_cfg_param_cmd *)desc[0].data;
874 /* get the configuration */
875 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
878 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
879 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
880 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
881 HCLGE_CFG_TQP_DESC_N_M,
882 HCLGE_CFG_TQP_DESC_N_S);
884 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
885 HCLGE_CFG_PHY_ADDR_M,
886 HCLGE_CFG_PHY_ADDR_S);
887 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
888 HCLGE_CFG_MEDIA_TP_M,
889 HCLGE_CFG_MEDIA_TP_S);
890 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
891 HCLGE_CFG_RX_BUF_LEN_M,
892 HCLGE_CFG_RX_BUF_LEN_S);
893 /* get mac_address */
894 mac_addr_tmp = __le32_to_cpu(req->param[2]);
895 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
896 HCLGE_CFG_MAC_ADDR_H_M,
897 HCLGE_CFG_MAC_ADDR_H_S);
899 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
901 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
902 HCLGE_CFG_DEFAULT_SPEED_M,
903 HCLGE_CFG_DEFAULT_SPEED_S);
904 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
905 HCLGE_CFG_RSS_SIZE_M,
906 HCLGE_CFG_RSS_SIZE_S);
908 for (i = 0; i < ETH_ALEN; i++)
909 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
911 req = (struct hclge_cfg_param_cmd *)desc[1].data;
912 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
914 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
915 HCLGE_CFG_SPEED_ABILITY_M,
916 HCLGE_CFG_SPEED_ABILITY_S);
917 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
918 HCLGE_CFG_UMV_TBL_SPACE_M,
919 HCLGE_CFG_UMV_TBL_SPACE_S);
921 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
924 /* hclge_get_cfg: query the static parameter from flash
925 * @hdev: pointer to struct hclge_dev
926 * @hcfg: the config structure to be getted
928 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
930 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
931 struct hclge_cfg_param_cmd *req;
934 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
937 req = (struct hclge_cfg_param_cmd *)desc[i].data;
938 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
940 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
941 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
942 /* Len should be united by 4 bytes when send to hardware */
943 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
944 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
945 req->offset = cpu_to_le32(offset);
948 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
950 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
954 hclge_parse_cfg(hcfg, desc);
959 static int hclge_get_cap(struct hclge_dev *hdev)
963 ret = hclge_query_function_status(hdev);
965 dev_err(&hdev->pdev->dev,
966 "query function status error %d.\n", ret);
970 /* get pf resource */
971 ret = hclge_query_pf_resource(hdev);
973 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
978 static int hclge_configure(struct hclge_dev *hdev)
980 struct hclge_cfg cfg;
983 ret = hclge_get_cfg(hdev, &cfg);
985 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
989 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
990 hdev->base_tqp_pid = 0;
991 hdev->rss_size_max = cfg.rss_size_max;
992 hdev->rx_buf_len = cfg.rx_buf_len;
993 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
994 hdev->hw.mac.media_type = cfg.media_type;
995 hdev->hw.mac.phy_addr = cfg.phy_addr;
996 hdev->num_desc = cfg.tqp_desc_num;
997 hdev->tm_info.num_pg = 1;
998 hdev->tc_max = cfg.tc_num;
999 hdev->tm_info.hw_pfc_map = 0;
1000 hdev->wanted_umv_size = cfg.umv_space;
1002 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1004 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1008 hclge_parse_link_mode(hdev, cfg.speed_ability);
1010 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1011 (hdev->tc_max < 1)) {
1012 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1017 /* Dev does not support DCB */
1018 if (!hnae3_dev_dcb_supported(hdev)) {
1022 hdev->pfc_max = hdev->tc_max;
1025 hdev->tm_info.num_tc = 1;
1027 /* Currently not support uncontiuous tc */
1028 for (i = 0; i < hdev->tm_info.num_tc; i++)
1029 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1031 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1036 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1039 struct hclge_cfg_tso_status_cmd *req;
1040 struct hclge_desc desc;
1043 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1045 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1048 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1049 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1050 req->tso_mss_min = cpu_to_le16(tso_mss);
1053 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1054 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1055 req->tso_mss_max = cpu_to_le16(tso_mss);
1057 return hclge_cmd_send(&hdev->hw, &desc, 1);
1060 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1062 struct hclge_cfg_gro_status_cmd *req;
1063 struct hclge_desc desc;
1066 if (!hnae3_dev_gro_supported(hdev))
1069 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1070 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1072 req->gro_en = cpu_to_le16(en ? 1 : 0);
1074 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1076 dev_err(&hdev->pdev->dev,
1077 "GRO hardware config cmd failed, ret = %d\n", ret);
1082 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1084 struct hclge_tqp *tqp;
1087 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1088 sizeof(struct hclge_tqp), GFP_KERNEL);
1094 for (i = 0; i < hdev->num_tqps; i++) {
1095 tqp->dev = &hdev->pdev->dev;
1098 tqp->q.ae_algo = &ae_algo;
1099 tqp->q.buf_size = hdev->rx_buf_len;
1100 tqp->q.desc_num = hdev->num_desc;
1101 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1102 i * HCLGE_TQP_REG_SIZE;
1110 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1111 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1113 struct hclge_tqp_map_cmd *req;
1114 struct hclge_desc desc;
1117 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1119 req = (struct hclge_tqp_map_cmd *)desc.data;
1120 req->tqp_id = cpu_to_le16(tqp_pid);
1121 req->tqp_vf = func_id;
1122 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1123 1 << HCLGE_TQP_MAP_EN_B;
1124 req->tqp_vid = cpu_to_le16(tqp_vid);
1126 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1128 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1133 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1135 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1136 struct hclge_dev *hdev = vport->back;
1139 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1140 alloced < num_tqps; i++) {
1141 if (!hdev->htqp[i].alloced) {
1142 hdev->htqp[i].q.handle = &vport->nic;
1143 hdev->htqp[i].q.tqp_index = alloced;
1144 hdev->htqp[i].q.desc_num = kinfo->num_desc;
1145 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1146 hdev->htqp[i].alloced = true;
1150 vport->alloc_tqps = alloced;
1151 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1152 vport->alloc_tqps / hdev->tm_info.num_tc);
1157 static int hclge_knic_setup(struct hclge_vport *vport,
1158 u16 num_tqps, u16 num_desc)
1160 struct hnae3_handle *nic = &vport->nic;
1161 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1162 struct hclge_dev *hdev = vport->back;
1165 kinfo->num_desc = num_desc;
1166 kinfo->rx_buf_len = hdev->rx_buf_len;
1168 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1169 sizeof(struct hnae3_queue *), GFP_KERNEL);
1173 ret = hclge_assign_tqp(vport, num_tqps);
1175 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1180 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1181 struct hclge_vport *vport)
1183 struct hnae3_handle *nic = &vport->nic;
1184 struct hnae3_knic_private_info *kinfo;
1187 kinfo = &nic->kinfo;
1188 for (i = 0; i < vport->alloc_tqps; i++) {
1189 struct hclge_tqp *q =
1190 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1194 is_pf = !(vport->vport_id);
1195 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1204 static int hclge_map_tqp(struct hclge_dev *hdev)
1206 struct hclge_vport *vport = hdev->vport;
1209 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1210 for (i = 0; i < num_vport; i++) {
1213 ret = hclge_map_tqp_to_vport(hdev, vport);
1223 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1225 /* this would be initialized later */
1228 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1230 struct hnae3_handle *nic = &vport->nic;
1231 struct hclge_dev *hdev = vport->back;
1234 nic->pdev = hdev->pdev;
1235 nic->ae_algo = &ae_algo;
1236 nic->numa_node_mask = hdev->numa_node_mask;
1238 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1239 ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
1241 dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1246 hclge_unic_setup(vport, num_tqps);
1252 static int hclge_alloc_vport(struct hclge_dev *hdev)
1254 struct pci_dev *pdev = hdev->pdev;
1255 struct hclge_vport *vport;
1261 /* We need to alloc a vport for main NIC of PF */
1262 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1264 if (hdev->num_tqps < num_vport) {
1265 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1266 hdev->num_tqps, num_vport);
1270 /* Alloc the same number of TQPs for every vport */
1271 tqp_per_vport = hdev->num_tqps / num_vport;
1272 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1274 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1279 hdev->vport = vport;
1280 hdev->num_alloc_vport = num_vport;
1282 if (IS_ENABLED(CONFIG_PCI_IOV))
1283 hdev->num_alloc_vfs = hdev->num_req_vfs;
1285 for (i = 0; i < num_vport; i++) {
1287 vport->vport_id = i;
1288 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1291 ret = hclge_vport_setup(vport, tqp_main_vport);
1293 ret = hclge_vport_setup(vport, tqp_per_vport);
1296 "vport setup failed for vport %d, %d\n",
1307 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1308 struct hclge_pkt_buf_alloc *buf_alloc)
1310 /* TX buffer size is unit by 128 byte */
1311 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1312 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1313 struct hclge_tx_buff_alloc_cmd *req;
1314 struct hclge_desc desc;
1318 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1320 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1321 for (i = 0; i < HCLGE_TC_NUM; i++) {
1322 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1324 req->tx_pkt_buff[i] =
1325 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1326 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1329 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1331 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1337 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1338 struct hclge_pkt_buf_alloc *buf_alloc)
1340 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1343 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1348 static int hclge_get_tc_num(struct hclge_dev *hdev)
1352 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1353 if (hdev->hw_tc_map & BIT(i))
1358 static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
1362 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1363 if (hdev->hw_tc_map & BIT(i) &&
1364 hdev->tm_info.hw_pfc_map & BIT(i))
1369 /* Get the number of pfc enabled TCs, which have private buffer */
1370 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1371 struct hclge_pkt_buf_alloc *buf_alloc)
1373 struct hclge_priv_buf *priv;
1376 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1377 priv = &buf_alloc->priv_buf[i];
1378 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1386 /* Get the number of pfc disabled TCs, which have private buffer */
1387 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1388 struct hclge_pkt_buf_alloc *buf_alloc)
1390 struct hclge_priv_buf *priv;
1393 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1394 priv = &buf_alloc->priv_buf[i];
1395 if (hdev->hw_tc_map & BIT(i) &&
1396 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1404 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1406 struct hclge_priv_buf *priv;
1410 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1411 priv = &buf_alloc->priv_buf[i];
1413 rx_priv += priv->buf_size;
1418 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1420 u32 i, total_tx_size = 0;
1422 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1423 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1425 return total_tx_size;
1428 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1429 struct hclge_pkt_buf_alloc *buf_alloc,
1432 u32 shared_buf_min, shared_buf_tc, shared_std;
1433 int tc_num, pfc_enable_num;
1434 u32 shared_buf, aligned_mps;
1438 tc_num = hclge_get_tc_num(hdev);
1439 pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
1440 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1442 if (hnae3_dev_dcb_supported(hdev))
1443 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1445 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1446 + hdev->dv_buf_size;
1448 shared_buf_tc = pfc_enable_num * aligned_mps +
1449 (tc_num - pfc_enable_num) * aligned_mps / 2 +
1451 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1452 HCLGE_BUF_SIZE_UNIT);
1454 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1455 if (rx_all < rx_priv + shared_std)
1458 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1459 buf_alloc->s_buf.buf_size = shared_buf;
1460 if (hnae3_dev_dcb_supported(hdev)) {
1461 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1462 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1463 - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1465 buf_alloc->s_buf.self.high = aligned_mps +
1466 HCLGE_NON_DCB_ADDITIONAL_BUF;
1467 buf_alloc->s_buf.self.low =
1468 roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1471 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1472 if ((hdev->hw_tc_map & BIT(i)) &&
1473 (hdev->tm_info.hw_pfc_map & BIT(i))) {
1474 buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
1475 buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
1477 buf_alloc->s_buf.tc_thrd[i].low = 0;
1478 buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
1485 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1486 struct hclge_pkt_buf_alloc *buf_alloc)
1490 total_size = hdev->pkt_buf_size;
1492 /* alloc tx buffer for all enabled tc */
1493 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1494 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1496 if (total_size < hdev->tx_buf_size)
1499 if (hdev->hw_tc_map & BIT(i))
1500 priv->tx_buf_size = hdev->tx_buf_size;
1502 priv->tx_buf_size = 0;
1504 total_size -= priv->tx_buf_size;
1510 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1511 * @hdev: pointer to struct hclge_dev
1512 * @buf_alloc: pointer to buffer calculation data
1513 * @return: 0: calculate sucessful, negative: fail
1515 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1516 struct hclge_pkt_buf_alloc *buf_alloc)
1518 u32 rx_all = hdev->pkt_buf_size, aligned_mps;
1519 int no_pfc_priv_num, pfc_priv_num;
1520 struct hclge_priv_buf *priv;
1523 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1524 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1526 /* When DCB is not supported, rx private
1527 * buffer is not allocated.
1529 if (!hnae3_dev_dcb_supported(hdev)) {
1530 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1536 /* step 1, try to alloc private buffer for all enabled tc */
1537 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1538 priv = &buf_alloc->priv_buf[i];
1539 if (hdev->hw_tc_map & BIT(i)) {
1541 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1542 priv->wl.low = aligned_mps;
1544 roundup(priv->wl.low + aligned_mps,
1545 HCLGE_BUF_SIZE_UNIT);
1546 priv->buf_size = priv->wl.high +
1550 priv->wl.high = 2 * aligned_mps;
1551 priv->buf_size = priv->wl.high +
1562 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1565 /* step 2, try to decrease the buffer size of
1566 * no pfc TC's private buffer
1568 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1569 priv = &buf_alloc->priv_buf[i];
1576 if (!(hdev->hw_tc_map & BIT(i)))
1581 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1583 priv->wl.high = priv->wl.low + aligned_mps;
1584 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1587 priv->wl.high = aligned_mps;
1588 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1592 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1595 /* step 3, try to reduce the number of pfc disabled TCs,
1596 * which have private buffer
1598 /* get the total no pfc enable TC number, which have private buffer */
1599 no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1601 /* let the last to be cleared first */
1602 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1603 priv = &buf_alloc->priv_buf[i];
1605 if (hdev->hw_tc_map & BIT(i) &&
1606 !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1607 /* Clear the no pfc TC private buffer */
1615 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1616 no_pfc_priv_num == 0)
1620 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1623 /* step 4, try to reduce the number of pfc enabled TCs
1624 * which have private buffer.
1626 pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1628 /* let the last to be cleared first */
1629 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1630 priv = &buf_alloc->priv_buf[i];
1632 if (hdev->hw_tc_map & BIT(i) &&
1633 hdev->tm_info.hw_pfc_map & BIT(i)) {
1634 /* Reduce the number of pfc TC with private buffer */
1642 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1646 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1652 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1653 struct hclge_pkt_buf_alloc *buf_alloc)
1655 struct hclge_rx_priv_buff_cmd *req;
1656 struct hclge_desc desc;
1660 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1661 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1663 /* Alloc private buffer TCs */
1664 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1665 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1668 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1670 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1674 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1675 (1 << HCLGE_TC0_PRI_BUF_EN_B));
1677 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1679 dev_err(&hdev->pdev->dev,
1680 "rx private buffer alloc cmd failed %d\n", ret);
1685 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1686 struct hclge_pkt_buf_alloc *buf_alloc)
1688 struct hclge_rx_priv_wl_buf *req;
1689 struct hclge_priv_buf *priv;
1690 struct hclge_desc desc[2];
1694 for (i = 0; i < 2; i++) {
1695 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1697 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1699 /* The first descriptor set the NEXT bit to 1 */
1701 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1703 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1705 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1706 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1708 priv = &buf_alloc->priv_buf[idx];
1709 req->tc_wl[j].high =
1710 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1711 req->tc_wl[j].high |=
1712 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1714 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1715 req->tc_wl[j].low |=
1716 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1720 /* Send 2 descriptor at one time */
1721 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1723 dev_err(&hdev->pdev->dev,
1724 "rx private waterline config cmd failed %d\n",
1729 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1730 struct hclge_pkt_buf_alloc *buf_alloc)
1732 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1733 struct hclge_rx_com_thrd *req;
1734 struct hclge_desc desc[2];
1735 struct hclge_tc_thrd *tc;
1739 for (i = 0; i < 2; i++) {
1740 hclge_cmd_setup_basic_desc(&desc[i],
1741 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1742 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1744 /* The first descriptor set the NEXT bit to 1 */
1746 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1748 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1750 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1751 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1753 req->com_thrd[j].high =
1754 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1755 req->com_thrd[j].high |=
1756 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1757 req->com_thrd[j].low =
1758 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1759 req->com_thrd[j].low |=
1760 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1764 /* Send 2 descriptors at one time */
1765 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1767 dev_err(&hdev->pdev->dev,
1768 "common threshold config cmd failed %d\n", ret);
1772 static int hclge_common_wl_config(struct hclge_dev *hdev,
1773 struct hclge_pkt_buf_alloc *buf_alloc)
1775 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1776 struct hclge_rx_com_wl *req;
1777 struct hclge_desc desc;
1780 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1782 req = (struct hclge_rx_com_wl *)desc.data;
1783 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1784 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1786 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1787 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1789 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1791 dev_err(&hdev->pdev->dev,
1792 "common waterline config cmd failed %d\n", ret);
1797 int hclge_buffer_alloc(struct hclge_dev *hdev)
1799 struct hclge_pkt_buf_alloc *pkt_buf;
1802 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1806 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1808 dev_err(&hdev->pdev->dev,
1809 "could not calc tx buffer size for all TCs %d\n", ret);
1813 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1815 dev_err(&hdev->pdev->dev,
1816 "could not alloc tx buffers %d\n", ret);
1820 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1822 dev_err(&hdev->pdev->dev,
1823 "could not calc rx priv buffer size for all TCs %d\n",
1828 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1830 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1835 if (hnae3_dev_dcb_supported(hdev)) {
1836 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1838 dev_err(&hdev->pdev->dev,
1839 "could not configure rx private waterline %d\n",
1844 ret = hclge_common_thrd_config(hdev, pkt_buf);
1846 dev_err(&hdev->pdev->dev,
1847 "could not configure common threshold %d\n",
1853 ret = hclge_common_wl_config(hdev, pkt_buf);
1855 dev_err(&hdev->pdev->dev,
1856 "could not configure common waterline %d\n", ret);
1863 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1865 struct hnae3_handle *roce = &vport->roce;
1866 struct hnae3_handle *nic = &vport->nic;
1868 roce->rinfo.num_vectors = vport->back->num_roce_msi;
1870 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1871 vport->back->num_msi_left == 0)
1874 roce->rinfo.base_vector = vport->back->roce_base_vector;
1876 roce->rinfo.netdev = nic->kinfo.netdev;
1877 roce->rinfo.roce_io_base = vport->back->hw.io_base;
1879 roce->pdev = nic->pdev;
1880 roce->ae_algo = nic->ae_algo;
1881 roce->numa_node_mask = nic->numa_node_mask;
1886 static int hclge_init_msi(struct hclge_dev *hdev)
1888 struct pci_dev *pdev = hdev->pdev;
1892 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1893 PCI_IRQ_MSI | PCI_IRQ_MSIX);
1896 "failed(%d) to allocate MSI/MSI-X vectors\n",
1900 if (vectors < hdev->num_msi)
1901 dev_warn(&hdev->pdev->dev,
1902 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1903 hdev->num_msi, vectors);
1905 hdev->num_msi = vectors;
1906 hdev->num_msi_left = vectors;
1907 hdev->base_msi_vector = pdev->irq;
1908 hdev->roce_base_vector = hdev->base_msi_vector +
1909 hdev->roce_base_msix_offset;
1911 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1912 sizeof(u16), GFP_KERNEL);
1913 if (!hdev->vector_status) {
1914 pci_free_irq_vectors(pdev);
1918 for (i = 0; i < hdev->num_msi; i++)
1919 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1921 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1922 sizeof(int), GFP_KERNEL);
1923 if (!hdev->vector_irq) {
1924 pci_free_irq_vectors(pdev);
1931 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1934 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1935 duplex = HCLGE_MAC_FULL;
1940 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1943 struct hclge_config_mac_speed_dup_cmd *req;
1944 struct hclge_desc desc;
1947 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
1949 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
1951 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
1954 case HCLGE_MAC_SPEED_10M:
1955 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1956 HCLGE_CFG_SPEED_S, 6);
1958 case HCLGE_MAC_SPEED_100M:
1959 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1960 HCLGE_CFG_SPEED_S, 7);
1962 case HCLGE_MAC_SPEED_1G:
1963 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1964 HCLGE_CFG_SPEED_S, 0);
1966 case HCLGE_MAC_SPEED_10G:
1967 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1968 HCLGE_CFG_SPEED_S, 1);
1970 case HCLGE_MAC_SPEED_25G:
1971 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1972 HCLGE_CFG_SPEED_S, 2);
1974 case HCLGE_MAC_SPEED_40G:
1975 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1976 HCLGE_CFG_SPEED_S, 3);
1978 case HCLGE_MAC_SPEED_50G:
1979 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1980 HCLGE_CFG_SPEED_S, 4);
1982 case HCLGE_MAC_SPEED_100G:
1983 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
1984 HCLGE_CFG_SPEED_S, 5);
1987 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
1991 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1994 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1996 dev_err(&hdev->pdev->dev,
1997 "mac speed/duplex config cmd failed %d.\n", ret);
2004 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2008 duplex = hclge_check_speed_dup(duplex, speed);
2009 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2012 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2016 hdev->hw.mac.speed = speed;
2017 hdev->hw.mac.duplex = duplex;
2022 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2025 struct hclge_vport *vport = hclge_get_vport(handle);
2026 struct hclge_dev *hdev = vport->back;
2028 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2031 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2033 struct hclge_config_auto_neg_cmd *req;
2034 struct hclge_desc desc;
2038 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2040 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2041 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2042 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2044 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2046 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2052 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2054 struct hclge_vport *vport = hclge_get_vport(handle);
2055 struct hclge_dev *hdev = vport->back;
2057 return hclge_set_autoneg_en(hdev, enable);
2060 static int hclge_get_autoneg(struct hnae3_handle *handle)
2062 struct hclge_vport *vport = hclge_get_vport(handle);
2063 struct hclge_dev *hdev = vport->back;
2064 struct phy_device *phydev = hdev->hw.mac.phydev;
2067 return phydev->autoneg;
2069 return hdev->hw.mac.autoneg;
2072 static int hclge_mac_init(struct hclge_dev *hdev)
2074 struct hclge_mac *mac = &hdev->hw.mac;
2077 hdev->support_sfp_query = true;
2078 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2079 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2080 hdev->hw.mac.duplex);
2082 dev_err(&hdev->pdev->dev,
2083 "Config mac speed dup fail ret=%d\n", ret);
2089 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2091 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2095 ret = hclge_buffer_alloc(hdev);
2097 dev_err(&hdev->pdev->dev,
2098 "allocate buffer fail, ret=%d\n", ret);
2103 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2105 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2106 schedule_work(&hdev->mbx_service_task);
2109 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2111 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2112 schedule_work(&hdev->rst_service_task);
2115 static void hclge_task_schedule(struct hclge_dev *hdev)
2117 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2118 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2119 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2120 (void)schedule_work(&hdev->service_task);
2123 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2125 struct hclge_link_status_cmd *req;
2126 struct hclge_desc desc;
2130 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2131 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2133 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2138 req = (struct hclge_link_status_cmd *)desc.data;
2139 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2141 return !!link_status;
2144 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2149 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2152 mac_state = hclge_get_mac_link_status(hdev);
2154 if (hdev->hw.mac.phydev) {
2155 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2156 link_stat = mac_state &
2157 hdev->hw.mac.phydev->link;
2162 link_stat = mac_state;
2168 static void hclge_update_link_status(struct hclge_dev *hdev)
2170 struct hnae3_client *rclient = hdev->roce_client;
2171 struct hnae3_client *client = hdev->nic_client;
2172 struct hnae3_handle *rhandle;
2173 struct hnae3_handle *handle;
2179 state = hclge_get_mac_phy_link(hdev);
2180 if (state != hdev->hw.mac.link) {
2181 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2182 handle = &hdev->vport[i].nic;
2183 client->ops->link_status_change(handle, state);
2184 rhandle = &hdev->vport[i].roce;
2185 if (rclient && rclient->ops->link_status_change)
2186 rclient->ops->link_status_change(rhandle,
2189 hdev->hw.mac.link = state;
2193 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2195 struct hclge_sfp_speed_cmd *resp = NULL;
2196 struct hclge_desc desc;
2199 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2200 resp = (struct hclge_sfp_speed_cmd *)desc.data;
2201 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2202 if (ret == -EOPNOTSUPP) {
2203 dev_warn(&hdev->pdev->dev,
2204 "IMP do not support get SFP speed %d\n", ret);
2207 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2211 *speed = resp->sfp_speed;
2216 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2218 struct hclge_mac mac = hdev->hw.mac;
2222 /* get the speed from SFP cmd when phy
2228 /* if IMP does not support get SFP/qSFP speed, return directly */
2229 if (!hdev->support_sfp_query)
2232 ret = hclge_get_sfp_speed(hdev, &speed);
2233 if (ret == -EOPNOTSUPP) {
2234 hdev->support_sfp_query = false;
2240 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2241 return 0; /* do nothing if no SFP */
2243 /* must config full duplex for SFP */
2244 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2247 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2249 struct hclge_vport *vport = hclge_get_vport(handle);
2250 struct hclge_dev *hdev = vport->back;
2252 return hclge_update_speed_duplex(hdev);
2255 static int hclge_get_status(struct hnae3_handle *handle)
2257 struct hclge_vport *vport = hclge_get_vport(handle);
2258 struct hclge_dev *hdev = vport->back;
2260 hclge_update_link_status(hdev);
2262 return hdev->hw.mac.link;
2265 static void hclge_service_timer(struct timer_list *t)
2267 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2269 mod_timer(&hdev->service_timer, jiffies + HZ);
2270 hdev->hw_stats.stats_timer++;
2271 hclge_task_schedule(hdev);
2274 static void hclge_service_complete(struct hclge_dev *hdev)
2276 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2278 /* Flush memory before next watchdog */
2279 smp_mb__before_atomic();
2280 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2283 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2285 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2287 /* fetch the events from their corresponding regs */
2288 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2289 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2290 msix_src_reg = hclge_read_dev(&hdev->hw,
2291 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2293 /* Assumption: If by any chance reset and mailbox events are reported
2294 * together then we will only process reset event in this go and will
2295 * defer the processing of the mailbox events. Since, we would have not
2296 * cleared RX CMDQ event this time we would receive again another
2297 * interrupt from H/W just for the mailbox.
2300 /* check for vector0 reset event sources */
2301 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2302 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2303 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2304 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2305 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2306 return HCLGE_VECTOR0_EVENT_RST;
2309 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2310 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2311 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2312 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2313 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2314 return HCLGE_VECTOR0_EVENT_RST;
2317 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2318 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2319 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2320 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2321 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2322 return HCLGE_VECTOR0_EVENT_RST;
2325 /* check for vector0 msix event source */
2326 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
2327 return HCLGE_VECTOR0_EVENT_ERR;
2329 /* check for vector0 mailbox(=CMDQ RX) event source */
2330 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2331 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2332 *clearval = cmdq_src_reg;
2333 return HCLGE_VECTOR0_EVENT_MBX;
2336 return HCLGE_VECTOR0_EVENT_OTHER;
2339 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2342 switch (event_type) {
2343 case HCLGE_VECTOR0_EVENT_RST:
2344 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2346 case HCLGE_VECTOR0_EVENT_MBX:
2347 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2354 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2356 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2357 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2358 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2359 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2360 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2363 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2365 writel(enable ? 1 : 0, vector->addr);
2368 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2370 struct hclge_dev *hdev = data;
2374 hclge_enable_vector(&hdev->misc_vector, false);
2375 event_cause = hclge_check_event_cause(hdev, &clearval);
2377 /* vector 0 interrupt is shared with reset and mailbox source events.*/
2378 switch (event_cause) {
2379 case HCLGE_VECTOR0_EVENT_ERR:
2380 /* we do not know what type of reset is required now. This could
2381 * only be decided after we fetch the type of errors which
2382 * caused this event. Therefore, we will do below for now:
2383 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2384 * have defered type of reset to be used.
2385 * 2. Schedule the reset serivce task.
2386 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2387 * will fetch the correct type of reset. This would be done
2388 * by first decoding the types of errors.
2390 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2392 case HCLGE_VECTOR0_EVENT_RST:
2393 hclge_reset_task_schedule(hdev);
2395 case HCLGE_VECTOR0_EVENT_MBX:
2396 /* If we are here then,
2397 * 1. Either we are not handling any mbx task and we are not
2400 * 2. We could be handling a mbx task but nothing more is
2402 * In both cases, we should schedule mbx task as there are more
2403 * mbx messages reported by this interrupt.
2405 hclge_mbx_task_schedule(hdev);
2408 dev_warn(&hdev->pdev->dev,
2409 "received unknown or unhandled event of vector0\n");
2413 /* clear the source of interrupt if it is not cause by reset */
2414 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2415 hclge_clear_event_cause(hdev, event_cause, clearval);
2416 hclge_enable_vector(&hdev->misc_vector, true);
2422 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2424 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2425 dev_warn(&hdev->pdev->dev,
2426 "vector(vector_id %d) has been freed.\n", vector_id);
2430 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2431 hdev->num_msi_left += 1;
2432 hdev->num_msi_used -= 1;
2435 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2437 struct hclge_misc_vector *vector = &hdev->misc_vector;
2439 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2441 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2442 hdev->vector_status[0] = 0;
2444 hdev->num_msi_left -= 1;
2445 hdev->num_msi_used += 1;
2448 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2452 hclge_get_misc_vector(hdev);
2454 /* this would be explicitly freed in the end */
2455 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2456 0, "hclge_misc", hdev);
2458 hclge_free_vector(hdev, 0);
2459 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2460 hdev->misc_vector.vector_irq);
2466 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2468 free_irq(hdev->misc_vector.vector_irq, hdev);
2469 hclge_free_vector(hdev, 0);
2472 int hclge_notify_client(struct hclge_dev *hdev,
2473 enum hnae3_reset_notify_type type)
2475 struct hnae3_client *client = hdev->nic_client;
2478 if (!client->ops->reset_notify)
2481 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2482 struct hnae3_handle *handle = &hdev->vport[i].nic;
2485 ret = client->ops->reset_notify(handle, type);
2487 dev_err(&hdev->pdev->dev,
2488 "notify nic client failed %d(%d)\n", type, ret);
2496 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2497 enum hnae3_reset_notify_type type)
2499 struct hnae3_client *client = hdev->roce_client;
2506 if (!client->ops->reset_notify)
2509 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2510 struct hnae3_handle *handle = &hdev->vport[i].roce;
2512 ret = client->ops->reset_notify(handle, type);
2514 dev_err(&hdev->pdev->dev,
2515 "notify roce client failed %d(%d)",
2524 static int hclge_reset_wait(struct hclge_dev *hdev)
2526 #define HCLGE_RESET_WATI_MS 100
2527 #define HCLGE_RESET_WAIT_CNT 200
2528 u32 val, reg, reg_bit;
2531 switch (hdev->reset_type) {
2532 case HNAE3_IMP_RESET:
2533 reg = HCLGE_GLOBAL_RESET_REG;
2534 reg_bit = HCLGE_IMP_RESET_BIT;
2536 case HNAE3_GLOBAL_RESET:
2537 reg = HCLGE_GLOBAL_RESET_REG;
2538 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2540 case HNAE3_CORE_RESET:
2541 reg = HCLGE_GLOBAL_RESET_REG;
2542 reg_bit = HCLGE_CORE_RESET_BIT;
2544 case HNAE3_FUNC_RESET:
2545 reg = HCLGE_FUN_RST_ING;
2546 reg_bit = HCLGE_FUN_RST_ING_B;
2548 case HNAE3_FLR_RESET:
2551 dev_err(&hdev->pdev->dev,
2552 "Wait for unsupported reset type: %d\n",
2557 if (hdev->reset_type == HNAE3_FLR_RESET) {
2558 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2559 cnt++ < HCLGE_RESET_WAIT_CNT)
2560 msleep(HCLGE_RESET_WATI_MS);
2562 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2563 dev_err(&hdev->pdev->dev,
2564 "flr wait timeout: %d\n", cnt);
2571 val = hclge_read_dev(&hdev->hw, reg);
2572 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2573 msleep(HCLGE_RESET_WATI_MS);
2574 val = hclge_read_dev(&hdev->hw, reg);
2578 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2579 dev_warn(&hdev->pdev->dev,
2580 "Wait for reset timeout: %d\n", hdev->reset_type);
2587 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2589 struct hclge_vf_rst_cmd *req;
2590 struct hclge_desc desc;
2592 req = (struct hclge_vf_rst_cmd *)desc.data;
2593 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2594 req->dest_vfid = func_id;
2599 return hclge_cmd_send(&hdev->hw, &desc, 1);
2602 int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2606 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2607 struct hclge_vport *vport = &hdev->vport[i];
2610 /* Send cmd to set/clear VF's FUNC_RST_ING */
2611 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2613 dev_err(&hdev->pdev->dev,
2614 "set vf(%d) rst failed %d!\n",
2615 vport->vport_id, ret);
2622 /* Inform VF to process the reset.
2623 * hclge_inform_reset_assert_to_vf may fail if VF
2624 * driver is not loaded.
2626 ret = hclge_inform_reset_assert_to_vf(vport);
2628 dev_warn(&hdev->pdev->dev,
2629 "inform reset to vf(%d) failed %d!\n",
2630 vport->vport_id, ret);
2636 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2638 struct hclge_desc desc;
2639 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2642 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2643 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2644 req->fun_reset_vfid = func_id;
2646 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2648 dev_err(&hdev->pdev->dev,
2649 "send function reset cmd fail, status =%d\n", ret);
2654 static void hclge_do_reset(struct hclge_dev *hdev)
2656 struct pci_dev *pdev = hdev->pdev;
2659 switch (hdev->reset_type) {
2660 case HNAE3_GLOBAL_RESET:
2661 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2662 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2663 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2664 dev_info(&pdev->dev, "Global Reset requested\n");
2666 case HNAE3_CORE_RESET:
2667 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2668 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2669 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2670 dev_info(&pdev->dev, "Core Reset requested\n");
2672 case HNAE3_FUNC_RESET:
2673 dev_info(&pdev->dev, "PF Reset requested\n");
2674 /* schedule again to check later */
2675 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2676 hclge_reset_task_schedule(hdev);
2678 case HNAE3_FLR_RESET:
2679 dev_info(&pdev->dev, "FLR requested\n");
2680 /* schedule again to check later */
2681 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2682 hclge_reset_task_schedule(hdev);
2685 dev_warn(&pdev->dev,
2686 "Unsupported reset type: %d\n", hdev->reset_type);
2691 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2692 unsigned long *addr)
2694 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2696 /* first, resolve any unknown reset type to the known type(s) */
2697 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2698 /* we will intentionally ignore any errors from this function
2699 * as we will end up in *some* reset request in any case
2701 hclge_handle_hw_msix_error(hdev, addr);
2702 clear_bit(HNAE3_UNKNOWN_RESET, addr);
2703 /* We defered the clearing of the error event which caused
2704 * interrupt since it was not posssible to do that in
2705 * interrupt context (and this is the reason we introduced
2706 * new UNKNOWN reset type). Now, the errors have been
2707 * handled and cleared in hardware we can safely enable
2708 * interrupts. This is an exception to the norm.
2710 hclge_enable_vector(&hdev->misc_vector, true);
2713 /* return the highest priority reset level amongst all */
2714 if (test_bit(HNAE3_IMP_RESET, addr)) {
2715 rst_level = HNAE3_IMP_RESET;
2716 clear_bit(HNAE3_IMP_RESET, addr);
2717 clear_bit(HNAE3_GLOBAL_RESET, addr);
2718 clear_bit(HNAE3_CORE_RESET, addr);
2719 clear_bit(HNAE3_FUNC_RESET, addr);
2720 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2721 rst_level = HNAE3_GLOBAL_RESET;
2722 clear_bit(HNAE3_GLOBAL_RESET, addr);
2723 clear_bit(HNAE3_CORE_RESET, addr);
2724 clear_bit(HNAE3_FUNC_RESET, addr);
2725 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
2726 rst_level = HNAE3_CORE_RESET;
2727 clear_bit(HNAE3_CORE_RESET, addr);
2728 clear_bit(HNAE3_FUNC_RESET, addr);
2729 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2730 rst_level = HNAE3_FUNC_RESET;
2731 clear_bit(HNAE3_FUNC_RESET, addr);
2732 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2733 rst_level = HNAE3_FLR_RESET;
2734 clear_bit(HNAE3_FLR_RESET, addr);
2740 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2744 switch (hdev->reset_type) {
2745 case HNAE3_IMP_RESET:
2746 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2748 case HNAE3_GLOBAL_RESET:
2749 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2751 case HNAE3_CORE_RESET:
2752 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2761 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2762 hclge_enable_vector(&hdev->misc_vector, true);
2765 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2769 switch (hdev->reset_type) {
2770 case HNAE3_FUNC_RESET:
2772 case HNAE3_FLR_RESET:
2773 ret = hclge_set_all_vf_rst(hdev, true);
2782 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2787 switch (hdev->reset_type) {
2788 case HNAE3_FUNC_RESET:
2789 /* There is no mechanism for PF to know if VF has stopped IO
2790 * for now, just wait 100 ms for VF to stop IO
2793 ret = hclge_func_reset_cmd(hdev, 0);
2795 dev_err(&hdev->pdev->dev,
2796 "asserting function reset fail %d!\n", ret);
2800 /* After performaning pf reset, it is not necessary to do the
2801 * mailbox handling or send any command to firmware, because
2802 * any mailbox handling or command to firmware is only valid
2803 * after hclge_cmd_init is called.
2805 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2807 case HNAE3_FLR_RESET:
2808 /* There is no mechanism for PF to know if VF has stopped IO
2809 * for now, just wait 100 ms for VF to stop IO
2812 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2813 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2815 case HNAE3_IMP_RESET:
2816 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2817 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2818 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2824 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2829 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2831 #define MAX_RESET_FAIL_CNT 5
2832 #define RESET_UPGRADE_DELAY_SEC 10
2834 if (hdev->reset_pending) {
2835 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2836 hdev->reset_pending);
2838 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2839 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2840 BIT(HCLGE_IMP_RESET_BIT))) {
2841 dev_info(&hdev->pdev->dev,
2842 "reset failed because IMP Reset is pending\n");
2843 hclge_clear_reset_cause(hdev);
2845 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2846 hdev->reset_fail_cnt++;
2848 set_bit(hdev->reset_type, &hdev->reset_pending);
2849 dev_info(&hdev->pdev->dev,
2850 "re-schedule to wait for hw reset done\n");
2854 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2855 hclge_clear_reset_cause(hdev);
2856 mod_timer(&hdev->reset_timer,
2857 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2862 hclge_clear_reset_cause(hdev);
2863 dev_err(&hdev->pdev->dev, "Reset fail!\n");
2867 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2871 switch (hdev->reset_type) {
2872 case HNAE3_FUNC_RESET:
2874 case HNAE3_FLR_RESET:
2875 ret = hclge_set_all_vf_rst(hdev, false);
2884 static void hclge_reset(struct hclge_dev *hdev)
2886 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2887 bool is_timeout = false;
2890 /* Initialize ae_dev reset status as well, in case enet layer wants to
2891 * know if device is undergoing reset
2893 ae_dev->reset_type = hdev->reset_type;
2894 hdev->reset_count++;
2895 /* perform reset of the stack & ae device for a client */
2896 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2900 ret = hclge_reset_prepare_down(hdev);
2905 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2907 goto err_reset_lock;
2911 ret = hclge_reset_prepare_wait(hdev);
2915 if (hclge_reset_wait(hdev)) {
2920 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2925 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2927 goto err_reset_lock;
2929 ret = hclge_reset_ae_dev(hdev->ae_dev);
2931 goto err_reset_lock;
2933 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2935 goto err_reset_lock;
2937 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
2939 goto err_reset_lock;
2941 hclge_clear_reset_cause(hdev);
2943 ret = hclge_reset_prepare_up(hdev);
2945 goto err_reset_lock;
2947 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2949 goto err_reset_lock;
2953 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
2957 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
2961 hdev->last_reset_time = jiffies;
2962 hdev->reset_fail_cnt = 0;
2963 ae_dev->reset_type = HNAE3_NONE_RESET;
2970 if (hclge_reset_err_handle(hdev, is_timeout))
2971 hclge_reset_task_schedule(hdev);
2974 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
2976 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2977 struct hclge_dev *hdev = ae_dev->priv;
2979 /* We might end up getting called broadly because of 2 below cases:
2980 * 1. Recoverable error was conveyed through APEI and only way to bring
2981 * normalcy is to reset.
2982 * 2. A new reset request from the stack due to timeout
2984 * For the first case,error event might not have ae handle available.
2985 * check if this is a new reset request and we are not here just because
2986 * last reset attempt did not succeed and watchdog hit us again. We will
2987 * know this if last reset request did not occur very recently (watchdog
2988 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
2989 * In case of new request we reset the "reset level" to PF reset.
2990 * And if it is a repeat reset request of the most recent one then we
2991 * want to make sure we throttle the reset request. Therefore, we will
2992 * not allow it again before 3*HZ times.
2995 handle = &hdev->vport[0].nic;
2997 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
2999 else if (hdev->default_reset_request)
3001 hclge_get_reset_level(hdev,
3002 &hdev->default_reset_request);
3003 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3004 hdev->reset_level = HNAE3_FUNC_RESET;
3006 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
3009 /* request reset & schedule reset task */
3010 set_bit(hdev->reset_level, &hdev->reset_request);
3011 hclge_reset_task_schedule(hdev);
3013 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3014 hdev->reset_level++;
3017 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3018 enum hnae3_reset_type rst_type)
3020 struct hclge_dev *hdev = ae_dev->priv;
3022 set_bit(rst_type, &hdev->default_reset_request);
3025 static void hclge_reset_timer(struct timer_list *t)
3027 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3029 dev_info(&hdev->pdev->dev,
3030 "triggering global reset in reset timer\n");
3031 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3032 hclge_reset_event(hdev->pdev, NULL);
3035 static void hclge_reset_subtask(struct hclge_dev *hdev)
3037 /* check if there is any ongoing reset in the hardware. This status can
3038 * be checked from reset_pending. If there is then, we need to wait for
3039 * hardware to complete reset.
3040 * a. If we are able to figure out in reasonable time that hardware
3041 * has fully resetted then, we can proceed with driver, client
3043 * b. else, we can come back later to check this status so re-sched
3046 hdev->last_reset_time = jiffies;
3047 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3048 if (hdev->reset_type != HNAE3_NONE_RESET)
3051 /* check if we got any *new* reset requests to be honored */
3052 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3053 if (hdev->reset_type != HNAE3_NONE_RESET)
3054 hclge_do_reset(hdev);
3056 hdev->reset_type = HNAE3_NONE_RESET;
3059 static void hclge_reset_service_task(struct work_struct *work)
3061 struct hclge_dev *hdev =
3062 container_of(work, struct hclge_dev, rst_service_task);
3064 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3067 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3069 hclge_reset_subtask(hdev);
3071 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3074 static void hclge_mailbox_service_task(struct work_struct *work)
3076 struct hclge_dev *hdev =
3077 container_of(work, struct hclge_dev, mbx_service_task);
3079 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3082 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3084 hclge_mbx_handler(hdev);
3086 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3089 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3093 /* start from vport 1 for PF is always alive */
3094 for (i = 1; i < hdev->num_alloc_vport; i++) {
3095 struct hclge_vport *vport = &hdev->vport[i];
3097 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3098 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3100 /* If vf is not alive, set to default value */
3101 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3102 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3106 static void hclge_service_task(struct work_struct *work)
3108 struct hclge_dev *hdev =
3109 container_of(work, struct hclge_dev, service_task);
3111 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3112 hclge_update_stats_for_all(hdev);
3113 hdev->hw_stats.stats_timer = 0;
3116 hclge_update_speed_duplex(hdev);
3117 hclge_update_link_status(hdev);
3118 hclge_update_vport_alive(hdev);
3119 hclge_service_complete(hdev);
3122 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3124 /* VF handle has no client */
3125 if (!handle->client)
3126 return container_of(handle, struct hclge_vport, nic);
3127 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3128 return container_of(handle, struct hclge_vport, roce);
3130 return container_of(handle, struct hclge_vport, nic);
3133 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3134 struct hnae3_vector_info *vector_info)
3136 struct hclge_vport *vport = hclge_get_vport(handle);
3137 struct hnae3_vector_info *vector = vector_info;
3138 struct hclge_dev *hdev = vport->back;
3142 vector_num = min(hdev->num_msi_left, vector_num);
3144 for (j = 0; j < vector_num; j++) {
3145 for (i = 1; i < hdev->num_msi; i++) {
3146 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3147 vector->vector = pci_irq_vector(hdev->pdev, i);
3148 vector->io_addr = hdev->hw.io_base +
3149 HCLGE_VECTOR_REG_BASE +
3150 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3152 HCLGE_VECTOR_VF_OFFSET;
3153 hdev->vector_status[i] = vport->vport_id;
3154 hdev->vector_irq[i] = vector->vector;
3163 hdev->num_msi_left -= alloc;
3164 hdev->num_msi_used += alloc;
3169 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3173 for (i = 0; i < hdev->num_msi; i++)
3174 if (vector == hdev->vector_irq[i])
3180 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3182 struct hclge_vport *vport = hclge_get_vport(handle);
3183 struct hclge_dev *hdev = vport->back;
3186 vector_id = hclge_get_vector_index(hdev, vector);
3187 if (vector_id < 0) {
3188 dev_err(&hdev->pdev->dev,
3189 "Get vector index fail. vector_id =%d\n", vector_id);
3193 hclge_free_vector(hdev, vector_id);
3198 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3200 return HCLGE_RSS_KEY_SIZE;
3203 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3205 return HCLGE_RSS_IND_TBL_SIZE;
3208 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3209 const u8 hfunc, const u8 *key)
3211 struct hclge_rss_config_cmd *req;
3212 struct hclge_desc desc;
3217 req = (struct hclge_rss_config_cmd *)desc.data;
3219 for (key_offset = 0; key_offset < 3; key_offset++) {
3220 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3223 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3224 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3226 if (key_offset == 2)
3228 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3230 key_size = HCLGE_RSS_HASH_KEY_NUM;
3232 memcpy(req->hash_key,
3233 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3235 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3237 dev_err(&hdev->pdev->dev,
3238 "Configure RSS config fail, status = %d\n",
3246 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3248 struct hclge_rss_indirection_table_cmd *req;
3249 struct hclge_desc desc;
3253 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3255 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3256 hclge_cmd_setup_basic_desc
3257 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3259 req->start_table_index =
3260 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3261 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3263 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3264 req->rss_result[j] =
3265 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3267 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3269 dev_err(&hdev->pdev->dev,
3270 "Configure rss indir table fail,status = %d\n",
3278 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3279 u16 *tc_size, u16 *tc_offset)
3281 struct hclge_rss_tc_mode_cmd *req;
3282 struct hclge_desc desc;
3286 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3287 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3289 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3292 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3293 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3294 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3295 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3296 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3298 req->rss_tc_mode[i] = cpu_to_le16(mode);
3301 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3303 dev_err(&hdev->pdev->dev,
3304 "Configure rss tc mode fail, status = %d\n", ret);
3309 static void hclge_get_rss_type(struct hclge_vport *vport)
3311 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3312 vport->rss_tuple_sets.ipv4_udp_en ||
3313 vport->rss_tuple_sets.ipv4_sctp_en ||
3314 vport->rss_tuple_sets.ipv6_tcp_en ||
3315 vport->rss_tuple_sets.ipv6_udp_en ||
3316 vport->rss_tuple_sets.ipv6_sctp_en)
3317 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3318 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3319 vport->rss_tuple_sets.ipv6_fragment_en)
3320 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3322 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3325 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3327 struct hclge_rss_input_tuple_cmd *req;
3328 struct hclge_desc desc;
3331 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3333 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3335 /* Get the tuple cfg from pf */
3336 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3337 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3338 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3339 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3340 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3341 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3342 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3343 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3344 hclge_get_rss_type(&hdev->vport[0]);
3345 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3347 dev_err(&hdev->pdev->dev,
3348 "Configure rss input fail, status = %d\n", ret);
3352 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3355 struct hclge_vport *vport = hclge_get_vport(handle);
3358 /* Get hash algorithm */
3360 switch (vport->rss_algo) {
3361 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3362 *hfunc = ETH_RSS_HASH_TOP;
3364 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3365 *hfunc = ETH_RSS_HASH_XOR;
3368 *hfunc = ETH_RSS_HASH_UNKNOWN;
3373 /* Get the RSS Key required by the user */
3375 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3377 /* Get indirect table */
3379 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3380 indir[i] = vport->rss_indirection_tbl[i];
3385 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3386 const u8 *key, const u8 hfunc)
3388 struct hclge_vport *vport = hclge_get_vport(handle);
3389 struct hclge_dev *hdev = vport->back;
3393 /* Set the RSS Hash Key if specififed by the user */
3396 case ETH_RSS_HASH_TOP:
3397 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3399 case ETH_RSS_HASH_XOR:
3400 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3402 case ETH_RSS_HASH_NO_CHANGE:
3403 hash_algo = vport->rss_algo;
3409 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3413 /* Update the shadow RSS key with user specified qids */
3414 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3415 vport->rss_algo = hash_algo;
3418 /* Update the shadow RSS table with user specified qids */
3419 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3420 vport->rss_indirection_tbl[i] = indir[i];
3422 /* Update the hardware */
3423 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3426 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3428 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3430 if (nfc->data & RXH_L4_B_2_3)
3431 hash_sets |= HCLGE_D_PORT_BIT;
3433 hash_sets &= ~HCLGE_D_PORT_BIT;
3435 if (nfc->data & RXH_IP_SRC)
3436 hash_sets |= HCLGE_S_IP_BIT;
3438 hash_sets &= ~HCLGE_S_IP_BIT;
3440 if (nfc->data & RXH_IP_DST)
3441 hash_sets |= HCLGE_D_IP_BIT;
3443 hash_sets &= ~HCLGE_D_IP_BIT;
3445 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3446 hash_sets |= HCLGE_V_TAG_BIT;
3451 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3452 struct ethtool_rxnfc *nfc)
3454 struct hclge_vport *vport = hclge_get_vport(handle);
3455 struct hclge_dev *hdev = vport->back;
3456 struct hclge_rss_input_tuple_cmd *req;
3457 struct hclge_desc desc;
3461 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3462 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3465 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3466 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3468 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3469 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3470 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3471 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3472 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3473 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3474 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3475 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3477 tuple_sets = hclge_get_rss_hash_bits(nfc);
3478 switch (nfc->flow_type) {
3480 req->ipv4_tcp_en = tuple_sets;
3483 req->ipv6_tcp_en = tuple_sets;
3486 req->ipv4_udp_en = tuple_sets;
3489 req->ipv6_udp_en = tuple_sets;
3492 req->ipv4_sctp_en = tuple_sets;
3495 if ((nfc->data & RXH_L4_B_0_1) ||
3496 (nfc->data & RXH_L4_B_2_3))
3499 req->ipv6_sctp_en = tuple_sets;
3502 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3505 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3511 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3513 dev_err(&hdev->pdev->dev,
3514 "Set rss tuple fail, status = %d\n", ret);
3518 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3519 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3520 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3521 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3522 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3523 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3524 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3525 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3526 hclge_get_rss_type(vport);
3530 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3531 struct ethtool_rxnfc *nfc)
3533 struct hclge_vport *vport = hclge_get_vport(handle);
3538 switch (nfc->flow_type) {
3540 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3543 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3546 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3549 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3552 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3555 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3559 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3568 if (tuple_sets & HCLGE_D_PORT_BIT)
3569 nfc->data |= RXH_L4_B_2_3;
3570 if (tuple_sets & HCLGE_S_PORT_BIT)
3571 nfc->data |= RXH_L4_B_0_1;
3572 if (tuple_sets & HCLGE_D_IP_BIT)
3573 nfc->data |= RXH_IP_DST;
3574 if (tuple_sets & HCLGE_S_IP_BIT)
3575 nfc->data |= RXH_IP_SRC;
3580 static int hclge_get_tc_size(struct hnae3_handle *handle)
3582 struct hclge_vport *vport = hclge_get_vport(handle);
3583 struct hclge_dev *hdev = vport->back;
3585 return hdev->rss_size_max;
3588 int hclge_rss_init_hw(struct hclge_dev *hdev)
3590 struct hclge_vport *vport = hdev->vport;
3591 u8 *rss_indir = vport[0].rss_indirection_tbl;
3592 u16 rss_size = vport[0].alloc_rss_size;
3593 u8 *key = vport[0].rss_hash_key;
3594 u8 hfunc = vport[0].rss_algo;
3595 u16 tc_offset[HCLGE_MAX_TC_NUM];
3596 u16 tc_valid[HCLGE_MAX_TC_NUM];
3597 u16 tc_size[HCLGE_MAX_TC_NUM];
3601 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3605 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3609 ret = hclge_set_rss_input_tuple(hdev);
3613 /* Each TC have the same queue size, and tc_size set to hardware is
3614 * the log2 of roundup power of two of rss_size, the acutal queue
3615 * size is limited by indirection table.
3617 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3618 dev_err(&hdev->pdev->dev,
3619 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3624 roundup_size = roundup_pow_of_two(rss_size);
3625 roundup_size = ilog2(roundup_size);
3627 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3630 if (!(hdev->hw_tc_map & BIT(i)))
3634 tc_size[i] = roundup_size;
3635 tc_offset[i] = rss_size * i;
3638 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3641 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3643 struct hclge_vport *vport = hdev->vport;
3646 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3647 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3648 vport[j].rss_indirection_tbl[i] =
3649 i % vport[j].alloc_rss_size;
3653 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3655 struct hclge_vport *vport = hdev->vport;
3658 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3659 vport[i].rss_tuple_sets.ipv4_tcp_en =
3660 HCLGE_RSS_INPUT_TUPLE_OTHER;
3661 vport[i].rss_tuple_sets.ipv4_udp_en =
3662 HCLGE_RSS_INPUT_TUPLE_OTHER;
3663 vport[i].rss_tuple_sets.ipv4_sctp_en =
3664 HCLGE_RSS_INPUT_TUPLE_SCTP;
3665 vport[i].rss_tuple_sets.ipv4_fragment_en =
3666 HCLGE_RSS_INPUT_TUPLE_OTHER;
3667 vport[i].rss_tuple_sets.ipv6_tcp_en =
3668 HCLGE_RSS_INPUT_TUPLE_OTHER;
3669 vport[i].rss_tuple_sets.ipv6_udp_en =
3670 HCLGE_RSS_INPUT_TUPLE_OTHER;
3671 vport[i].rss_tuple_sets.ipv6_sctp_en =
3672 HCLGE_RSS_INPUT_TUPLE_SCTP;
3673 vport[i].rss_tuple_sets.ipv6_fragment_en =
3674 HCLGE_RSS_INPUT_TUPLE_OTHER;
3676 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3678 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
3681 hclge_rss_indir_init_cfg(hdev);
3684 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3685 int vector_id, bool en,
3686 struct hnae3_ring_chain_node *ring_chain)
3688 struct hclge_dev *hdev = vport->back;
3689 struct hnae3_ring_chain_node *node;
3690 struct hclge_desc desc;
3691 struct hclge_ctrl_vector_chain_cmd *req
3692 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3693 enum hclge_cmd_status status;
3694 enum hclge_opcode_type op;
3695 u16 tqp_type_and_id;
3698 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3699 hclge_cmd_setup_basic_desc(&desc, op, false);
3700 req->int_vector_id = vector_id;
3703 for (node = ring_chain; node; node = node->next) {
3704 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3705 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3707 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3708 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3709 HCLGE_TQP_ID_S, node->tqp_index);
3710 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3712 hnae3_get_field(node->int_gl_idx,
3713 HNAE3_RING_GL_IDX_M,
3714 HNAE3_RING_GL_IDX_S));
3715 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3716 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3717 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3718 req->vfid = vport->vport_id;
3720 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3722 dev_err(&hdev->pdev->dev,
3723 "Map TQP fail, status is %d.\n",
3729 hclge_cmd_setup_basic_desc(&desc,
3732 req->int_vector_id = vector_id;
3737 req->int_cause_num = i;
3738 req->vfid = vport->vport_id;
3739 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3741 dev_err(&hdev->pdev->dev,
3742 "Map TQP fail, status is %d.\n", status);
3750 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3752 struct hnae3_ring_chain_node *ring_chain)
3754 struct hclge_vport *vport = hclge_get_vport(handle);
3755 struct hclge_dev *hdev = vport->back;
3758 vector_id = hclge_get_vector_index(hdev, vector);
3759 if (vector_id < 0) {
3760 dev_err(&hdev->pdev->dev,
3761 "Get vector index fail. vector_id =%d\n", vector_id);
3765 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3768 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3770 struct hnae3_ring_chain_node *ring_chain)
3772 struct hclge_vport *vport = hclge_get_vport(handle);
3773 struct hclge_dev *hdev = vport->back;
3776 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3779 vector_id = hclge_get_vector_index(hdev, vector);
3780 if (vector_id < 0) {
3781 dev_err(&handle->pdev->dev,
3782 "Get vector index fail. ret =%d\n", vector_id);
3786 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3788 dev_err(&handle->pdev->dev,
3789 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3796 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3797 struct hclge_promisc_param *param)
3799 struct hclge_promisc_cfg_cmd *req;
3800 struct hclge_desc desc;
3803 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3805 req = (struct hclge_promisc_cfg_cmd *)desc.data;
3806 req->vf_id = param->vf_id;
3808 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3809 * pdev revision(0x20), new revision support them. The
3810 * value of this two fields will not return error when driver
3811 * send command to fireware in revision(0x20).
3813 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3814 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3816 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3818 dev_err(&hdev->pdev->dev,
3819 "Set promisc mode fail, status is %d.\n", ret);
3824 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3825 bool en_mc, bool en_bc, int vport_id)
3830 memset(param, 0, sizeof(struct hclge_promisc_param));
3832 param->enable = HCLGE_PROMISC_EN_UC;
3834 param->enable |= HCLGE_PROMISC_EN_MC;
3836 param->enable |= HCLGE_PROMISC_EN_BC;
3837 param->vf_id = vport_id;
3840 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3843 struct hclge_vport *vport = hclge_get_vport(handle);
3844 struct hclge_dev *hdev = vport->back;
3845 struct hclge_promisc_param param;
3847 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true,
3849 return hclge_cmd_set_promisc_mode(hdev, ¶m);
3852 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3854 struct hclge_get_fd_mode_cmd *req;
3855 struct hclge_desc desc;
3858 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3860 req = (struct hclge_get_fd_mode_cmd *)desc.data;
3862 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3864 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3868 *fd_mode = req->mode;
3873 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3874 u32 *stage1_entry_num,
3875 u32 *stage2_entry_num,
3876 u16 *stage1_counter_num,
3877 u16 *stage2_counter_num)
3879 struct hclge_get_fd_allocation_cmd *req;
3880 struct hclge_desc desc;
3883 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3885 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3887 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3889 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3894 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3895 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3896 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3897 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3902 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3904 struct hclge_set_fd_key_config_cmd *req;
3905 struct hclge_fd_key_cfg *stage;
3906 struct hclge_desc desc;
3909 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3911 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3912 stage = &hdev->fd_cfg.key_cfg[stage_num];
3913 req->stage = stage_num;
3914 req->key_select = stage->key_sel;
3915 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3916 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3917 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3918 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3919 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3920 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3922 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3924 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3929 static int hclge_init_fd_config(struct hclge_dev *hdev)
3931 #define LOW_2_WORDS 0x03
3932 struct hclge_fd_key_cfg *key_cfg;
3935 if (!hnae3_dev_fd_supported(hdev))
3938 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
3942 switch (hdev->fd_cfg.fd_mode) {
3943 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
3944 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
3946 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
3947 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
3950 dev_err(&hdev->pdev->dev,
3951 "Unsupported flow director mode %d\n",
3952 hdev->fd_cfg.fd_mode);
3956 hdev->fd_cfg.fd_en = true;
3957 hdev->fd_cfg.proto_support =
3958 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
3959 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
3960 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
3961 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
3962 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
3963 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
3964 key_cfg->outer_sipv6_word_en = 0;
3965 key_cfg->outer_dipv6_word_en = 0;
3967 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
3968 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
3969 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3970 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3972 /* If use max 400bit key, we can support tuples for ether type */
3973 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
3974 hdev->fd_cfg.proto_support |= ETHER_FLOW;
3975 key_cfg->tuple_active |=
3976 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
3979 /* roce_type is used to filter roce frames
3980 * dst_vport is used to specify the rule
3982 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
3984 ret = hclge_get_fd_allocation(hdev,
3985 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
3986 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
3987 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
3988 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
3992 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
3995 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
3996 int loc, u8 *key, bool is_add)
3998 struct hclge_fd_tcam_config_1_cmd *req1;
3999 struct hclge_fd_tcam_config_2_cmd *req2;
4000 struct hclge_fd_tcam_config_3_cmd *req3;
4001 struct hclge_desc desc[3];
4004 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4005 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4006 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4007 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4008 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4010 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4011 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4012 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4014 req1->stage = stage;
4015 req1->xy_sel = sel_x ? 1 : 0;
4016 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4017 req1->index = cpu_to_le32(loc);
4018 req1->entry_vld = sel_x ? is_add : 0;
4021 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4022 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4023 sizeof(req2->tcam_data));
4024 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4025 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4028 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4030 dev_err(&hdev->pdev->dev,
4031 "config tcam key fail, ret=%d\n",
4037 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4038 struct hclge_fd_ad_data *action)
4040 struct hclge_fd_ad_config_cmd *req;
4041 struct hclge_desc desc;
4045 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4047 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4048 req->index = cpu_to_le32(loc);
4051 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4052 action->write_rule_id_to_bd);
4053 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4056 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4057 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4058 action->forward_to_direct_queue);
4059 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4061 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4062 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4063 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4064 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4065 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4066 action->counter_id);
4068 req->ad_data = cpu_to_le64(ad_data);
4069 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4071 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4076 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4077 struct hclge_fd_rule *rule)
4079 u16 tmp_x_s, tmp_y_s;
4080 u32 tmp_x_l, tmp_y_l;
4083 if (rule->unused_tuple & tuple_bit)
4086 switch (tuple_bit) {
4089 case BIT(INNER_DST_MAC):
4090 for (i = 0; i < 6; i++) {
4091 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4092 rule->tuples_mask.dst_mac[i]);
4093 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4094 rule->tuples_mask.dst_mac[i]);
4098 case BIT(INNER_SRC_MAC):
4099 for (i = 0; i < 6; i++) {
4100 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4101 rule->tuples.src_mac[i]);
4102 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4103 rule->tuples.src_mac[i]);
4107 case BIT(INNER_VLAN_TAG_FST):
4108 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4109 rule->tuples_mask.vlan_tag1);
4110 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4111 rule->tuples_mask.vlan_tag1);
4112 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4113 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4116 case BIT(INNER_ETH_TYPE):
4117 calc_x(tmp_x_s, rule->tuples.ether_proto,
4118 rule->tuples_mask.ether_proto);
4119 calc_y(tmp_y_s, rule->tuples.ether_proto,
4120 rule->tuples_mask.ether_proto);
4121 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4122 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4125 case BIT(INNER_IP_TOS):
4126 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4127 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4130 case BIT(INNER_IP_PROTO):
4131 calc_x(*key_x, rule->tuples.ip_proto,
4132 rule->tuples_mask.ip_proto);
4133 calc_y(*key_y, rule->tuples.ip_proto,
4134 rule->tuples_mask.ip_proto);
4137 case BIT(INNER_SRC_IP):
4138 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4139 rule->tuples_mask.src_ip[3]);
4140 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4141 rule->tuples_mask.src_ip[3]);
4142 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4143 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4146 case BIT(INNER_DST_IP):
4147 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4148 rule->tuples_mask.dst_ip[3]);
4149 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4150 rule->tuples_mask.dst_ip[3]);
4151 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4152 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4155 case BIT(INNER_SRC_PORT):
4156 calc_x(tmp_x_s, rule->tuples.src_port,
4157 rule->tuples_mask.src_port);
4158 calc_y(tmp_y_s, rule->tuples.src_port,
4159 rule->tuples_mask.src_port);
4160 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4161 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4164 case BIT(INNER_DST_PORT):
4165 calc_x(tmp_x_s, rule->tuples.dst_port,
4166 rule->tuples_mask.dst_port);
4167 calc_y(tmp_y_s, rule->tuples.dst_port,
4168 rule->tuples_mask.dst_port);
4169 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4170 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4178 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4179 u8 vf_id, u8 network_port_id)
4181 u32 port_number = 0;
4183 if (port_type == HOST_PORT) {
4184 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4186 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4188 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4190 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4191 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4192 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4198 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4199 __le32 *key_x, __le32 *key_y,
4200 struct hclge_fd_rule *rule)
4202 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4203 u8 cur_pos = 0, tuple_size, shift_bits;
4206 for (i = 0; i < MAX_META_DATA; i++) {
4207 tuple_size = meta_data_key_info[i].key_length;
4208 tuple_bit = key_cfg->meta_data_active & BIT(i);
4210 switch (tuple_bit) {
4211 case BIT(ROCE_TYPE):
4212 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4213 cur_pos += tuple_size;
4215 case BIT(DST_VPORT):
4216 port_number = hclge_get_port_number(HOST_PORT, 0,
4218 hnae3_set_field(meta_data,
4219 GENMASK(cur_pos + tuple_size, cur_pos),
4220 cur_pos, port_number);
4221 cur_pos += tuple_size;
4228 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4229 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4230 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4232 *key_x = cpu_to_le32(tmp_x << shift_bits);
4233 *key_y = cpu_to_le32(tmp_y << shift_bits);
4236 /* A complete key is combined with meta data key and tuple key.
4237 * Meta data key is stored at the MSB region, and tuple key is stored at
4238 * the LSB region, unused bits will be filled 0.
4240 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4241 struct hclge_fd_rule *rule)
4243 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4244 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4245 u8 *cur_key_x, *cur_key_y;
4246 int i, ret, tuple_size;
4247 u8 meta_data_region;
4249 memset(key_x, 0, sizeof(key_x));
4250 memset(key_y, 0, sizeof(key_y));
4254 for (i = 0 ; i < MAX_TUPLE; i++) {
4258 tuple_size = tuple_key_info[i].key_length / 8;
4259 check_tuple = key_cfg->tuple_active & BIT(i);
4261 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4264 cur_key_x += tuple_size;
4265 cur_key_y += tuple_size;
4269 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4270 MAX_META_DATA_LENGTH / 8;
4272 hclge_fd_convert_meta_data(key_cfg,
4273 (__le32 *)(key_x + meta_data_region),
4274 (__le32 *)(key_y + meta_data_region),
4277 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4280 dev_err(&hdev->pdev->dev,
4281 "fd key_y config fail, loc=%d, ret=%d\n",
4282 rule->queue_id, ret);
4286 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4289 dev_err(&hdev->pdev->dev,
4290 "fd key_x config fail, loc=%d, ret=%d\n",
4291 rule->queue_id, ret);
4295 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4296 struct hclge_fd_rule *rule)
4298 struct hclge_fd_ad_data ad_data;
4300 ad_data.ad_id = rule->location;
4302 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4303 ad_data.drop_packet = true;
4304 ad_data.forward_to_direct_queue = false;
4305 ad_data.queue_id = 0;
4307 ad_data.drop_packet = false;
4308 ad_data.forward_to_direct_queue = true;
4309 ad_data.queue_id = rule->queue_id;
4312 ad_data.use_counter = false;
4313 ad_data.counter_id = 0;
4315 ad_data.use_next_stage = false;
4316 ad_data.next_input_key = 0;
4318 ad_data.write_rule_id_to_bd = true;
4319 ad_data.rule_id = rule->location;
4321 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4324 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4325 struct ethtool_rx_flow_spec *fs, u32 *unused)
4327 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4328 struct ethtool_usrip4_spec *usr_ip4_spec;
4329 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4330 struct ethtool_usrip6_spec *usr_ip6_spec;
4331 struct ethhdr *ether_spec;
4333 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4336 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4339 if ((fs->flow_type & FLOW_EXT) &&
4340 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4341 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4345 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4349 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4350 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4352 if (!tcp_ip4_spec->ip4src)
4353 *unused |= BIT(INNER_SRC_IP);
4355 if (!tcp_ip4_spec->ip4dst)
4356 *unused |= BIT(INNER_DST_IP);
4358 if (!tcp_ip4_spec->psrc)
4359 *unused |= BIT(INNER_SRC_PORT);
4361 if (!tcp_ip4_spec->pdst)
4362 *unused |= BIT(INNER_DST_PORT);
4364 if (!tcp_ip4_spec->tos)
4365 *unused |= BIT(INNER_IP_TOS);
4369 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4370 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4371 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4373 if (!usr_ip4_spec->ip4src)
4374 *unused |= BIT(INNER_SRC_IP);
4376 if (!usr_ip4_spec->ip4dst)
4377 *unused |= BIT(INNER_DST_IP);
4379 if (!usr_ip4_spec->tos)
4380 *unused |= BIT(INNER_IP_TOS);
4382 if (!usr_ip4_spec->proto)
4383 *unused |= BIT(INNER_IP_PROTO);
4385 if (usr_ip4_spec->l4_4_bytes)
4388 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4395 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4396 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4399 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4400 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4401 *unused |= BIT(INNER_SRC_IP);
4403 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4404 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4405 *unused |= BIT(INNER_DST_IP);
4407 if (!tcp_ip6_spec->psrc)
4408 *unused |= BIT(INNER_SRC_PORT);
4410 if (!tcp_ip6_spec->pdst)
4411 *unused |= BIT(INNER_DST_PORT);
4413 if (tcp_ip6_spec->tclass)
4417 case IPV6_USER_FLOW:
4418 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4419 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4420 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4421 BIT(INNER_DST_PORT);
4423 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4424 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4425 *unused |= BIT(INNER_SRC_IP);
4427 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4428 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4429 *unused |= BIT(INNER_DST_IP);
4431 if (!usr_ip6_spec->l4_proto)
4432 *unused |= BIT(INNER_IP_PROTO);
4434 if (usr_ip6_spec->tclass)
4437 if (usr_ip6_spec->l4_4_bytes)
4442 ether_spec = &fs->h_u.ether_spec;
4443 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4444 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4445 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4447 if (is_zero_ether_addr(ether_spec->h_source))
4448 *unused |= BIT(INNER_SRC_MAC);
4450 if (is_zero_ether_addr(ether_spec->h_dest))
4451 *unused |= BIT(INNER_DST_MAC);
4453 if (!ether_spec->h_proto)
4454 *unused |= BIT(INNER_ETH_TYPE);
4461 if ((fs->flow_type & FLOW_EXT)) {
4462 if (fs->h_ext.vlan_etype)
4464 if (!fs->h_ext.vlan_tci)
4465 *unused |= BIT(INNER_VLAN_TAG_FST);
4467 if (fs->m_ext.vlan_tci) {
4468 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4472 *unused |= BIT(INNER_VLAN_TAG_FST);
4475 if (fs->flow_type & FLOW_MAC_EXT) {
4476 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4479 if (is_zero_ether_addr(fs->h_ext.h_dest))
4480 *unused |= BIT(INNER_DST_MAC);
4482 *unused &= ~(BIT(INNER_DST_MAC));
4488 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4490 struct hclge_fd_rule *rule = NULL;
4491 struct hlist_node *node2;
4493 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4494 if (rule->location >= location)
4498 return rule && rule->location == location;
4501 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4502 struct hclge_fd_rule *new_rule,
4506 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4507 struct hlist_node *node2;
4509 if (is_add && !new_rule)
4512 hlist_for_each_entry_safe(rule, node2,
4513 &hdev->fd_rule_list, rule_node) {
4514 if (rule->location >= location)
4519 if (rule && rule->location == location) {
4520 hlist_del(&rule->rule_node);
4522 hdev->hclge_fd_rule_num--;
4527 } else if (!is_add) {
4528 dev_err(&hdev->pdev->dev,
4529 "delete fail, rule %d is inexistent\n",
4534 INIT_HLIST_NODE(&new_rule->rule_node);
4537 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4539 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4541 hdev->hclge_fd_rule_num++;
4546 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4547 struct ethtool_rx_flow_spec *fs,
4548 struct hclge_fd_rule *rule)
4550 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4552 switch (flow_type) {
4556 rule->tuples.src_ip[3] =
4557 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4558 rule->tuples_mask.src_ip[3] =
4559 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4561 rule->tuples.dst_ip[3] =
4562 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4563 rule->tuples_mask.dst_ip[3] =
4564 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4566 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4567 rule->tuples_mask.src_port =
4568 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4570 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4571 rule->tuples_mask.dst_port =
4572 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4574 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4575 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4577 rule->tuples.ether_proto = ETH_P_IP;
4578 rule->tuples_mask.ether_proto = 0xFFFF;
4582 rule->tuples.src_ip[3] =
4583 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4584 rule->tuples_mask.src_ip[3] =
4585 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4587 rule->tuples.dst_ip[3] =
4588 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4589 rule->tuples_mask.dst_ip[3] =
4590 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4592 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4593 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4595 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4596 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4598 rule->tuples.ether_proto = ETH_P_IP;
4599 rule->tuples_mask.ether_proto = 0xFFFF;
4605 be32_to_cpu_array(rule->tuples.src_ip,
4606 fs->h_u.tcp_ip6_spec.ip6src, 4);
4607 be32_to_cpu_array(rule->tuples_mask.src_ip,
4608 fs->m_u.tcp_ip6_spec.ip6src, 4);
4610 be32_to_cpu_array(rule->tuples.dst_ip,
4611 fs->h_u.tcp_ip6_spec.ip6dst, 4);
4612 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4613 fs->m_u.tcp_ip6_spec.ip6dst, 4);
4615 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4616 rule->tuples_mask.src_port =
4617 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4619 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4620 rule->tuples_mask.dst_port =
4621 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4623 rule->tuples.ether_proto = ETH_P_IPV6;
4624 rule->tuples_mask.ether_proto = 0xFFFF;
4627 case IPV6_USER_FLOW:
4628 be32_to_cpu_array(rule->tuples.src_ip,
4629 fs->h_u.usr_ip6_spec.ip6src, 4);
4630 be32_to_cpu_array(rule->tuples_mask.src_ip,
4631 fs->m_u.usr_ip6_spec.ip6src, 4);
4633 be32_to_cpu_array(rule->tuples.dst_ip,
4634 fs->h_u.usr_ip6_spec.ip6dst, 4);
4635 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4636 fs->m_u.usr_ip6_spec.ip6dst, 4);
4638 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4639 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4641 rule->tuples.ether_proto = ETH_P_IPV6;
4642 rule->tuples_mask.ether_proto = 0xFFFF;
4646 ether_addr_copy(rule->tuples.src_mac,
4647 fs->h_u.ether_spec.h_source);
4648 ether_addr_copy(rule->tuples_mask.src_mac,
4649 fs->m_u.ether_spec.h_source);
4651 ether_addr_copy(rule->tuples.dst_mac,
4652 fs->h_u.ether_spec.h_dest);
4653 ether_addr_copy(rule->tuples_mask.dst_mac,
4654 fs->m_u.ether_spec.h_dest);
4656 rule->tuples.ether_proto =
4657 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4658 rule->tuples_mask.ether_proto =
4659 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4666 switch (flow_type) {
4669 rule->tuples.ip_proto = IPPROTO_SCTP;
4670 rule->tuples_mask.ip_proto = 0xFF;
4674 rule->tuples.ip_proto = IPPROTO_TCP;
4675 rule->tuples_mask.ip_proto = 0xFF;
4679 rule->tuples.ip_proto = IPPROTO_UDP;
4680 rule->tuples_mask.ip_proto = 0xFF;
4686 if ((fs->flow_type & FLOW_EXT)) {
4687 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4688 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4691 if (fs->flow_type & FLOW_MAC_EXT) {
4692 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4693 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4699 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4700 struct ethtool_rxnfc *cmd)
4702 struct hclge_vport *vport = hclge_get_vport(handle);
4703 struct hclge_dev *hdev = vport->back;
4704 u16 dst_vport_id = 0, q_index = 0;
4705 struct ethtool_rx_flow_spec *fs;
4706 struct hclge_fd_rule *rule;
4711 if (!hnae3_dev_fd_supported(hdev))
4714 if (!hdev->fd_cfg.fd_en) {
4715 dev_warn(&hdev->pdev->dev,
4716 "Please enable flow director first\n");
4720 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4722 ret = hclge_fd_check_spec(hdev, fs, &unused);
4724 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4728 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4729 action = HCLGE_FD_ACTION_DROP_PACKET;
4731 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4732 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4735 if (vf > hdev->num_req_vfs) {
4736 dev_err(&hdev->pdev->dev,
4737 "Error: vf id (%d) > max vf num (%d)\n",
4738 vf, hdev->num_req_vfs);
4742 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4743 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4746 dev_err(&hdev->pdev->dev,
4747 "Error: queue id (%d) > max tqp num (%d)\n",
4752 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4756 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4760 ret = hclge_fd_get_tuple(hdev, fs, rule);
4764 rule->flow_type = fs->flow_type;
4766 rule->location = fs->location;
4767 rule->unused_tuple = unused;
4768 rule->vf_id = dst_vport_id;
4769 rule->queue_id = q_index;
4770 rule->action = action;
4772 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4776 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4780 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4791 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4792 struct ethtool_rxnfc *cmd)
4794 struct hclge_vport *vport = hclge_get_vport(handle);
4795 struct hclge_dev *hdev = vport->back;
4796 struct ethtool_rx_flow_spec *fs;
4799 if (!hnae3_dev_fd_supported(hdev))
4802 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4804 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4807 if (!hclge_fd_rule_exist(hdev, fs->location)) {
4808 dev_err(&hdev->pdev->dev,
4809 "Delete fail, rule %d is inexistent\n",
4814 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4815 fs->location, NULL, false);
4819 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4823 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4826 struct hclge_vport *vport = hclge_get_vport(handle);
4827 struct hclge_dev *hdev = vport->back;
4828 struct hclge_fd_rule *rule;
4829 struct hlist_node *node;
4831 if (!hnae3_dev_fd_supported(hdev))
4835 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4837 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4838 rule->location, NULL, false);
4839 hlist_del(&rule->rule_node);
4841 hdev->hclge_fd_rule_num--;
4844 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4846 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4847 rule->location, NULL, false);
4851 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4853 struct hclge_vport *vport = hclge_get_vport(handle);
4854 struct hclge_dev *hdev = vport->back;
4855 struct hclge_fd_rule *rule;
4856 struct hlist_node *node;
4859 /* Return ok here, because reset error handling will check this
4860 * return value. If error is returned here, the reset process will
4863 if (!hnae3_dev_fd_supported(hdev))
4866 /* if fd is disabled, should not restore it when reset */
4867 if (!hdev->fd_cfg.fd_en)
4870 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4871 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4873 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4876 dev_warn(&hdev->pdev->dev,
4877 "Restore rule %d failed, remove it\n",
4879 hlist_del(&rule->rule_node);
4881 hdev->hclge_fd_rule_num--;
4887 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4888 struct ethtool_rxnfc *cmd)
4890 struct hclge_vport *vport = hclge_get_vport(handle);
4891 struct hclge_dev *hdev = vport->back;
4893 if (!hnae3_dev_fd_supported(hdev))
4896 cmd->rule_cnt = hdev->hclge_fd_rule_num;
4897 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4902 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4903 struct ethtool_rxnfc *cmd)
4905 struct hclge_vport *vport = hclge_get_vport(handle);
4906 struct hclge_fd_rule *rule = NULL;
4907 struct hclge_dev *hdev = vport->back;
4908 struct ethtool_rx_flow_spec *fs;
4909 struct hlist_node *node2;
4911 if (!hnae3_dev_fd_supported(hdev))
4914 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4916 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4917 if (rule->location >= fs->location)
4921 if (!rule || fs->location != rule->location)
4924 fs->flow_type = rule->flow_type;
4925 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4929 fs->h_u.tcp_ip4_spec.ip4src =
4930 cpu_to_be32(rule->tuples.src_ip[3]);
4931 fs->m_u.tcp_ip4_spec.ip4src =
4932 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4933 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4935 fs->h_u.tcp_ip4_spec.ip4dst =
4936 cpu_to_be32(rule->tuples.dst_ip[3]);
4937 fs->m_u.tcp_ip4_spec.ip4dst =
4938 rule->unused_tuple & BIT(INNER_DST_IP) ?
4939 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4941 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4942 fs->m_u.tcp_ip4_spec.psrc =
4943 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4944 0 : cpu_to_be16(rule->tuples_mask.src_port);
4946 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4947 fs->m_u.tcp_ip4_spec.pdst =
4948 rule->unused_tuple & BIT(INNER_DST_PORT) ?
4949 0 : cpu_to_be16(rule->tuples_mask.dst_port);
4951 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
4952 fs->m_u.tcp_ip4_spec.tos =
4953 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4954 0 : rule->tuples_mask.ip_tos;
4958 fs->h_u.usr_ip4_spec.ip4src =
4959 cpu_to_be32(rule->tuples.src_ip[3]);
4960 fs->m_u.tcp_ip4_spec.ip4src =
4961 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4962 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4964 fs->h_u.usr_ip4_spec.ip4dst =
4965 cpu_to_be32(rule->tuples.dst_ip[3]);
4966 fs->m_u.usr_ip4_spec.ip4dst =
4967 rule->unused_tuple & BIT(INNER_DST_IP) ?
4968 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4970 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
4971 fs->m_u.usr_ip4_spec.tos =
4972 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4973 0 : rule->tuples_mask.ip_tos;
4975 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
4976 fs->m_u.usr_ip4_spec.proto =
4977 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4978 0 : rule->tuples_mask.ip_proto;
4980 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
4986 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
4987 rule->tuples.src_ip, 4);
4988 if (rule->unused_tuple & BIT(INNER_SRC_IP))
4989 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
4991 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
4992 rule->tuples_mask.src_ip, 4);
4994 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
4995 rule->tuples.dst_ip, 4);
4996 if (rule->unused_tuple & BIT(INNER_DST_IP))
4997 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4999 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5000 rule->tuples_mask.dst_ip, 4);
5002 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5003 fs->m_u.tcp_ip6_spec.psrc =
5004 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5005 0 : cpu_to_be16(rule->tuples_mask.src_port);
5007 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5008 fs->m_u.tcp_ip6_spec.pdst =
5009 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5010 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5013 case IPV6_USER_FLOW:
5014 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5015 rule->tuples.src_ip, 4);
5016 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5017 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5019 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5020 rule->tuples_mask.src_ip, 4);
5022 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5023 rule->tuples.dst_ip, 4);
5024 if (rule->unused_tuple & BIT(INNER_DST_IP))
5025 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5027 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5028 rule->tuples_mask.dst_ip, 4);
5030 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5031 fs->m_u.usr_ip6_spec.l4_proto =
5032 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5033 0 : rule->tuples_mask.ip_proto;
5037 ether_addr_copy(fs->h_u.ether_spec.h_source,
5038 rule->tuples.src_mac);
5039 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5040 eth_zero_addr(fs->m_u.ether_spec.h_source);
5042 ether_addr_copy(fs->m_u.ether_spec.h_source,
5043 rule->tuples_mask.src_mac);
5045 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5046 rule->tuples.dst_mac);
5047 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5048 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5050 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5051 rule->tuples_mask.dst_mac);
5053 fs->h_u.ether_spec.h_proto =
5054 cpu_to_be16(rule->tuples.ether_proto);
5055 fs->m_u.ether_spec.h_proto =
5056 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5057 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5064 if (fs->flow_type & FLOW_EXT) {
5065 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5066 fs->m_ext.vlan_tci =
5067 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5068 cpu_to_be16(VLAN_VID_MASK) :
5069 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5072 if (fs->flow_type & FLOW_MAC_EXT) {
5073 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5074 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5075 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5077 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5078 rule->tuples_mask.dst_mac);
5081 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5082 fs->ring_cookie = RX_CLS_FLOW_DISC;
5086 fs->ring_cookie = rule->queue_id;
5087 vf_id = rule->vf_id;
5088 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5089 fs->ring_cookie |= vf_id;
5095 static int hclge_get_all_rules(struct hnae3_handle *handle,
5096 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5098 struct hclge_vport *vport = hclge_get_vport(handle);
5099 struct hclge_dev *hdev = vport->back;
5100 struct hclge_fd_rule *rule;
5101 struct hlist_node *node2;
5104 if (!hnae3_dev_fd_supported(hdev))
5107 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5109 hlist_for_each_entry_safe(rule, node2,
5110 &hdev->fd_rule_list, rule_node) {
5111 if (cnt == cmd->rule_cnt)
5114 rule_locs[cnt] = rule->location;
5118 cmd->rule_cnt = cnt;
5123 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5125 struct hclge_vport *vport = hclge_get_vport(handle);
5126 struct hclge_dev *hdev = vport->back;
5128 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5129 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5132 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5134 struct hclge_vport *vport = hclge_get_vport(handle);
5135 struct hclge_dev *hdev = vport->back;
5137 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5140 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5142 struct hclge_vport *vport = hclge_get_vport(handle);
5143 struct hclge_dev *hdev = vport->back;
5145 return hdev->reset_count;
5148 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5150 struct hclge_vport *vport = hclge_get_vport(handle);
5151 struct hclge_dev *hdev = vport->back;
5153 hdev->fd_cfg.fd_en = enable;
5155 hclge_del_all_fd_entries(handle, false);
5157 hclge_restore_fd_entries(handle);
5160 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5162 struct hclge_desc desc;
5163 struct hclge_config_mac_mode_cmd *req =
5164 (struct hclge_config_mac_mode_cmd *)desc.data;
5168 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5169 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5170 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5171 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5172 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5173 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5174 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5175 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5176 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5177 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5178 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5179 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5180 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5181 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5182 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5183 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5185 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5187 dev_err(&hdev->pdev->dev,
5188 "mac enable fail, ret =%d.\n", ret);
5191 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5193 struct hclge_config_mac_mode_cmd *req;
5194 struct hclge_desc desc;
5198 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5199 /* 1 Read out the MAC mode config at first */
5200 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5201 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5203 dev_err(&hdev->pdev->dev,
5204 "mac loopback get fail, ret =%d.\n", ret);
5208 /* 2 Then setup the loopback flag */
5209 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5210 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5211 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5212 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5214 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5216 /* 3 Config mac work mode with loopback flag
5217 * and its original configure parameters
5219 hclge_cmd_reuse_desc(&desc, false);
5220 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5222 dev_err(&hdev->pdev->dev,
5223 "mac loopback set fail, ret =%d.\n", ret);
5227 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5228 enum hnae3_loop loop_mode)
5230 #define HCLGE_SERDES_RETRY_MS 10
5231 #define HCLGE_SERDES_RETRY_NUM 100
5232 struct hclge_serdes_lb_cmd *req;
5233 struct hclge_desc desc;
5237 req = (struct hclge_serdes_lb_cmd *)desc.data;
5238 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5240 switch (loop_mode) {
5241 case HNAE3_LOOP_SERIAL_SERDES:
5242 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5244 case HNAE3_LOOP_PARALLEL_SERDES:
5245 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5248 dev_err(&hdev->pdev->dev,
5249 "unsupported serdes loopback mode %d\n", loop_mode);
5254 req->enable = loop_mode_b;
5255 req->mask = loop_mode_b;
5257 req->mask = loop_mode_b;
5260 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5262 dev_err(&hdev->pdev->dev,
5263 "serdes loopback set fail, ret = %d\n", ret);
5268 msleep(HCLGE_SERDES_RETRY_MS);
5269 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5271 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5273 dev_err(&hdev->pdev->dev,
5274 "serdes loopback get, ret = %d\n", ret);
5277 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5278 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5280 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5281 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5283 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5284 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5288 hclge_cfg_mac_mode(hdev, en);
5292 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5293 int stream_id, bool enable)
5295 struct hclge_desc desc;
5296 struct hclge_cfg_com_tqp_queue_cmd *req =
5297 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5300 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5301 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5302 req->stream_id = cpu_to_le16(stream_id);
5303 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5305 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5307 dev_err(&hdev->pdev->dev,
5308 "Tqp enable fail, status =%d.\n", ret);
5312 static int hclge_set_loopback(struct hnae3_handle *handle,
5313 enum hnae3_loop loop_mode, bool en)
5315 struct hclge_vport *vport = hclge_get_vport(handle);
5316 struct hnae3_knic_private_info *kinfo;
5317 struct hclge_dev *hdev = vport->back;
5320 switch (loop_mode) {
5321 case HNAE3_LOOP_APP:
5322 ret = hclge_set_app_loopback(hdev, en);
5324 case HNAE3_LOOP_SERIAL_SERDES:
5325 case HNAE3_LOOP_PARALLEL_SERDES:
5326 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5330 dev_err(&hdev->pdev->dev,
5331 "loop_mode %d is not supported\n", loop_mode);
5338 kinfo = &vport->nic.kinfo;
5339 for (i = 0; i < kinfo->num_tqps; i++) {
5340 ret = hclge_tqp_enable(hdev, i, 0, en);
5348 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5350 struct hclge_vport *vport = hclge_get_vport(handle);
5351 struct hnae3_knic_private_info *kinfo;
5352 struct hnae3_queue *queue;
5353 struct hclge_tqp *tqp;
5356 kinfo = &vport->nic.kinfo;
5357 for (i = 0; i < kinfo->num_tqps; i++) {
5358 queue = handle->kinfo.tqp[i];
5359 tqp = container_of(queue, struct hclge_tqp, q);
5360 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5364 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5366 struct hclge_vport *vport = hclge_get_vport(handle);
5367 struct hclge_dev *hdev = vport->back;
5370 mod_timer(&hdev->service_timer, jiffies + HZ);
5372 del_timer_sync(&hdev->service_timer);
5373 cancel_work_sync(&hdev->service_task);
5374 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5378 static int hclge_ae_start(struct hnae3_handle *handle)
5380 struct hclge_vport *vport = hclge_get_vport(handle);
5381 struct hclge_dev *hdev = vport->back;
5384 hclge_cfg_mac_mode(hdev, true);
5385 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5386 hdev->hw.mac.link = 0;
5388 /* reset tqp stats */
5389 hclge_reset_tqp_stats(handle);
5391 hclge_mac_start_phy(hdev);
5396 static void hclge_ae_stop(struct hnae3_handle *handle)
5398 struct hclge_vport *vport = hclge_get_vport(handle);
5399 struct hclge_dev *hdev = vport->back;
5402 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5404 /* If it is not PF reset, the firmware will disable the MAC,
5405 * so it only need to stop phy here.
5407 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5408 hdev->reset_type != HNAE3_FUNC_RESET) {
5409 hclge_mac_stop_phy(hdev);
5413 for (i = 0; i < handle->kinfo.num_tqps; i++)
5414 hclge_reset_tqp(handle, i);
5417 hclge_cfg_mac_mode(hdev, false);
5419 hclge_mac_stop_phy(hdev);
5421 /* reset tqp stats */
5422 hclge_reset_tqp_stats(handle);
5423 hclge_update_link_status(hdev);
5426 int hclge_vport_start(struct hclge_vport *vport)
5428 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5429 vport->last_active_jiffies = jiffies;
5433 void hclge_vport_stop(struct hclge_vport *vport)
5435 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5438 static int hclge_client_start(struct hnae3_handle *handle)
5440 struct hclge_vport *vport = hclge_get_vport(handle);
5442 return hclge_vport_start(vport);
5445 static void hclge_client_stop(struct hnae3_handle *handle)
5447 struct hclge_vport *vport = hclge_get_vport(handle);
5449 hclge_vport_stop(vport);
5452 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5453 u16 cmdq_resp, u8 resp_code,
5454 enum hclge_mac_vlan_tbl_opcode op)
5456 struct hclge_dev *hdev = vport->back;
5457 int return_status = -EIO;
5460 dev_err(&hdev->pdev->dev,
5461 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5466 if (op == HCLGE_MAC_VLAN_ADD) {
5467 if ((!resp_code) || (resp_code == 1)) {
5469 } else if (resp_code == 2) {
5470 return_status = -ENOSPC;
5471 dev_err(&hdev->pdev->dev,
5472 "add mac addr failed for uc_overflow.\n");
5473 } else if (resp_code == 3) {
5474 return_status = -ENOSPC;
5475 dev_err(&hdev->pdev->dev,
5476 "add mac addr failed for mc_overflow.\n");
5478 dev_err(&hdev->pdev->dev,
5479 "add mac addr failed for undefined, code=%d.\n",
5482 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5485 } else if (resp_code == 1) {
5486 return_status = -ENOENT;
5487 dev_dbg(&hdev->pdev->dev,
5488 "remove mac addr failed for miss.\n");
5490 dev_err(&hdev->pdev->dev,
5491 "remove mac addr failed for undefined, code=%d.\n",
5494 } else if (op == HCLGE_MAC_VLAN_LKUP) {
5497 } else if (resp_code == 1) {
5498 return_status = -ENOENT;
5499 dev_dbg(&hdev->pdev->dev,
5500 "lookup mac addr failed for miss.\n");
5502 dev_err(&hdev->pdev->dev,
5503 "lookup mac addr failed for undefined, code=%d.\n",
5507 return_status = -EINVAL;
5508 dev_err(&hdev->pdev->dev,
5509 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5513 return return_status;
5516 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5521 if (vfid > 255 || vfid < 0)
5524 if (vfid >= 0 && vfid <= 191) {
5525 word_num = vfid / 32;
5526 bit_num = vfid % 32;
5528 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5530 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5532 word_num = (vfid - 192) / 32;
5533 bit_num = vfid % 32;
5535 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5537 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5543 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5545 #define HCLGE_DESC_NUMBER 3
5546 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5549 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5550 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5551 if (desc[i].data[j])
5557 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5560 const unsigned char *mac_addr = addr;
5561 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5562 (mac_addr[0]) | (mac_addr[1] << 8);
5563 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
5565 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5566 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5569 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5570 struct hclge_mac_vlan_tbl_entry_cmd *req)
5572 struct hclge_dev *hdev = vport->back;
5573 struct hclge_desc desc;
5578 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5580 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5582 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5584 dev_err(&hdev->pdev->dev,
5585 "del mac addr failed for cmd_send, ret =%d.\n",
5589 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5590 retval = le16_to_cpu(desc.retval);
5592 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5593 HCLGE_MAC_VLAN_REMOVE);
5596 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5597 struct hclge_mac_vlan_tbl_entry_cmd *req,
5598 struct hclge_desc *desc,
5601 struct hclge_dev *hdev = vport->back;
5606 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5608 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5609 memcpy(desc[0].data,
5611 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5612 hclge_cmd_setup_basic_desc(&desc[1],
5613 HCLGE_OPC_MAC_VLAN_ADD,
5615 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5616 hclge_cmd_setup_basic_desc(&desc[2],
5617 HCLGE_OPC_MAC_VLAN_ADD,
5619 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5621 memcpy(desc[0].data,
5623 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5624 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5627 dev_err(&hdev->pdev->dev,
5628 "lookup mac addr failed for cmd_send, ret =%d.\n",
5632 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5633 retval = le16_to_cpu(desc[0].retval);
5635 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5636 HCLGE_MAC_VLAN_LKUP);
5639 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5640 struct hclge_mac_vlan_tbl_entry_cmd *req,
5641 struct hclge_desc *mc_desc)
5643 struct hclge_dev *hdev = vport->back;
5650 struct hclge_desc desc;
5652 hclge_cmd_setup_basic_desc(&desc,
5653 HCLGE_OPC_MAC_VLAN_ADD,
5655 memcpy(desc.data, req,
5656 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5657 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5658 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5659 retval = le16_to_cpu(desc.retval);
5661 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5663 HCLGE_MAC_VLAN_ADD);
5665 hclge_cmd_reuse_desc(&mc_desc[0], false);
5666 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5667 hclge_cmd_reuse_desc(&mc_desc[1], false);
5668 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5669 hclge_cmd_reuse_desc(&mc_desc[2], false);
5670 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5671 memcpy(mc_desc[0].data, req,
5672 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5673 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5674 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5675 retval = le16_to_cpu(mc_desc[0].retval);
5677 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5679 HCLGE_MAC_VLAN_ADD);
5683 dev_err(&hdev->pdev->dev,
5684 "add mac addr failed for cmd_send, ret =%d.\n",
5692 static int hclge_init_umv_space(struct hclge_dev *hdev)
5694 u16 allocated_size = 0;
5697 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5702 if (allocated_size < hdev->wanted_umv_size)
5703 dev_warn(&hdev->pdev->dev,
5704 "Alloc umv space failed, want %d, get %d\n",
5705 hdev->wanted_umv_size, allocated_size);
5707 mutex_init(&hdev->umv_mutex);
5708 hdev->max_umv_size = allocated_size;
5709 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5710 hdev->share_umv_size = hdev->priv_umv_size +
5711 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5716 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5720 if (hdev->max_umv_size > 0) {
5721 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5725 hdev->max_umv_size = 0;
5727 mutex_destroy(&hdev->umv_mutex);
5732 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5733 u16 *allocated_size, bool is_alloc)
5735 struct hclge_umv_spc_alc_cmd *req;
5736 struct hclge_desc desc;
5739 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5740 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5741 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5742 req->space_size = cpu_to_le32(space_size);
5744 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5746 dev_err(&hdev->pdev->dev,
5747 "%s umv space failed for cmd_send, ret =%d\n",
5748 is_alloc ? "allocate" : "free", ret);
5752 if (is_alloc && allocated_size)
5753 *allocated_size = le32_to_cpu(desc.data[1]);
5758 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5760 struct hclge_vport *vport;
5763 for (i = 0; i < hdev->num_alloc_vport; i++) {
5764 vport = &hdev->vport[i];
5765 vport->used_umv_num = 0;
5768 mutex_lock(&hdev->umv_mutex);
5769 hdev->share_umv_size = hdev->priv_umv_size +
5770 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5771 mutex_unlock(&hdev->umv_mutex);
5774 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5776 struct hclge_dev *hdev = vport->back;
5779 mutex_lock(&hdev->umv_mutex);
5780 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5781 hdev->share_umv_size == 0);
5782 mutex_unlock(&hdev->umv_mutex);
5787 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5789 struct hclge_dev *hdev = vport->back;
5791 mutex_lock(&hdev->umv_mutex);
5793 if (vport->used_umv_num > hdev->priv_umv_size)
5794 hdev->share_umv_size++;
5795 vport->used_umv_num--;
5797 if (vport->used_umv_num >= hdev->priv_umv_size)
5798 hdev->share_umv_size--;
5799 vport->used_umv_num++;
5801 mutex_unlock(&hdev->umv_mutex);
5804 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5805 const unsigned char *addr)
5807 struct hclge_vport *vport = hclge_get_vport(handle);
5809 return hclge_add_uc_addr_common(vport, addr);
5812 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5813 const unsigned char *addr)
5815 struct hclge_dev *hdev = vport->back;
5816 struct hclge_mac_vlan_tbl_entry_cmd req;
5817 struct hclge_desc desc;
5818 u16 egress_port = 0;
5821 /* mac addr check */
5822 if (is_zero_ether_addr(addr) ||
5823 is_broadcast_ether_addr(addr) ||
5824 is_multicast_ether_addr(addr)) {
5825 dev_err(&hdev->pdev->dev,
5826 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5828 is_zero_ether_addr(addr),
5829 is_broadcast_ether_addr(addr),
5830 is_multicast_ether_addr(addr));
5834 memset(&req, 0, sizeof(req));
5835 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5837 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5838 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5840 req.egress_port = cpu_to_le16(egress_port);
5842 hclge_prepare_mac_addr(&req, addr);
5844 /* Lookup the mac address in the mac_vlan table, and add
5845 * it if the entry is inexistent. Repeated unicast entry
5846 * is not allowed in the mac vlan table.
5848 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5849 if (ret == -ENOENT) {
5850 if (!hclge_is_umv_space_full(vport)) {
5851 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5853 hclge_update_umv_space(vport, false);
5857 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5858 hdev->priv_umv_size);
5863 /* check if we just hit the duplicate */
5867 dev_err(&hdev->pdev->dev,
5868 "PF failed to add unicast entry(%pM) in the MAC table\n",
5874 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5875 const unsigned char *addr)
5877 struct hclge_vport *vport = hclge_get_vport(handle);
5879 return hclge_rm_uc_addr_common(vport, addr);
5882 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5883 const unsigned char *addr)
5885 struct hclge_dev *hdev = vport->back;
5886 struct hclge_mac_vlan_tbl_entry_cmd req;
5889 /* mac addr check */
5890 if (is_zero_ether_addr(addr) ||
5891 is_broadcast_ether_addr(addr) ||
5892 is_multicast_ether_addr(addr)) {
5893 dev_dbg(&hdev->pdev->dev,
5894 "Remove mac err! invalid mac:%pM.\n",
5899 memset(&req, 0, sizeof(req));
5900 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5901 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5902 hclge_prepare_mac_addr(&req, addr);
5903 ret = hclge_remove_mac_vlan_tbl(vport, &req);
5905 hclge_update_umv_space(vport, true);
5910 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5911 const unsigned char *addr)
5913 struct hclge_vport *vport = hclge_get_vport(handle);
5915 return hclge_add_mc_addr_common(vport, addr);
5918 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5919 const unsigned char *addr)
5921 struct hclge_dev *hdev = vport->back;
5922 struct hclge_mac_vlan_tbl_entry_cmd req;
5923 struct hclge_desc desc[3];
5926 /* mac addr check */
5927 if (!is_multicast_ether_addr(addr)) {
5928 dev_err(&hdev->pdev->dev,
5929 "Add mc mac err! invalid mac:%pM.\n",
5933 memset(&req, 0, sizeof(req));
5934 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5935 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5936 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5937 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5938 hclge_prepare_mac_addr(&req, addr);
5939 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5941 /* This mac addr exist, update VFID for it */
5942 hclge_update_desc_vfid(desc, vport->vport_id, false);
5943 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5945 /* This mac addr do not exist, add new entry for it */
5946 memset(desc[0].data, 0, sizeof(desc[0].data));
5947 memset(desc[1].data, 0, sizeof(desc[0].data));
5948 memset(desc[2].data, 0, sizeof(desc[0].data));
5949 hclge_update_desc_vfid(desc, vport->vport_id, false);
5950 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5953 if (status == -ENOSPC)
5954 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
5959 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
5960 const unsigned char *addr)
5962 struct hclge_vport *vport = hclge_get_vport(handle);
5964 return hclge_rm_mc_addr_common(vport, addr);
5967 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
5968 const unsigned char *addr)
5970 struct hclge_dev *hdev = vport->back;
5971 struct hclge_mac_vlan_tbl_entry_cmd req;
5972 enum hclge_cmd_status status;
5973 struct hclge_desc desc[3];
5975 /* mac addr check */
5976 if (!is_multicast_ether_addr(addr)) {
5977 dev_dbg(&hdev->pdev->dev,
5978 "Remove mc mac err! invalid mac:%pM.\n",
5983 memset(&req, 0, sizeof(req));
5984 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5985 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5986 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5987 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5988 hclge_prepare_mac_addr(&req, addr);
5989 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5991 /* This mac addr exist, remove this handle's VFID for it */
5992 hclge_update_desc_vfid(desc, vport->vport_id, true);
5994 if (hclge_is_all_function_id_zero(desc))
5995 /* All the vfid is zero, so need to delete this entry */
5996 status = hclge_remove_mac_vlan_tbl(vport, &req);
5998 /* Not all the vfid is zero, update the vfid */
5999 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6002 /* Maybe this mac address is in mta table, but it cannot be
6003 * deleted here because an entry of mta represents an address
6004 * range rather than a specific address. the delete action to
6005 * all entries will take effect in update_mta_status called by
6006 * hns3_nic_set_rx_mode.
6014 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6015 u16 cmdq_resp, u8 resp_code)
6017 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6018 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6019 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6020 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6025 dev_err(&hdev->pdev->dev,
6026 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6031 switch (resp_code) {
6032 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6033 case HCLGE_ETHERTYPE_ALREADY_ADD:
6036 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6037 dev_err(&hdev->pdev->dev,
6038 "add mac ethertype failed for manager table overflow.\n");
6039 return_status = -EIO;
6041 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6042 dev_err(&hdev->pdev->dev,
6043 "add mac ethertype failed for key conflict.\n");
6044 return_status = -EIO;
6047 dev_err(&hdev->pdev->dev,
6048 "add mac ethertype failed for undefined, code=%d.\n",
6050 return_status = -EIO;
6053 return return_status;
6056 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6057 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6059 struct hclge_desc desc;
6064 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6065 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6067 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6069 dev_err(&hdev->pdev->dev,
6070 "add mac ethertype failed for cmd_send, ret =%d.\n",
6075 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6076 retval = le16_to_cpu(desc.retval);
6078 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6081 static int init_mgr_tbl(struct hclge_dev *hdev)
6086 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6087 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6089 dev_err(&hdev->pdev->dev,
6090 "add mac ethertype failed, ret =%d.\n",
6099 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6101 struct hclge_vport *vport = hclge_get_vport(handle);
6102 struct hclge_dev *hdev = vport->back;
6104 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6107 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6110 const unsigned char *new_addr = (const unsigned char *)p;
6111 struct hclge_vport *vport = hclge_get_vport(handle);
6112 struct hclge_dev *hdev = vport->back;
6115 /* mac addr check */
6116 if (is_zero_ether_addr(new_addr) ||
6117 is_broadcast_ether_addr(new_addr) ||
6118 is_multicast_ether_addr(new_addr)) {
6119 dev_err(&hdev->pdev->dev,
6120 "Change uc mac err! invalid mac:%p.\n",
6125 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6126 dev_warn(&hdev->pdev->dev,
6127 "remove old uc mac address fail.\n");
6129 ret = hclge_add_uc_addr(handle, new_addr);
6131 dev_err(&hdev->pdev->dev,
6132 "add uc mac address fail, ret =%d.\n",
6136 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6137 dev_err(&hdev->pdev->dev,
6138 "restore uc mac address fail.\n");
6143 ret = hclge_pause_addr_cfg(hdev, new_addr);
6145 dev_err(&hdev->pdev->dev,
6146 "configure mac pause address fail, ret =%d.\n",
6151 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6156 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6159 struct hclge_vport *vport = hclge_get_vport(handle);
6160 struct hclge_dev *hdev = vport->back;
6162 if (!hdev->hw.mac.phydev)
6165 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6168 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6169 u8 fe_type, bool filter_en)
6171 struct hclge_vlan_filter_ctrl_cmd *req;
6172 struct hclge_desc desc;
6175 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6177 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6178 req->vlan_type = vlan_type;
6179 req->vlan_fe = filter_en ? fe_type : 0;
6181 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6183 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6189 #define HCLGE_FILTER_TYPE_VF 0
6190 #define HCLGE_FILTER_TYPE_PORT 1
6191 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6192 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6193 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6194 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6195 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6196 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6197 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6198 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6199 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6201 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6203 struct hclge_vport *vport = hclge_get_vport(handle);
6204 struct hclge_dev *hdev = vport->back;
6206 if (hdev->pdev->revision >= 0x21) {
6207 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6208 HCLGE_FILTER_FE_EGRESS, enable);
6209 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6210 HCLGE_FILTER_FE_INGRESS, enable);
6212 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6213 HCLGE_FILTER_FE_EGRESS_V1_B, enable);
6216 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6218 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6221 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6222 bool is_kill, u16 vlan, u8 qos,
6225 #define HCLGE_MAX_VF_BYTES 16
6226 struct hclge_vlan_filter_vf_cfg_cmd *req0;
6227 struct hclge_vlan_filter_vf_cfg_cmd *req1;
6228 struct hclge_desc desc[2];
6233 hclge_cmd_setup_basic_desc(&desc[0],
6234 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6235 hclge_cmd_setup_basic_desc(&desc[1],
6236 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6238 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6240 vf_byte_off = vfid / 8;
6241 vf_byte_val = 1 << (vfid % 8);
6243 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6244 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6246 req0->vlan_id = cpu_to_le16(vlan);
6247 req0->vlan_cfg = is_kill;
6249 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6250 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6252 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6254 ret = hclge_cmd_send(&hdev->hw, desc, 2);
6256 dev_err(&hdev->pdev->dev,
6257 "Send vf vlan command fail, ret =%d.\n",
6263 #define HCLGE_VF_VLAN_NO_ENTRY 2
6264 if (!req0->resp_code || req0->resp_code == 1)
6267 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6268 dev_warn(&hdev->pdev->dev,
6269 "vf vlan table is full, vf vlan filter is disabled\n");
6273 dev_err(&hdev->pdev->dev,
6274 "Add vf vlan filter fail, ret =%d.\n",
6277 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
6278 if (!req0->resp_code)
6281 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6282 dev_warn(&hdev->pdev->dev,
6283 "vlan %d filter is not in vf vlan table\n",
6288 dev_err(&hdev->pdev->dev,
6289 "Kill vf vlan filter fail, ret =%d.\n",
6296 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6297 u16 vlan_id, bool is_kill)
6299 struct hclge_vlan_filter_pf_cfg_cmd *req;
6300 struct hclge_desc desc;
6301 u8 vlan_offset_byte_val;
6302 u8 vlan_offset_byte;
6306 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6308 vlan_offset_160 = vlan_id / 160;
6309 vlan_offset_byte = (vlan_id % 160) / 8;
6310 vlan_offset_byte_val = 1 << (vlan_id % 8);
6312 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6313 req->vlan_offset = vlan_offset_160;
6314 req->vlan_cfg = is_kill;
6315 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6317 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6319 dev_err(&hdev->pdev->dev,
6320 "port vlan command, send fail, ret =%d.\n", ret);
6324 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6325 u16 vport_id, u16 vlan_id, u8 qos,
6328 u16 vport_idx, vport_num = 0;
6331 if (is_kill && !vlan_id)
6334 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6337 dev_err(&hdev->pdev->dev,
6338 "Set %d vport vlan filter config fail, ret =%d.\n",
6343 /* vlan 0 may be added twice when 8021q module is enabled */
6344 if (!is_kill && !vlan_id &&
6345 test_bit(vport_id, hdev->vlan_table[vlan_id]))
6348 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6349 dev_err(&hdev->pdev->dev,
6350 "Add port vlan failed, vport %d is already in vlan %d\n",
6356 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6357 dev_err(&hdev->pdev->dev,
6358 "Delete port vlan failed, vport %d is not in vlan %d\n",
6363 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6366 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6367 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6373 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6374 u16 vlan_id, bool is_kill)
6376 struct hclge_vport *vport = hclge_get_vport(handle);
6377 struct hclge_dev *hdev = vport->back;
6379 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6383 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6384 u16 vlan, u8 qos, __be16 proto)
6386 struct hclge_vport *vport = hclge_get_vport(handle);
6387 struct hclge_dev *hdev = vport->back;
6389 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6391 if (proto != htons(ETH_P_8021Q))
6392 return -EPROTONOSUPPORT;
6394 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6397 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6399 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6400 struct hclge_vport_vtag_tx_cfg_cmd *req;
6401 struct hclge_dev *hdev = vport->back;
6402 struct hclge_desc desc;
6405 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6407 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6408 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6409 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6410 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6411 vcfg->accept_tag1 ? 1 : 0);
6412 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6413 vcfg->accept_untag1 ? 1 : 0);
6414 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6415 vcfg->accept_tag2 ? 1 : 0);
6416 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6417 vcfg->accept_untag2 ? 1 : 0);
6418 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6419 vcfg->insert_tag1_en ? 1 : 0);
6420 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6421 vcfg->insert_tag2_en ? 1 : 0);
6422 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6424 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6425 req->vf_bitmap[req->vf_offset] =
6426 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6428 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6430 dev_err(&hdev->pdev->dev,
6431 "Send port txvlan cfg command fail, ret =%d\n",
6437 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6439 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6440 struct hclge_vport_vtag_rx_cfg_cmd *req;
6441 struct hclge_dev *hdev = vport->back;
6442 struct hclge_desc desc;
6445 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6447 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6448 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6449 vcfg->strip_tag1_en ? 1 : 0);
6450 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6451 vcfg->strip_tag2_en ? 1 : 0);
6452 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6453 vcfg->vlan1_vlan_prionly ? 1 : 0);
6454 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6455 vcfg->vlan2_vlan_prionly ? 1 : 0);
6457 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6458 req->vf_bitmap[req->vf_offset] =
6459 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6461 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6463 dev_err(&hdev->pdev->dev,
6464 "Send port rxvlan cfg command fail, ret =%d\n",
6470 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6472 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6473 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6474 struct hclge_desc desc;
6477 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6478 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6479 rx_req->ot_fst_vlan_type =
6480 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6481 rx_req->ot_sec_vlan_type =
6482 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6483 rx_req->in_fst_vlan_type =
6484 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6485 rx_req->in_sec_vlan_type =
6486 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6488 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6490 dev_err(&hdev->pdev->dev,
6491 "Send rxvlan protocol type command fail, ret =%d\n",
6496 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6498 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6499 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6500 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6502 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6504 dev_err(&hdev->pdev->dev,
6505 "Send txvlan protocol type command fail, ret =%d\n",
6511 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6513 #define HCLGE_DEF_VLAN_TYPE 0x8100
6515 struct hnae3_handle *handle = &hdev->vport[0].nic;
6516 struct hclge_vport *vport;
6520 if (hdev->pdev->revision >= 0x21) {
6521 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6522 HCLGE_FILTER_FE_EGRESS, true);
6526 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6527 HCLGE_FILTER_FE_INGRESS, true);
6531 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6532 HCLGE_FILTER_FE_EGRESS_V1_B,
6538 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6540 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6541 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6542 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6543 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6544 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6545 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6547 ret = hclge_set_vlan_protocol_type(hdev);
6551 for (i = 0; i < hdev->num_alloc_vport; i++) {
6552 vport = &hdev->vport[i];
6553 vport->txvlan_cfg.accept_tag1 = true;
6554 vport->txvlan_cfg.accept_untag1 = true;
6556 /* accept_tag2 and accept_untag2 are not supported on
6557 * pdev revision(0x20), new revision support them. The
6558 * value of this two fields will not return error when driver
6559 * send command to fireware in revision(0x20).
6560 * This two fields can not configured by user.
6562 vport->txvlan_cfg.accept_tag2 = true;
6563 vport->txvlan_cfg.accept_untag2 = true;
6565 vport->txvlan_cfg.insert_tag1_en = false;
6566 vport->txvlan_cfg.insert_tag2_en = false;
6567 vport->txvlan_cfg.default_tag1 = 0;
6568 vport->txvlan_cfg.default_tag2 = 0;
6570 ret = hclge_set_vlan_tx_offload_cfg(vport);
6574 vport->rxvlan_cfg.strip_tag1_en = false;
6575 vport->rxvlan_cfg.strip_tag2_en = true;
6576 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6577 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6579 ret = hclge_set_vlan_rx_offload_cfg(vport);
6584 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6587 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6589 struct hclge_vport *vport = hclge_get_vport(handle);
6591 vport->rxvlan_cfg.strip_tag1_en = false;
6592 vport->rxvlan_cfg.strip_tag2_en = enable;
6593 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6594 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6596 return hclge_set_vlan_rx_offload_cfg(vport);
6599 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6601 struct hclge_config_max_frm_size_cmd *req;
6602 struct hclge_desc desc;
6604 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6606 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6607 req->max_frm_size = cpu_to_le16(new_mps);
6608 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6610 return hclge_cmd_send(&hdev->hw, &desc, 1);
6613 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6615 struct hclge_vport *vport = hclge_get_vport(handle);
6617 return hclge_set_vport_mtu(vport, new_mtu);
6620 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6622 struct hclge_dev *hdev = vport->back;
6623 int i, max_frm_size, ret = 0;
6625 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6626 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6627 max_frm_size > HCLGE_MAC_MAX_FRAME)
6630 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6631 mutex_lock(&hdev->vport_lock);
6632 /* VF's mps must fit within hdev->mps */
6633 if (vport->vport_id && max_frm_size > hdev->mps) {
6634 mutex_unlock(&hdev->vport_lock);
6636 } else if (vport->vport_id) {
6637 vport->mps = max_frm_size;
6638 mutex_unlock(&hdev->vport_lock);
6642 /* PF's mps must be greater then VF's mps */
6643 for (i = 1; i < hdev->num_alloc_vport; i++)
6644 if (max_frm_size < hdev->vport[i].mps) {
6645 mutex_unlock(&hdev->vport_lock);
6649 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6651 ret = hclge_set_mac_mtu(hdev, max_frm_size);
6653 dev_err(&hdev->pdev->dev,
6654 "Change mtu fail, ret =%d\n", ret);
6658 hdev->mps = max_frm_size;
6659 vport->mps = max_frm_size;
6661 ret = hclge_buffer_alloc(hdev);
6663 dev_err(&hdev->pdev->dev,
6664 "Allocate buffer fail, ret =%d\n", ret);
6667 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6668 mutex_unlock(&hdev->vport_lock);
6672 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6675 struct hclge_reset_tqp_queue_cmd *req;
6676 struct hclge_desc desc;
6679 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6681 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6682 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6683 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6685 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6687 dev_err(&hdev->pdev->dev,
6688 "Send tqp reset cmd error, status =%d\n", ret);
6695 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6697 struct hclge_reset_tqp_queue_cmd *req;
6698 struct hclge_desc desc;
6701 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6703 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6704 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6706 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6708 dev_err(&hdev->pdev->dev,
6709 "Get reset status error, status =%d\n", ret);
6713 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
6716 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
6718 struct hnae3_queue *queue;
6719 struct hclge_tqp *tqp;
6721 queue = handle->kinfo.tqp[queue_id];
6722 tqp = container_of(queue, struct hclge_tqp, q);
6727 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
6729 struct hclge_vport *vport = hclge_get_vport(handle);
6730 struct hclge_dev *hdev = vport->back;
6731 int reset_try_times = 0;
6736 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
6738 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
6740 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
6744 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6746 dev_err(&hdev->pdev->dev,
6747 "Send reset tqp cmd fail, ret = %d\n", ret);
6751 reset_try_times = 0;
6752 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6753 /* Wait for tqp hw reset */
6755 reset_status = hclge_get_reset_status(hdev, queue_gid);
6760 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6761 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
6765 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6767 dev_err(&hdev->pdev->dev,
6768 "Deassert the soft reset fail, ret = %d\n", ret);
6773 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
6775 struct hclge_dev *hdev = vport->back;
6776 int reset_try_times = 0;
6781 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
6783 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6785 dev_warn(&hdev->pdev->dev,
6786 "Send reset tqp cmd fail, ret = %d\n", ret);
6790 reset_try_times = 0;
6791 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6792 /* Wait for tqp hw reset */
6794 reset_status = hclge_get_reset_status(hdev, queue_gid);
6799 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6800 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
6804 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6806 dev_warn(&hdev->pdev->dev,
6807 "Deassert the soft reset fail, ret = %d\n", ret);
6810 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
6812 struct hclge_vport *vport = hclge_get_vport(handle);
6813 struct hclge_dev *hdev = vport->back;
6815 return hdev->fw_version;
6818 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6820 struct phy_device *phydev = hdev->hw.mac.phydev;
6825 phy_set_asym_pause(phydev, rx_en, tx_en);
6828 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6833 hdev->fc_mode_last_time = HCLGE_FC_FULL;
6834 else if (rx_en && !tx_en)
6835 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
6836 else if (!rx_en && tx_en)
6837 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
6839 hdev->fc_mode_last_time = HCLGE_FC_NONE;
6841 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
6844 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
6846 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
6851 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
6856 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
6858 struct phy_device *phydev = hdev->hw.mac.phydev;
6859 u16 remote_advertising = 0;
6860 u16 local_advertising = 0;
6861 u32 rx_pause, tx_pause;
6864 if (!phydev->link || !phydev->autoneg)
6867 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
6870 remote_advertising = LPA_PAUSE_CAP;
6872 if (phydev->asym_pause)
6873 remote_advertising |= LPA_PAUSE_ASYM;
6875 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
6876 remote_advertising);
6877 tx_pause = flowctl & FLOW_CTRL_TX;
6878 rx_pause = flowctl & FLOW_CTRL_RX;
6880 if (phydev->duplex == HCLGE_MAC_HALF) {
6885 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
6888 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
6889 u32 *rx_en, u32 *tx_en)
6891 struct hclge_vport *vport = hclge_get_vport(handle);
6892 struct hclge_dev *hdev = vport->back;
6894 *auto_neg = hclge_get_autoneg(handle);
6896 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6902 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
6905 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
6908 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
6917 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
6918 u32 rx_en, u32 tx_en)
6920 struct hclge_vport *vport = hclge_get_vport(handle);
6921 struct hclge_dev *hdev = vport->back;
6922 struct phy_device *phydev = hdev->hw.mac.phydev;
6925 fc_autoneg = hclge_get_autoneg(handle);
6926 if (auto_neg != fc_autoneg) {
6927 dev_info(&hdev->pdev->dev,
6928 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
6932 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6933 dev_info(&hdev->pdev->dev,
6934 "Priority flow control enabled. Cannot set link flow control.\n");
6938 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
6941 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
6943 /* Only support flow control negotiation for netdev with
6944 * phy attached for now.
6949 return phy_start_aneg(phydev);
6952 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
6953 u8 *auto_neg, u32 *speed, u8 *duplex)
6955 struct hclge_vport *vport = hclge_get_vport(handle);
6956 struct hclge_dev *hdev = vport->back;
6959 *speed = hdev->hw.mac.speed;
6961 *duplex = hdev->hw.mac.duplex;
6963 *auto_neg = hdev->hw.mac.autoneg;
6966 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
6968 struct hclge_vport *vport = hclge_get_vport(handle);
6969 struct hclge_dev *hdev = vport->back;
6972 *media_type = hdev->hw.mac.media_type;
6975 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
6976 u8 *tp_mdix_ctrl, u8 *tp_mdix)
6978 struct hclge_vport *vport = hclge_get_vport(handle);
6979 struct hclge_dev *hdev = vport->back;
6980 struct phy_device *phydev = hdev->hw.mac.phydev;
6981 int mdix_ctrl, mdix, retval, is_resolved;
6984 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
6985 *tp_mdix = ETH_TP_MDI_INVALID;
6989 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
6991 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
6992 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
6993 HCLGE_PHY_MDIX_CTRL_S);
6995 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
6996 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
6997 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
6999 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7001 switch (mdix_ctrl) {
7003 *tp_mdix_ctrl = ETH_TP_MDI;
7006 *tp_mdix_ctrl = ETH_TP_MDI_X;
7009 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7012 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7017 *tp_mdix = ETH_TP_MDI_INVALID;
7019 *tp_mdix = ETH_TP_MDI_X;
7021 *tp_mdix = ETH_TP_MDI;
7024 static int hclge_init_instance_hw(struct hclge_dev *hdev)
7026 return hclge_mac_connect_phy(hdev);
7029 static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
7031 hclge_mac_disconnect_phy(hdev);
7034 static int hclge_init_client_instance(struct hnae3_client *client,
7035 struct hnae3_ae_dev *ae_dev)
7037 struct hclge_dev *hdev = ae_dev->priv;
7038 struct hclge_vport *vport;
7041 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7042 vport = &hdev->vport[i];
7044 switch (client->type) {
7045 case HNAE3_CLIENT_KNIC:
7047 hdev->nic_client = client;
7048 vport->nic.client = client;
7049 ret = client->ops->init_instance(&vport->nic);
7053 ret = hclge_init_instance_hw(hdev);
7055 client->ops->uninit_instance(&vport->nic,
7060 hnae3_set_client_init_flag(client, ae_dev, 1);
7062 if (hdev->roce_client &&
7063 hnae3_dev_roce_supported(hdev)) {
7064 struct hnae3_client *rc = hdev->roce_client;
7066 ret = hclge_init_roce_base_info(vport);
7070 ret = rc->ops->init_instance(&vport->roce);
7074 hnae3_set_client_init_flag(hdev->roce_client,
7079 case HNAE3_CLIENT_UNIC:
7080 hdev->nic_client = client;
7081 vport->nic.client = client;
7083 ret = client->ops->init_instance(&vport->nic);
7087 hnae3_set_client_init_flag(client, ae_dev, 1);
7090 case HNAE3_CLIENT_ROCE:
7091 if (hnae3_dev_roce_supported(hdev)) {
7092 hdev->roce_client = client;
7093 vport->roce.client = client;
7096 if (hdev->roce_client && hdev->nic_client) {
7097 ret = hclge_init_roce_base_info(vport);
7101 ret = client->ops->init_instance(&vport->roce);
7105 hnae3_set_client_init_flag(client, ae_dev, 1);
7117 hdev->nic_client = NULL;
7118 vport->nic.client = NULL;
7121 hdev->roce_client = NULL;
7122 vport->roce.client = NULL;
7126 static void hclge_uninit_client_instance(struct hnae3_client *client,
7127 struct hnae3_ae_dev *ae_dev)
7129 struct hclge_dev *hdev = ae_dev->priv;
7130 struct hclge_vport *vport;
7133 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7134 vport = &hdev->vport[i];
7135 if (hdev->roce_client) {
7136 hdev->roce_client->ops->uninit_instance(&vport->roce,
7138 hdev->roce_client = NULL;
7139 vport->roce.client = NULL;
7141 if (client->type == HNAE3_CLIENT_ROCE)
7143 if (hdev->nic_client && client->ops->uninit_instance) {
7144 hclge_uninit_instance_hw(hdev);
7145 client->ops->uninit_instance(&vport->nic, 0);
7146 hdev->nic_client = NULL;
7147 vport->nic.client = NULL;
7152 static int hclge_pci_init(struct hclge_dev *hdev)
7154 struct pci_dev *pdev = hdev->pdev;
7155 struct hclge_hw *hw;
7158 ret = pci_enable_device(pdev);
7160 dev_err(&pdev->dev, "failed to enable PCI device\n");
7164 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7166 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7169 "can't set consistent PCI DMA");
7170 goto err_disable_device;
7172 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7175 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7177 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7178 goto err_disable_device;
7181 pci_set_master(pdev);
7183 hw->io_base = pcim_iomap(pdev, 2, 0);
7185 dev_err(&pdev->dev, "Can't map configuration register space\n");
7187 goto err_clr_master;
7190 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7194 pci_clear_master(pdev);
7195 pci_release_regions(pdev);
7197 pci_disable_device(pdev);
7202 static void hclge_pci_uninit(struct hclge_dev *hdev)
7204 struct pci_dev *pdev = hdev->pdev;
7206 pcim_iounmap(pdev, hdev->hw.io_base);
7207 pci_free_irq_vectors(pdev);
7208 pci_clear_master(pdev);
7209 pci_release_mem_regions(pdev);
7210 pci_disable_device(pdev);
7213 static void hclge_state_init(struct hclge_dev *hdev)
7215 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7216 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7217 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7218 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7219 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7220 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7223 static void hclge_state_uninit(struct hclge_dev *hdev)
7225 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7227 if (hdev->service_timer.function)
7228 del_timer_sync(&hdev->service_timer);
7229 if (hdev->reset_timer.function)
7230 del_timer_sync(&hdev->reset_timer);
7231 if (hdev->service_task.func)
7232 cancel_work_sync(&hdev->service_task);
7233 if (hdev->rst_service_task.func)
7234 cancel_work_sync(&hdev->rst_service_task);
7235 if (hdev->mbx_service_task.func)
7236 cancel_work_sync(&hdev->mbx_service_task);
7239 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7241 #define HCLGE_FLR_WAIT_MS 100
7242 #define HCLGE_FLR_WAIT_CNT 50
7243 struct hclge_dev *hdev = ae_dev->priv;
7246 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7247 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7248 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7249 hclge_reset_event(hdev->pdev, NULL);
7251 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7252 cnt++ < HCLGE_FLR_WAIT_CNT)
7253 msleep(HCLGE_FLR_WAIT_MS);
7255 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7256 dev_err(&hdev->pdev->dev,
7257 "flr wait down timeout: %d\n", cnt);
7260 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7262 struct hclge_dev *hdev = ae_dev->priv;
7264 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7267 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7269 struct pci_dev *pdev = ae_dev->pdev;
7270 struct hclge_dev *hdev;
7273 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7280 hdev->ae_dev = ae_dev;
7281 hdev->reset_type = HNAE3_NONE_RESET;
7282 hdev->reset_level = HNAE3_FUNC_RESET;
7283 ae_dev->priv = hdev;
7284 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7286 mutex_init(&hdev->vport_lock);
7288 ret = hclge_pci_init(hdev);
7290 dev_err(&pdev->dev, "PCI init failed\n");
7294 /* Firmware command queue initialize */
7295 ret = hclge_cmd_queue_init(hdev);
7297 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7298 goto err_pci_uninit;
7301 /* Firmware command initialize */
7302 ret = hclge_cmd_init(hdev);
7304 goto err_cmd_uninit;
7306 ret = hclge_get_cap(hdev);
7308 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7310 goto err_cmd_uninit;
7313 ret = hclge_configure(hdev);
7315 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7316 goto err_cmd_uninit;
7319 ret = hclge_init_msi(hdev);
7321 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
7322 goto err_cmd_uninit;
7325 ret = hclge_misc_irq_init(hdev);
7328 "Misc IRQ(vector0) init error, ret = %d.\n",
7330 goto err_msi_uninit;
7333 ret = hclge_alloc_tqps(hdev);
7335 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
7336 goto err_msi_irq_uninit;
7339 ret = hclge_alloc_vport(hdev);
7341 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
7342 goto err_msi_irq_uninit;
7345 ret = hclge_map_tqp(hdev);
7347 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7348 goto err_msi_irq_uninit;
7351 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7352 ret = hclge_mac_mdio_config(hdev);
7354 dev_err(&hdev->pdev->dev,
7355 "mdio config fail ret=%d\n", ret);
7356 goto err_msi_irq_uninit;
7360 ret = hclge_init_umv_space(hdev);
7362 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
7363 goto err_msi_irq_uninit;
7366 ret = hclge_mac_init(hdev);
7368 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7369 goto err_mdiobus_unreg;
7372 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7374 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7375 goto err_mdiobus_unreg;
7378 ret = hclge_config_gro(hdev, true);
7380 goto err_mdiobus_unreg;
7382 ret = hclge_init_vlan_config(hdev);
7384 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7385 goto err_mdiobus_unreg;
7388 ret = hclge_tm_schd_init(hdev);
7390 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
7391 goto err_mdiobus_unreg;
7394 hclge_rss_init_cfg(hdev);
7395 ret = hclge_rss_init_hw(hdev);
7397 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7398 goto err_mdiobus_unreg;
7401 ret = init_mgr_tbl(hdev);
7403 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
7404 goto err_mdiobus_unreg;
7407 ret = hclge_init_fd_config(hdev);
7410 "fd table init fail, ret=%d\n", ret);
7411 goto err_mdiobus_unreg;
7414 ret = hclge_hw_error_set_state(hdev, true);
7417 "fail(%d) to enable hw error interrupts\n", ret);
7418 goto err_mdiobus_unreg;
7421 hclge_dcb_ops_set(hdev);
7423 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
7424 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7425 INIT_WORK(&hdev->service_task, hclge_service_task);
7426 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
7427 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
7429 hclge_clear_all_event_cause(hdev);
7431 /* Enable MISC vector(vector0) */
7432 hclge_enable_vector(&hdev->misc_vector, true);
7434 hclge_state_init(hdev);
7435 hdev->last_reset_time = jiffies;
7437 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7441 if (hdev->hw.mac.phydev)
7442 mdiobus_unregister(hdev->hw.mac.mdio_bus);
7444 hclge_misc_irq_uninit(hdev);
7446 pci_free_irq_vectors(pdev);
7448 hclge_destroy_cmd_queue(&hdev->hw);
7450 pcim_iounmap(pdev, hdev->hw.io_base);
7451 pci_clear_master(pdev);
7452 pci_release_regions(pdev);
7453 pci_disable_device(pdev);
7458 static void hclge_stats_clear(struct hclge_dev *hdev)
7460 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7463 static void hclge_reset_vport_state(struct hclge_dev *hdev)
7465 struct hclge_vport *vport = hdev->vport;
7468 for (i = 0; i < hdev->num_alloc_vport; i++) {
7469 hclge_vport_start(vport);
7474 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7476 struct hclge_dev *hdev = ae_dev->priv;
7477 struct pci_dev *pdev = ae_dev->pdev;
7480 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7482 hclge_stats_clear(hdev);
7483 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
7485 ret = hclge_cmd_init(hdev);
7487 dev_err(&pdev->dev, "Cmd queue init failed\n");
7491 ret = hclge_map_tqp(hdev);
7493 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7497 hclge_reset_umv_space(hdev);
7499 ret = hclge_mac_init(hdev);
7501 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7505 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7507 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7511 ret = hclge_config_gro(hdev, true);
7515 ret = hclge_init_vlan_config(hdev);
7517 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7521 ret = hclge_tm_init_hw(hdev, true);
7523 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
7527 ret = hclge_rss_init_hw(hdev);
7529 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7533 ret = hclge_init_fd_config(hdev);
7536 "fd table init fail, ret=%d\n", ret);
7540 /* Re-enable the hw error interrupts because
7541 * the interrupts get disabled on core/global reset.
7543 ret = hclge_hw_error_set_state(hdev, true);
7546 "fail(%d) to re-enable HNS hw error interrupts\n", ret);
7550 hclge_reset_vport_state(hdev);
7552 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
7558 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
7560 struct hclge_dev *hdev = ae_dev->priv;
7561 struct hclge_mac *mac = &hdev->hw.mac;
7563 hclge_state_uninit(hdev);
7566 mdiobus_unregister(mac->mdio_bus);
7568 hclge_uninit_umv_space(hdev);
7570 /* Disable MISC vector(vector0) */
7571 hclge_enable_vector(&hdev->misc_vector, false);
7572 synchronize_irq(hdev->misc_vector.vector_irq);
7574 hclge_hw_error_set_state(hdev, false);
7575 hclge_destroy_cmd_queue(&hdev->hw);
7576 hclge_misc_irq_uninit(hdev);
7577 hclge_pci_uninit(hdev);
7578 mutex_destroy(&hdev->vport_lock);
7579 ae_dev->priv = NULL;
7582 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
7584 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7585 struct hclge_vport *vport = hclge_get_vport(handle);
7586 struct hclge_dev *hdev = vport->back;
7588 return min_t(u32, hdev->rss_size_max,
7589 vport->alloc_tqps / kinfo->num_tc);
7592 static void hclge_get_channels(struct hnae3_handle *handle,
7593 struct ethtool_channels *ch)
7595 ch->max_combined = hclge_get_max_channels(handle);
7596 ch->other_count = 1;
7598 ch->combined_count = handle->kinfo.rss_size;
7601 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
7602 u16 *alloc_tqps, u16 *max_rss_size)
7604 struct hclge_vport *vport = hclge_get_vport(handle);
7605 struct hclge_dev *hdev = vport->back;
7607 *alloc_tqps = vport->alloc_tqps;
7608 *max_rss_size = hdev->rss_size_max;
7611 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
7612 bool rxfh_configured)
7614 struct hclge_vport *vport = hclge_get_vport(handle);
7615 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7616 struct hclge_dev *hdev = vport->back;
7617 int cur_rss_size = kinfo->rss_size;
7618 int cur_tqps = kinfo->num_tqps;
7619 u16 tc_offset[HCLGE_MAX_TC_NUM];
7620 u16 tc_valid[HCLGE_MAX_TC_NUM];
7621 u16 tc_size[HCLGE_MAX_TC_NUM];
7626 kinfo->req_rss_size = new_tqps_num;
7628 ret = hclge_tm_vport_map_update(hdev);
7630 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
7634 roundup_size = roundup_pow_of_two(kinfo->rss_size);
7635 roundup_size = ilog2(roundup_size);
7636 /* Set the RSS TC mode according to the new RSS size */
7637 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
7640 if (!(hdev->hw_tc_map & BIT(i)))
7644 tc_size[i] = roundup_size;
7645 tc_offset[i] = kinfo->rss_size * i;
7647 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
7651 /* RSS indirection table has been configuared by user */
7652 if (rxfh_configured)
7655 /* Reinitializes the rss indirect table according to the new RSS size */
7656 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
7660 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
7661 rss_indir[i] = i % kinfo->rss_size;
7663 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
7665 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
7672 dev_info(&hdev->pdev->dev,
7673 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
7674 cur_rss_size, kinfo->rss_size,
7675 cur_tqps, kinfo->rss_size * kinfo->num_tc);
7680 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
7681 u32 *regs_num_64_bit)
7683 struct hclge_desc desc;
7687 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
7688 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7690 dev_err(&hdev->pdev->dev,
7691 "Query register number cmd failed, ret = %d.\n", ret);
7695 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
7696 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
7698 total_num = *regs_num_32_bit + *regs_num_64_bit;
7705 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7708 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
7710 struct hclge_desc *desc;
7711 u32 *reg_val = data;
7720 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
7721 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7725 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
7726 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7728 dev_err(&hdev->pdev->dev,
7729 "Query 32 bit register cmd failed, ret = %d.\n", ret);
7734 for (i = 0; i < cmd_num; i++) {
7736 desc_data = (__le32 *)(&desc[i].data[0]);
7737 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
7739 desc_data = (__le32 *)(&desc[i]);
7740 n = HCLGE_32_BIT_REG_RTN_DATANUM;
7742 for (k = 0; k < n; k++) {
7743 *reg_val++ = le32_to_cpu(*desc_data++);
7755 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
7758 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
7760 struct hclge_desc *desc;
7761 u64 *reg_val = data;
7770 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
7771 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
7775 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
7776 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
7778 dev_err(&hdev->pdev->dev,
7779 "Query 64 bit register cmd failed, ret = %d.\n", ret);
7784 for (i = 0; i < cmd_num; i++) {
7786 desc_data = (__le64 *)(&desc[i].data[0]);
7787 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
7789 desc_data = (__le64 *)(&desc[i]);
7790 n = HCLGE_64_BIT_REG_RTN_DATANUM;
7792 for (k = 0; k < n; k++) {
7793 *reg_val++ = le64_to_cpu(*desc_data++);
7805 #define MAX_SEPARATE_NUM 4
7806 #define SEPARATOR_VALUE 0xFFFFFFFF
7807 #define REG_NUM_PER_LINE 4
7808 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
7810 static int hclge_get_regs_len(struct hnae3_handle *handle)
7812 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
7813 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7814 struct hclge_vport *vport = hclge_get_vport(handle);
7815 struct hclge_dev *hdev = vport->back;
7816 u32 regs_num_32_bit, regs_num_64_bit;
7819 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
7821 dev_err(&hdev->pdev->dev,
7822 "Get register number failed, ret = %d.\n", ret);
7826 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
7827 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
7828 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
7829 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
7831 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
7832 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
7833 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
7836 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
7839 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7840 struct hclge_vport *vport = hclge_get_vport(handle);
7841 struct hclge_dev *hdev = vport->back;
7842 u32 regs_num_32_bit, regs_num_64_bit;
7843 int i, j, reg_um, separator_num;
7847 *version = hdev->fw_version;
7849 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
7851 dev_err(&hdev->pdev->dev,
7852 "Get register number failed, ret = %d.\n", ret);
7856 /* fetching per-PF registers valus from PF PCIe register space */
7857 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
7858 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7859 for (i = 0; i < reg_um; i++)
7860 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
7861 for (i = 0; i < separator_num; i++)
7862 *reg++ = SEPARATOR_VALUE;
7864 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
7865 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7866 for (i = 0; i < reg_um; i++)
7867 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
7868 for (i = 0; i < separator_num; i++)
7869 *reg++ = SEPARATOR_VALUE;
7871 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
7872 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7873 for (j = 0; j < kinfo->num_tqps; j++) {
7874 for (i = 0; i < reg_um; i++)
7875 *reg++ = hclge_read_dev(&hdev->hw,
7876 ring_reg_addr_list[i] +
7878 for (i = 0; i < separator_num; i++)
7879 *reg++ = SEPARATOR_VALUE;
7882 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
7883 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
7884 for (j = 0; j < hdev->num_msi_used - 1; j++) {
7885 for (i = 0; i < reg_um; i++)
7886 *reg++ = hclge_read_dev(&hdev->hw,
7887 tqp_intr_reg_addr_list[i] +
7889 for (i = 0; i < separator_num; i++)
7890 *reg++ = SEPARATOR_VALUE;
7893 /* fetching PF common registers values from firmware */
7894 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
7896 dev_err(&hdev->pdev->dev,
7897 "Get 32 bit register failed, ret = %d.\n", ret);
7901 reg += regs_num_32_bit;
7902 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
7904 dev_err(&hdev->pdev->dev,
7905 "Get 64 bit register failed, ret = %d.\n", ret);
7908 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
7910 struct hclge_set_led_state_cmd *req;
7911 struct hclge_desc desc;
7914 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
7916 req = (struct hclge_set_led_state_cmd *)desc.data;
7917 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
7918 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
7920 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7922 dev_err(&hdev->pdev->dev,
7923 "Send set led state cmd error, ret =%d\n", ret);
7928 enum hclge_led_status {
7931 HCLGE_LED_NO_CHANGE = 0xFF,
7934 static int hclge_set_led_id(struct hnae3_handle *handle,
7935 enum ethtool_phys_id_state status)
7937 struct hclge_vport *vport = hclge_get_vport(handle);
7938 struct hclge_dev *hdev = vport->back;
7941 case ETHTOOL_ID_ACTIVE:
7942 return hclge_set_led_status(hdev, HCLGE_LED_ON);
7943 case ETHTOOL_ID_INACTIVE:
7944 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
7950 static void hclge_get_link_mode(struct hnae3_handle *handle,
7951 unsigned long *supported,
7952 unsigned long *advertising)
7954 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
7955 struct hclge_vport *vport = hclge_get_vport(handle);
7956 struct hclge_dev *hdev = vport->back;
7957 unsigned int idx = 0;
7959 for (; idx < size; idx++) {
7960 supported[idx] = hdev->hw.mac.supported[idx];
7961 advertising[idx] = hdev->hw.mac.advertising[idx];
7965 static int hclge_gro_en(struct hnae3_handle *handle, int enable)
7967 struct hclge_vport *vport = hclge_get_vport(handle);
7968 struct hclge_dev *hdev = vport->back;
7970 return hclge_config_gro(hdev, enable);
7973 static const struct hnae3_ae_ops hclge_ops = {
7974 .init_ae_dev = hclge_init_ae_dev,
7975 .uninit_ae_dev = hclge_uninit_ae_dev,
7976 .flr_prepare = hclge_flr_prepare,
7977 .flr_done = hclge_flr_done,
7978 .init_client_instance = hclge_init_client_instance,
7979 .uninit_client_instance = hclge_uninit_client_instance,
7980 .map_ring_to_vector = hclge_map_ring_to_vector,
7981 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
7982 .get_vector = hclge_get_vector,
7983 .put_vector = hclge_put_vector,
7984 .set_promisc_mode = hclge_set_promisc_mode,
7985 .set_loopback = hclge_set_loopback,
7986 .start = hclge_ae_start,
7987 .stop = hclge_ae_stop,
7988 .client_start = hclge_client_start,
7989 .client_stop = hclge_client_stop,
7990 .get_status = hclge_get_status,
7991 .get_ksettings_an_result = hclge_get_ksettings_an_result,
7992 .update_speed_duplex_h = hclge_update_speed_duplex_h,
7993 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
7994 .get_media_type = hclge_get_media_type,
7995 .get_rss_key_size = hclge_get_rss_key_size,
7996 .get_rss_indir_size = hclge_get_rss_indir_size,
7997 .get_rss = hclge_get_rss,
7998 .set_rss = hclge_set_rss,
7999 .set_rss_tuple = hclge_set_rss_tuple,
8000 .get_rss_tuple = hclge_get_rss_tuple,
8001 .get_tc_size = hclge_get_tc_size,
8002 .get_mac_addr = hclge_get_mac_addr,
8003 .set_mac_addr = hclge_set_mac_addr,
8004 .do_ioctl = hclge_do_ioctl,
8005 .add_uc_addr = hclge_add_uc_addr,
8006 .rm_uc_addr = hclge_rm_uc_addr,
8007 .add_mc_addr = hclge_add_mc_addr,
8008 .rm_mc_addr = hclge_rm_mc_addr,
8009 .set_autoneg = hclge_set_autoneg,
8010 .get_autoneg = hclge_get_autoneg,
8011 .get_pauseparam = hclge_get_pauseparam,
8012 .set_pauseparam = hclge_set_pauseparam,
8013 .set_mtu = hclge_set_mtu,
8014 .reset_queue = hclge_reset_tqp,
8015 .get_stats = hclge_get_stats,
8016 .update_stats = hclge_update_stats,
8017 .get_strings = hclge_get_strings,
8018 .get_sset_count = hclge_get_sset_count,
8019 .get_fw_version = hclge_get_fw_version,
8020 .get_mdix_mode = hclge_get_mdix_mode,
8021 .enable_vlan_filter = hclge_enable_vlan_filter,
8022 .set_vlan_filter = hclge_set_vlan_filter,
8023 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
8024 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
8025 .reset_event = hclge_reset_event,
8026 .set_default_reset_request = hclge_set_def_reset_request,
8027 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8028 .set_channels = hclge_set_channels,
8029 .get_channels = hclge_get_channels,
8030 .get_regs_len = hclge_get_regs_len,
8031 .get_regs = hclge_get_regs,
8032 .set_led_id = hclge_set_led_id,
8033 .get_link_mode = hclge_get_link_mode,
8034 .add_fd_entry = hclge_add_fd_entry,
8035 .del_fd_entry = hclge_del_fd_entry,
8036 .del_all_fd_entries = hclge_del_all_fd_entries,
8037 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8038 .get_fd_rule_info = hclge_get_fd_rule_info,
8039 .get_fd_all_rules = hclge_get_all_rules,
8040 .restore_fd_rules = hclge_restore_fd_entries,
8041 .enable_fd = hclge_enable_fd,
8042 .dbg_run_cmd = hclge_dbg_run_cmd,
8043 .handle_hw_ras_error = hclge_handle_hw_ras_error,
8044 .get_hw_reset_stat = hclge_get_hw_reset_stat,
8045 .ae_dev_resetting = hclge_ae_dev_resetting,
8046 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
8047 .set_gro_en = hclge_gro_en,
8048 .get_global_queue_id = hclge_covert_handle_qid_global,
8049 .set_timer_task = hclge_set_timer_task,
8052 static struct hnae3_ae_algo ae_algo = {
8054 .pdev_id_table = ae_algo_pci_tbl,
8057 static int hclge_init(void)
8059 pr_info("%s is initializing\n", HCLGE_NAME);
8061 hnae3_register_ae_algo(&ae_algo);
8066 static void hclge_exit(void)
8068 hnae3_unregister_ae_algo(&ae_algo);
8070 module_init(hclge_init);
8071 module_exit(hclge_exit);
8073 MODULE_LICENSE("GPL");
8074 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8075 MODULE_DESCRIPTION("HCLGE Driver");
8076 MODULE_VERSION(HCLGE_MOD_VERSION);