// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256
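
/* Usage sketch for the two stats macros above:
 * HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num) evaluates to the byte
 * offset of that counter inside struct hclge_mac_stats, and
 * HCLGE_STATS_READ(&hdev->hw_stats.mac_stats, offset) reads the u64 at
 * that offset. This pairing is what lets g_mac_stats_string below bind
 * each ethtool string to a field offset instead of requiring a dedicated
 * accessor per counter.
 */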

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_RST_ING};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
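
/* The four register lists above (command queue, PF-common, per-ring and
 * per-vector interrupt control) are, assuming the driver's standard
 * register-dump path, the tables walked when dumping registers (e.g. for
 * ethtool -d); the dump routine itself lies outside this excerpt.
 */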

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
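
/* Read the complete set of MAC counters from firmware with one
 * multi-descriptor query and accumulate them into
 * hdev->hw_stats.mac_stats. The first descriptor still carries the
 * command header, so only part of it is statistics payload; that is why
 * it contributes HCLGE_RTN_DATA_NUM - 2 values below while each later
 * descriptor contributes HCLGE_RTN_DATA_NUM.
 */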
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
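
/* Copy the accumulated per-TQP counters (all TX first, then all RX) into
 * the ethtool data buffer, returning the position just past the last
 * value written.
 */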
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
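
/* Two values are exported per TQP: one TX and one RX packet counter. */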
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);

		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);

		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}
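
/* Refresh MAC and per-TQP statistics for ethtool/ndo queries. The
 * HCLGE_STATE_STATISTICS_UPDATING bit ensures only one updater runs at a
 * time; a concurrent caller simply returns and reuses the counters
 * gathered by the in-flight update.
 */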
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
				   HNAE3_SUPPORT_PHY_LOOPBACK | \
				   HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
				   HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: supported only in GE mode
	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
	 * phy: supported only when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
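
/* Query the function status, polling up to five times with a 1-2 ms sleep
 * between tries so firmware has a chance to report the PF reset as done
 * before the result is parsed.
 */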
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
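
	/* Illustrative example (hypothetical values): if param[2] holds
	 * 0x44332211 and the high field holds 0x6655, the shift above is
	 * equivalent to (0x6655ULL << 32) | 0x44332211 = 0x665544332211;
	 * the byte loop below then unpacks it low byte first, giving
	 * mac_addr[] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66}. The split
	 * (<< 31) << 1 form sidesteps shift-width warnings from static
	 * checkers.
	 */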

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static configuration parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The length is expressed in units of 4 bytes when sent to
		 * hardware.
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed, ret = %d.\n",
			ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Currently we do not support uncontiguous TCs */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < kinfo->num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.desc_num = kinfo->num_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = kinfo->num_tqps;

	return 0;
}
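
/* Derive the vport's queue layout: rss_size is bounded by the hardware
 * maximum and by the queues available per TC, num_tqps becomes
 * rss_size * num_tc, and each enabled TC receives a contiguous slice of
 * queues starting at i * rss_size.
 */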
static int hclge_knic_setup(struct hclge_vport *vport,
			    u16 num_tqps, u16 num_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to the default queue if the TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is expressed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
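
/* Decide whether rx_all (the packet buffer left after TX allocations) can
 * hold every enabled private buffer plus a standard shared buffer, and if
 * so size the shared buffer and its thresholds. A worked example with
 * illustrative numbers only: for mps = 1500, aligned_mps = 1536; with
 * four enabled TCs of which two are PFC-enabled, shared_buf_tc =
 * 2 * 1536 + 2 * (1536 / 2) + 1536 = 6144 bytes before rounding.
 */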
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);
	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
				 + hdev->dv_buf_size;

	shared_buf_tc = pfc_enable_num * aligned_mps +
			(tc_num - pfc_enable_num) * aligned_mps / 2 +
			aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
					     HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low =
			roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < hdev->tx_buf_size)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = hdev->tx_buf_size;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0 on successful calculation, negative on failure
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size, aligned_mps;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = aligned_mps;
				priv->wl.high =
					roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
				priv->buf_size = priv->wl.high +
						 hdev->dv_buf_size;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * aligned_mps;
				priv->buf_size = priv->wl.high +
						 hdev->dv_buf_size;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 256;
			priv->wl.high = priv->wl.low + aligned_mps;
			priv->buf_size = priv->wl.high + hdev->dv_buf_size;
		} else {
			priv->wl.low = 0;
			priv->wl.high = aligned_mps;
			priv->buf_size = priv->wl.high + hdev->dv_buf_size;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	/* only 10M and 100M support half duplex; force full otherwise */
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->support_sfp_query = true;
	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}

static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}
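
/* Combined link state: with a PHY attached, the link is reported up only
 * when both the MAC link and the PHY link are up; without a PHY, the MAC
 * link status alone decides.
 */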
static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}

static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
	struct hclge_sfp_speed_cmd *resp = NULL;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
	resp = (struct hclge_sfp_speed_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}

	*speed = resp->sfp_speed;

	return 0;
}

static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	u32 speed;
	int ret;

	/* get the speed from the SFP cmd only when no phy
	 * device exists.
	 */
	if (mac.phydev)
		return 0;

	/* if IMP does not support getting SFP/qSFP speed, return directly */
	if (!hdev->support_sfp_query)
		return 0;

	ret = hclge_get_sfp_speed(hdev, &speed);
	if (ret == -EOPNOTSUPP) {
		hdev->support_sfp_query = false;
		return ret;
	} else if (ret) {
		return ret;
	}

	if (speed == HCLGE_MAC_SPEED_UNKNOWN)
		return 0; /* do nothing if no SFP */

	/* must config full duplex for SFP */
	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
}

static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
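
/* Decode the vector-0 interrupt source. Reset events are checked first
 * (IMP, then global, then core), then MSI-X error events, then the
 * mailbox (CMDQ RX) event; this ordering implements the priority that the
 * assumption note inside the function relies on.
 */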
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw,
				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since we would not have
	 * cleared the RX CMDQ event this time, we would receive another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 msix event source */
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
		return HCLGE_VECTOR0_EVENT_ERR;

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	return HCLGE_VECTOR0_EVENT_OTHER;
}
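
/* Acknowledge a previously decoded event: reset events are cleared
 * through HCLGE_MISC_RESET_STS_REG, mailbox events through
 * HCLGE_VECTOR0_CMDQ_SRC_REG. Other event types need no clearing here.
 */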
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}
static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}
static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}
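
/* Vector 0 ISR: mask the vector, decode the cause and dispatch to the
 * reset or mailbox task. The vector is only re-enabled here on the
 * mailbox path; the reset/error paths re-enable it later (see
 * hclge_clear_reset_cause() and hclge_get_reset_level()) once the cause
 * has been cleared safely outside interrupt context.
 */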
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 event_cause;
	u32 clearval;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This could
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we will do below for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have deferred type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
		 *    will fetch the correct type of reset. This would be done
		 *    by first decoding the types of errors.
		 */
		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
		/* fall through */
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	/* clear the source of interrupt if it is not caused by reset */
	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
		hclge_clear_event_cause(hdev, event_cause, clearval);
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}
static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}
static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}
static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}
static int hclge_notify_client(struct hclge_dev *hdev,
			       enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify nic client failed %d(%d)\n", type, ret);
			return ret;
		}
	}

	return 0;
}
static int hclge_notify_roce_client(struct hclge_dev *hdev,
				    enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->roce_client;
	int ret = 0;
	u16 i;

	if (!client)
		return 0;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].roce;

		ret = client->ops->reset_notify(handle, type);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)",
				type, ret);
			return ret;
		}
	}

	return ret;
}
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WAIT_MS	100
#define HCLGE_RESET_WAIT_CNT	200
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_IMP_RESET_BIT;
		break;
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	case HNAE3_FLR_RESET:
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	if (hdev->reset_type == HNAE3_FLR_RESET) {
		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
		       cnt++ < HCLGE_RESET_WAIT_CNT)
			msleep(HCLGE_RESET_WAIT_MS);

		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
			dev_err(&hdev->pdev->dev,
				"flr wait timeout: %d\n", cnt);
			return -EBUSY;
		}

		return 0;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
{
	struct hclge_vf_rst_cmd *req;
	struct hclge_desc desc;

	req = (struct hclge_vf_rst_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;

	if (reset)
		req->vf_rst = 0x1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
{
	int i;

	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to set/clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%d) rst failed %d!\n",
				vport->vport_id, ret);
			return ret;
		}

		if (!reset)
			continue;

		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
		ret = hclge_inform_reset_assert_to_vf(vport);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "inform reset to vf(%d) failed %d!\n",
				 vport->vport_id, ret);
	}

	return 0;
}
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}
static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	case HNAE3_FLR_RESET:
		dev_info(&pdev->dev, "FLR requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}
static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* first, resolve any unknown reset type to the known type(s) */
	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
		hclge_handle_hw_msix_error(hdev, addr);
		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused
		 * interrupt since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced
		 * new UNKNOWN reset type). Now, the errors have been
		 * handled and cleared in hardware we can safely enable
		 * interrupts. This is an exception to the norm.
		 */
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_IMP_RESET, addr)) {
		rst_level = HNAE3_IMP_RESET;
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
		rst_level = HNAE3_GLOBAL_RESET;
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
		rst_level = HNAE3_CORE_RESET;
		clear_bit(HNAE3_CORE_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
		rst_level = HNAE3_FUNC_RESET;
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}
static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	case HNAE3_CORE_RESET:
		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);
}
static int hclge_reset_prepare_down(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, true);
		break;
	default:
		break;
	}

	return ret;
}
static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(100);
		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		break;
	case HNAE3_FLR_RESET:
		/* There is no mechanism for PF to know if VF has stopped IO
		 * for now, just wait 100 ms for VF to stop IO
		 */
		msleep(100);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		break;
	case HNAE3_IMP_RESET:
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}
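
/* Decide how to proceed after a failed reset attempt: keep waiting while
 * hardware still reports a pending reset, re-wait on timeout up to
 * MAX_RESET_FAIL_CNT times, and otherwise escalate by arming the reset
 * timer, which requests a global reset. Returns true when the reset task
 * should be rescheduled.
 */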
static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
{
#define MAX_RESET_FAIL_CNT 5
#define RESET_UPGRADE_DELAY_SEC 10

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
		    BIT(HCLGE_IMP_RESET_BIT))) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because IMP Reset is pending\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->reset_fail_cnt++;
		if (is_timeout) {
			set_bit(hdev->reset_type, &hdev->reset_pending);
			dev_info(&hdev->pdev->dev,
				 "re-schedule to wait for hw reset done\n");
			return true;
		}

		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
		hclge_clear_reset_cause(hdev);
		mod_timer(&hdev->reset_timer,
			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);

		return false;
	}

	hclge_clear_reset_cause(hdev);
	dev_err(&hdev->pdev->dev, "Reset fail!\n");
	return false;
}
static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		/* fall through */
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	default:
		break;
	}

	return ret;
}
static void hclge_reset(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	bool is_timeout = false;
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->reset_count++;
	/* perform reset of the stack & ae device for a client */
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset;

	ret = hclge_reset_prepare_down(hdev);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	if (hclge_reset_wait(hdev)) {
		is_timeout = true;
		goto err_reset;
	}

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		goto err_reset_lock;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		goto err_reset_lock;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		goto err_reset_lock;

	hclge_clear_reset_cause(hdev);

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		goto err_reset_lock;

	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		goto err_reset;

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset;

	hdev->last_reset_time = jiffies;
	hdev->reset_fail_cnt = 0;
	ae_dev->reset_type = HNAE3_NONE_RESET;

	return;

err_reset_lock:
	rtnl_unlock();
err_reset:
	if (hclge_reset_err_handle(hdev, is_timeout))
		hclge_reset_task_schedule(hdev);
}
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* We might end up getting called broadly because of 2 below cases:
	 * 1. Recoverable error was conveyed through APEI and only way to bring
	 *    normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, error event might not have ae handle available.
	 * Check if this is a new reset request and we are not here just because
	 * last reset attempt did not succeed and watchdog hit us again. We will
	 * know this if last reset request did not occur very recently (watchdog
	 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*HZ).
	 * In case of new request we reset the "reset level" to PF reset.
	 * And if it is a repeat reset request of the most recent one then we
	 * want to make sure we throttle the reset request. Therefore, we will
	 * not allow it again before 3*HZ times.
	 */
	if (!handle)
		handle = &hdev->vport[0].nic;

	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
		return;
	else if (hdev->default_reset_request)
		hdev->reset_level =
			hclge_get_reset_level(hdev,
					      &hdev->default_reset_request);
	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
		hdev->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}
static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	dev_info(&hdev->pdev->dev,
		 "triggering global reset in reset timer\n");
	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully completed the reset, then we can proceed with driver,
	 *       client reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}
static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}
static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
	int i;

	/* start from vport 1 for PF is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);

		/* If vf is not alive, set to default value */
		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
	}
}
static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_update_vport_alive(hdev);
	hclge_service_complete(hdev);
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}
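
/* Allocate MSI-X vectors for a client. Vector 0 is reserved for the misc
 * (reset/mailbox/error) interrupt, so the search starts at index 1; each
 * allocated entry also carries the I/O address the client uses to control
 * that vector's interrupt registers.
 */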
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}
static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}
static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}
static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}
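
/* Program the RSS hash algorithm and key. The key is longer than a single
 * command descriptor payload, so it is pushed in three chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes, with the chunk index encoded in the
 * hash_config field of each command.
 */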
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}
static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}
static void hclge_get_rss_type(struct hclge_vport *vport)
{
	if (vport->rss_tuple_sets.ipv4_tcp_en ||
	    vport->rss_tuple_sets.ipv4_udp_en ||
	    vport->rss_tuple_sets.ipv4_sctp_en ||
	    vport->rss_tuple_sets.ipv6_tcp_en ||
	    vport->rss_tuple_sets.ipv6_udp_en ||
	    vport->rss_tuple_sets.ipv6_sctp_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
		 vport->rss_tuple_sets.ipv6_fragment_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
	else
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
}
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	hclge_get_rss_type(&hdev->vport[0]);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc) {
		switch (vport->rss_algo) {
		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		switch (hfunc) {
		case ETH_RSS_HASH_TOP:
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
			break;
		case ETH_RSS_HASH_XOR:
			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
			break;
		case ETH_RSS_HASH_NO_CHANGE:
			hash_algo = vport->rss_algo;
			break;
		default:
			return -EINVAL;
		}

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}
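
/* Map the ethtool RXH_* hash-field flags onto the driver's tuple enable
 * bits (source/destination IP and L4 port). For SCTP flows the
 * HCLGE_V_TAG_BIT is also set, which by its name appears to cover the
 * SCTP verification tag.
 */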
static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	hclge_get_rss_type(vport);
	return 0;
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}
static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	int i, ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}
static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;

		netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}
static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}
static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
	 * pdev revision(0x20); newer revisions support them. Setting these two
	 * fields does not cause an error when the driver sends the command to
	 * the firmware on revision(0x20).
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}
static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
				 vport->vport_id);
	return hclge_cmd_set_promisc_mode(hdev, &param);
}
static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return 0;
}
static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}
static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}
static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS		0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %d\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	hdev->fd_cfg.fd_en = true;
	hdev->fd_cfg.proto_support =
		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If use max 400bit key, we can support tuples for ether type */
	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
		hdev->fd_cfg.proto_support |= ETHER_FLOW;
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}
static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}
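
/* Convert one rule tuple into the TCAM x/y key pair consumed by the
 * hardware. The calc_x()/calc_y() helpers (defined earlier in this file)
 * derive the pair from a value and its mask so masked-out bits match any
 * packet. Tuples flagged as unused still return true so the caller keeps
 * advancing through the (zero-filled) key layout; false means the tuple
 * occupies no key bytes at all.
 */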
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[3],
		       rule->tuples_mask.src_ip[3]);
		calc_y(tmp_y_l, rule->tuples.src_ip[3],
		       rule->tuples_mask.src_ip[3]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
		       rule->tuples_mask.dst_ip[3]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
		       rule->tuples_mask.dst_ip[3]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}
static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}
static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}
/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region; unused bits will be filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	int i, ret, tuple_size;
	u8 meta_data_region;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0 ; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->location, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->location, ret);
	return ret;
}
static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}
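
/* Validate an ethtool flow spec before converting it to a rule: the
 * location must fit in stage 1 of the TCAM, the flow type must be
 * supported by the configured key width, and user-defined match bytes are
 * rejected. Fields left zero in the spec are collected into *unused so
 * that they get masked out of the generated key.
 */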
static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs, u32 *unused)
{
	struct ethtool_tcpip4_spec *tcp_ip4_spec;
	struct ethtool_usrip4_spec *usr_ip4_spec;
	struct ethtool_tcpip6_spec *tcp_ip6_spec;
	struct ethtool_usrip6_spec *usr_ip6_spec;
	struct ethhdr *ether_spec;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
		return -EOPNOTSUPP;

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

		if (!tcp_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip4_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip4_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (!tcp_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		break;
	case IP_USER_FLOW:
		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

		if (!usr_ip4_spec->ip4src)
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip4_spec->ip4dst)
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip4_spec->tos)
			*unused |= BIT(INNER_IP_TOS);

		if (!usr_ip4_spec->proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip4_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
			return -EOPNOTSUPP;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS);

		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!tcp_ip6_spec->psrc)
			*unused |= BIT(INNER_SRC_PORT);

		if (!tcp_ip6_spec->pdst)
			*unused |= BIT(INNER_DST_PORT);

		if (tcp_ip6_spec->tclass)
			return -EOPNOTSUPP;

		break;
	case IPV6_USER_FLOW:
		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
			BIT(INNER_DST_PORT);

		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
			*unused |= BIT(INNER_SRC_IP);

		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
			*unused |= BIT(INNER_DST_IP);

		if (!usr_ip6_spec->l4_proto)
			*unused |= BIT(INNER_IP_PROTO);

		if (usr_ip6_spec->tclass)
			return -EOPNOTSUPP;

		if (usr_ip6_spec->l4_4_bytes)
			return -EOPNOTSUPP;

		break;
	case ETHER_FLOW:
		ether_spec = &fs->h_u.ether_spec;
		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

		if (is_zero_ether_addr(ether_spec->h_source))
			*unused |= BIT(INNER_SRC_MAC);

		if (is_zero_ether_addr(ether_spec->h_dest))
			*unused |= BIT(INNER_DST_MAC);

		if (!ether_spec->h_proto)
			*unused |= BIT(INNER_ETH_TYPE);

		break;
	default:
		return -EOPNOTSUPP;
	}

	if ((fs->flow_type & FLOW_EXT)) {
		if (fs->h_ext.vlan_etype)
			return -EOPNOTSUPP;
		if (!fs->h_ext.vlan_tci)
			*unused |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci) {
			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	} else {
		*unused |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
			return -EOPNOTSUPP;

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused |= BIT(INNER_DST_MAC);
		else
			*unused &= ~(BIT(INNER_DST_MAC));
	}

	return 0;
}
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	return rule && rule->location == location;
}
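
/* The flow director rule list is kept sorted by rule location. The walk
 * below stops at the first entry at or beyond the target location; an
 * existing entry at that exact location is removed first (replace
 * semantics on add), while deleting a missing location is an error.
 */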
static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add)
			return 0;

	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %d is nonexistent\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	hdev->hclge_fd_rule_num++;

	return 0;
}
4491 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4492 struct ethtool_rx_flow_spec *fs,
4493 struct hclge_fd_rule *rule)
4495 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4497 switch (flow_type) {
4501 rule->tuples.src_ip[3] =
4502 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4503 rule->tuples_mask.src_ip[3] =
4504 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4506 rule->tuples.dst_ip[3] =
4507 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4508 rule->tuples_mask.dst_ip[3] =
4509 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4511 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4512 rule->tuples_mask.src_port =
4513 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4515 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4516 rule->tuples_mask.dst_port =
4517 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4519 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4520 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4522 rule->tuples.ether_proto = ETH_P_IP;
4523 rule->tuples_mask.ether_proto = 0xFFFF;
4527 rule->tuples.src_ip[3] =
4528 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4529 rule->tuples_mask.src_ip[3] =
4530 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4532 rule->tuples.dst_ip[3] =
4533 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4534 rule->tuples_mask.dst_ip[3] =
4535 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4537 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4538 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4540 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4541 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4543 rule->tuples.ether_proto = ETH_P_IP;
4544 rule->tuples_mask.ether_proto = 0xFFFF;
4550 be32_to_cpu_array(rule->tuples.src_ip,
4551 fs->h_u.tcp_ip6_spec.ip6src, 4);
4552 be32_to_cpu_array(rule->tuples_mask.src_ip,
4553 fs->m_u.tcp_ip6_spec.ip6src, 4);
4555 be32_to_cpu_array(rule->tuples.dst_ip,
4556 fs->h_u.tcp_ip6_spec.ip6dst, 4);
4557 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4558 fs->m_u.tcp_ip6_spec.ip6dst, 4);
4560 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4561 rule->tuples_mask.src_port =
4562 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4564 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4565 rule->tuples_mask.dst_port =
4566 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4568 rule->tuples.ether_proto = ETH_P_IPV6;
4569 rule->tuples_mask.ether_proto = 0xFFFF;
4572 case IPV6_USER_FLOW:
4573 be32_to_cpu_array(rule->tuples.src_ip,
4574 fs->h_u.usr_ip6_spec.ip6src, 4);
4575 be32_to_cpu_array(rule->tuples_mask.src_ip,
4576 fs->m_u.usr_ip6_spec.ip6src, 4);
4578 be32_to_cpu_array(rule->tuples.dst_ip,
4579 fs->h_u.usr_ip6_spec.ip6dst, 4);
4580 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4581 fs->m_u.usr_ip6_spec.ip6dst, 4);
4583 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4584 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4586 rule->tuples.ether_proto = ETH_P_IPV6;
4587 rule->tuples_mask.ether_proto = 0xFFFF;
4591 ether_addr_copy(rule->tuples.src_mac,
4592 fs->h_u.ether_spec.h_source);
4593 ether_addr_copy(rule->tuples_mask.src_mac,
4594 fs->m_u.ether_spec.h_source);
4596 ether_addr_copy(rule->tuples.dst_mac,
4597 fs->h_u.ether_spec.h_dest);
4598 ether_addr_copy(rule->tuples_mask.dst_mac,
4599 fs->m_u.ether_spec.h_dest);
4601 rule->tuples.ether_proto =
4602 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4603 rule->tuples_mask.ether_proto =
4604 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4611 switch (flow_type) {
4614 rule->tuples.ip_proto = IPPROTO_SCTP;
4615 rule->tuples_mask.ip_proto = 0xFF;
4619 rule->tuples.ip_proto = IPPROTO_TCP;
4620 rule->tuples_mask.ip_proto = 0xFF;
4624 rule->tuples.ip_proto = IPPROTO_UDP;
4625 rule->tuples_mask.ip_proto = 0xFF;
4631 if (fs->flow_type & FLOW_EXT) {
4632 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4633 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4636 if (fs->flow_type & FLOW_MAC_EXT) {
4637 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4638 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
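/* hclge_add_fd_entry - ethtool entry point for inserting a flow
 * director rule. The ring_cookie either requests a drop action
 * (RX_CLS_FLOW_DISC) or encodes a destination queue plus an optional
 * VF id, both of which are range checked before the rule is written
 * to hardware and mirrored into the software rule list.
 */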
4644 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4645 struct ethtool_rxnfc *cmd)
4647 struct hclge_vport *vport = hclge_get_vport(handle);
4648 struct hclge_dev *hdev = vport->back;
4649 u16 dst_vport_id = 0, q_index = 0;
4650 struct ethtool_rx_flow_spec *fs;
4651 struct hclge_fd_rule *rule;
4656 if (!hnae3_dev_fd_supported(hdev))
4659 if (!hdev->fd_cfg.fd_en) {
4660 dev_warn(&hdev->pdev->dev,
4661 "Please enable flow director first\n");
4665 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4667 ret = hclge_fd_check_spec(hdev, fs, &unused);
4669 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4673 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4674 action = HCLGE_FD_ACTION_DROP_PACKET;
4676 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4677 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4680 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4681 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4684 dev_err(&hdev->pdev->dev,
4685 "Error: queue id (%d) > max tqp num (%d)\n",
4690 if (vf > hdev->num_req_vfs) {
4691 dev_err(&hdev->pdev->dev,
4692 "Error: vf id (%d) > max vf num (%d)\n",
4693 vf, hdev->num_req_vfs);
4697 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4701 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4705 ret = hclge_fd_get_tuple(hdev, fs, rule);
4709 rule->flow_type = fs->flow_type;
4711 rule->location = fs->location;
4712 rule->unused_tuple = unused;
4713 rule->vf_id = dst_vport_id;
4714 rule->queue_id = q_index;
4715 rule->action = action;
4717 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4721 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4725 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4736 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4737 struct ethtool_rxnfc *cmd)
4739 struct hclge_vport *vport = hclge_get_vport(handle);
4740 struct hclge_dev *hdev = vport->back;
4741 struct ethtool_rx_flow_spec *fs;
4744 if (!hnae3_dev_fd_supported(hdev))
4747 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4749 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4752 if (!hclge_fd_rule_exist(hdev, fs->location)) {
4753 dev_err(&hdev->pdev->dev,
4754 "Delete fail, rule %d is inexistent\n",
4759 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4760 fs->location, NULL, false);
4764 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4768 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4771 struct hclge_vport *vport = hclge_get_vport(handle);
4772 struct hclge_dev *hdev = vport->back;
4773 struct hclge_fd_rule *rule;
4774 struct hlist_node *node;
4776 if (!hnae3_dev_fd_supported(hdev))
4780 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4782 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4783 rule->location, NULL, false);
4784 hlist_del(&rule->rule_node);
4786 hdev->hclge_fd_rule_num--;
4789 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4791 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4792 rule->location, NULL, false);
4796 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4798 struct hclge_vport *vport = hclge_get_vport(handle);
4799 struct hclge_dev *hdev = vport->back;
4800 struct hclge_fd_rule *rule;
4801 struct hlist_node *node;
4804 /* Return ok here, because reset error handling will check this
4805 * return value. If error is returned here, the reset process will
4806 * fail.
4807 */
4808 if (!hnae3_dev_fd_supported(hdev))
4811 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4812 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4814 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4817 dev_warn(&hdev->pdev->dev,
4818 "Restore rule %d failed, remove it\n",
4820 hlist_del(&rule->rule_node);
4822 hdev->hclge_fd_rule_num--;
4828 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4829 struct ethtool_rxnfc *cmd)
4831 struct hclge_vport *vport = hclge_get_vport(handle);
4832 struct hclge_dev *hdev = vport->back;
4834 if (!hnae3_dev_fd_supported(hdev))
4837 cmd->rule_cnt = hdev->hclge_fd_rule_num;
4838 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4843 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4844 struct ethtool_rxnfc *cmd)
4846 struct hclge_vport *vport = hclge_get_vport(handle);
4847 struct hclge_fd_rule *rule = NULL;
4848 struct hclge_dev *hdev = vport->back;
4849 struct ethtool_rx_flow_spec *fs;
4850 struct hlist_node *node2;
4852 if (!hnae3_dev_fd_supported(hdev))
4855 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4857 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4858 if (rule->location >= fs->location)
4862 if (!rule || fs->location != rule->location)
4865 fs->flow_type = rule->flow_type;
4866 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4870 fs->h_u.tcp_ip4_spec.ip4src =
4871 cpu_to_be32(rule->tuples.src_ip[3]);
4872 fs->m_u.tcp_ip4_spec.ip4src =
4873 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4874 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4876 fs->h_u.tcp_ip4_spec.ip4dst =
4877 cpu_to_be32(rule->tuples.dst_ip[3]);
4878 fs->m_u.tcp_ip4_spec.ip4dst =
4879 rule->unused_tuple & BIT(INNER_DST_IP) ?
4880 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4882 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4883 fs->m_u.tcp_ip4_spec.psrc =
4884 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4885 0 : cpu_to_be16(rule->tuples_mask.src_port);
4887 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4888 fs->m_u.tcp_ip4_spec.pdst =
4889 rule->unused_tuple & BIT(INNER_DST_PORT) ?
4890 0 : cpu_to_be16(rule->tuples_mask.dst_port);
4892 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
4893 fs->m_u.tcp_ip4_spec.tos =
4894 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4895 0 : rule->tuples_mask.ip_tos;
4899 fs->h_u.usr_ip4_spec.ip4src =
4900 cpu_to_be32(rule->tuples.src_ip[3]);
4901 fs->m_u.usr_ip4_spec.ip4src =
4902 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4903 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4905 fs->h_u.usr_ip4_spec.ip4dst =
4906 cpu_to_be32(rule->tuples.dst_ip[3]);
4907 fs->m_u.usr_ip4_spec.ip4dst =
4908 rule->unused_tuple & BIT(INNER_DST_IP) ?
4909 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4911 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
4912 fs->m_u.usr_ip4_spec.tos =
4913 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4914 0 : rule->tuples_mask.ip_tos;
4916 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
4917 fs->m_u.usr_ip4_spec.proto =
4918 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4919 0 : rule->tuples_mask.ip_proto;
4921 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
4927 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
4928 rule->tuples.src_ip, 4);
4929 if (rule->unused_tuple & BIT(INNER_SRC_IP))
4930 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
4932 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
4933 rule->tuples_mask.src_ip, 4);
4935 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
4936 rule->tuples.dst_ip, 4);
4937 if (rule->unused_tuple & BIT(INNER_DST_IP))
4938 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4940 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
4941 rule->tuples_mask.dst_ip, 4);
4943 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4944 fs->m_u.tcp_ip6_spec.psrc =
4945 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4946 0 : cpu_to_be16(rule->tuples_mask.src_port);
4948 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4949 fs->m_u.tcp_ip6_spec.pdst =
4950 rule->unused_tuple & BIT(INNER_DST_PORT) ?
4951 0 : cpu_to_be16(rule->tuples_mask.dst_port);
4954 case IPV6_USER_FLOW:
4955 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
4956 rule->tuples.src_ip, 4);
4957 if (rule->unused_tuple & BIT(INNER_SRC_IP))
4958 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
4960 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
4961 rule->tuples_mask.src_ip, 4);
4963 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
4964 rule->tuples.dst_ip, 4);
4965 if (rule->unused_tuple & BIT(INNER_DST_IP))
4966 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4968 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
4969 rule->tuples_mask.dst_ip, 4);
4971 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
4972 fs->m_u.usr_ip6_spec.l4_proto =
4973 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4974 0 : rule->tuples_mask.ip_proto;
4978 ether_addr_copy(fs->h_u.ether_spec.h_source,
4979 rule->tuples.src_mac);
4980 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
4981 eth_zero_addr(fs->m_u.ether_spec.h_source);
4983 ether_addr_copy(fs->m_u.ether_spec.h_source,
4984 rule->tuples_mask.src_mac);
4986 ether_addr_copy(fs->h_u.ether_spec.h_dest,
4987 rule->tuples.dst_mac);
4988 if (rule->unused_tuple & BIT(INNER_DST_MAC))
4989 eth_zero_addr(fs->m_u.ether_spec.h_dest);
4991 ether_addr_copy(fs->m_u.ether_spec.h_dest,
4992 rule->tuples_mask.dst_mac);
4994 fs->h_u.ether_spec.h_proto =
4995 cpu_to_be16(rule->tuples.ether_proto);
4996 fs->m_u.ether_spec.h_proto =
4997 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
4998 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5005 if (fs->flow_type & FLOW_EXT) {
5006 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5007 fs->m_ext.vlan_tci =
5008 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5009 cpu_to_be16(VLAN_VID_MASK) :
5010 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5013 if (fs->flow_type & FLOW_MAC_EXT) {
5014 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5015 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5016 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5018 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5019 rule->tuples_mask.dst_mac);
5022 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5023 fs->ring_cookie = RX_CLS_FLOW_DISC;
5027 fs->ring_cookie = rule->queue_id;
5028 vf_id = rule->vf_id;
5029 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5030 fs->ring_cookie |= vf_id;
5036 static int hclge_get_all_rules(struct hnae3_handle *handle,
5037 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5039 struct hclge_vport *vport = hclge_get_vport(handle);
5040 struct hclge_dev *hdev = vport->back;
5041 struct hclge_fd_rule *rule;
5042 struct hlist_node *node2;
5045 if (!hnae3_dev_fd_supported(hdev))
5048 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5050 hlist_for_each_entry_safe(rule, node2,
5051 &hdev->fd_rule_list, rule_node) {
5052 if (cnt == cmd->rule_cnt)
5055 rule_locs[cnt] = rule->location;
5059 cmd->rule_cnt = cnt;
5064 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5066 struct hclge_vport *vport = hclge_get_vport(handle);
5067 struct hclge_dev *hdev = vport->back;
5069 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5070 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5073 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5075 struct hclge_vport *vport = hclge_get_vport(handle);
5076 struct hclge_dev *hdev = vport->back;
5078 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5081 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5083 struct hclge_vport *vport = hclge_get_vport(handle);
5084 struct hclge_dev *hdev = vport->back;
5086 return hdev->reset_count;
5089 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5091 struct hclge_vport *vport = hclge_get_vport(handle);
5092 struct hclge_dev *hdev = vport->back;
5094 hdev->fd_cfg.fd_en = enable;
5096 hclge_del_all_fd_entries(handle, false);
5098 hclge_restore_fd_entries(handle);
5101 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5103 struct hclge_desc desc;
5104 struct hclge_config_mac_mode_cmd *req =
5105 (struct hclge_config_mac_mode_cmd *)desc.data;
5109 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5110 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5111 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5112 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5113 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5114 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5115 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5116 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5117 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5118 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5119 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5120 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5121 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5122 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5123 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5124 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5126 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5128 dev_err(&hdev->pdev->dev,
5129 "mac enable fail, ret =%d.\n", ret);
5132 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5134 struct hclge_config_mac_mode_cmd *req;
5135 struct hclge_desc desc;
5139 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5140 /* 1 Read out the MAC mode config at first */
5141 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5142 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5144 dev_err(&hdev->pdev->dev,
5145 "mac loopback get fail, ret =%d.\n", ret);
5149 /* 2 Then setup the loopback flag */
5150 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5151 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5152 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5153 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5155 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5157 /* 3 Config mac work mode with loopback flag
5158 * and its original configuration parameters
5159 */
5160 hclge_cmd_reuse_desc(&desc, false);
5161 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5163 dev_err(&hdev->pdev->dev,
5164 "mac loopback set fail, ret =%d.\n", ret);
5168 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5169 enum hnae3_loop loop_mode)
5171 #define HCLGE_SERDES_RETRY_MS 10
5172 #define HCLGE_SERDES_RETRY_NUM 100
5173 struct hclge_serdes_lb_cmd *req;
5174 struct hclge_desc desc;
5178 req = (struct hclge_serdes_lb_cmd *)desc.data;
5179 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5181 switch (loop_mode) {
5182 case HNAE3_LOOP_SERIAL_SERDES:
5183 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5185 case HNAE3_LOOP_PARALLEL_SERDES:
5186 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5189 dev_err(&hdev->pdev->dev,
5190 "unsupported serdes loopback mode %d\n", loop_mode);
5195 req->enable = loop_mode_b;
5196 req->mask = loop_mode_b;
5198 req->mask = loop_mode_b;
5201 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5203 dev_err(&hdev->pdev->dev,
5204 "serdes loopback set fail, ret = %d\n", ret);
5209 msleep(HCLGE_SERDES_RETRY_MS);
5210 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5212 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5214 dev_err(&hdev->pdev->dev,
5215 "serdes loopback get, ret = %d\n", ret);
5218 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5219 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5221 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5222 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5224 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5225 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5229 hclge_cfg_mac_mode(hdev, en);
5233 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5234 int stream_id, bool enable)
5236 struct hclge_desc desc;
5237 struct hclge_cfg_com_tqp_queue_cmd *req =
5238 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5241 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5242 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5243 req->stream_id = cpu_to_le16(stream_id);
5244 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5246 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5248 dev_err(&hdev->pdev->dev,
5249 "Tqp enable fail, status =%d.\n", ret);
5253 static int hclge_set_loopback(struct hnae3_handle *handle,
5254 enum hnae3_loop loop_mode, bool en)
5256 struct hclge_vport *vport = hclge_get_vport(handle);
5257 struct hclge_dev *hdev = vport->back;
5260 switch (loop_mode) {
5261 case HNAE3_LOOP_APP:
5262 ret = hclge_set_app_loopback(hdev, en);
5264 case HNAE3_LOOP_SERIAL_SERDES:
5265 case HNAE3_LOOP_PARALLEL_SERDES:
5266 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5270 dev_err(&hdev->pdev->dev,
5271 "loop_mode %d is not supported\n", loop_mode);
5275 for (i = 0; i < vport->alloc_tqps; i++) {
5276 ret = hclge_tqp_enable(hdev, i, 0, en);
5284 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5286 struct hclge_vport *vport = hclge_get_vport(handle);
5287 struct hnae3_queue *queue;
5288 struct hclge_tqp *tqp;
5291 for (i = 0; i < vport->alloc_tqps; i++) {
5292 queue = handle->kinfo.tqp[i];
5293 tqp = container_of(queue, struct hclge_tqp, q);
5294 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5298 static int hclge_ae_start(struct hnae3_handle *handle)
5300 struct hclge_vport *vport = hclge_get_vport(handle);
5301 struct hclge_dev *hdev = vport->back;
5304 hclge_cfg_mac_mode(hdev, true);
5305 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5306 mod_timer(&hdev->service_timer, jiffies + HZ);
5307 hdev->hw.mac.link = 0;
5309 /* reset tqp stats */
5310 hclge_reset_tqp_stats(handle);
5312 hclge_mac_start_phy(hdev);
5317 static void hclge_ae_stop(struct hnae3_handle *handle)
5319 struct hclge_vport *vport = hclge_get_vport(handle);
5320 struct hclge_dev *hdev = vport->back;
5322 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5324 del_timer_sync(&hdev->service_timer);
5325 cancel_work_sync(&hdev->service_task);
5326 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5328 /* If it is not PF reset, the firmware will disable the MAC,
5329 * so we only need to stop the PHY here.
5330 */
5331 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5332 hdev->reset_type != HNAE3_FUNC_RESET) {
5333 hclge_mac_stop_phy(hdev);
5338 hclge_cfg_mac_mode(hdev, false);
5340 hclge_mac_stop_phy(hdev);
5342 /* reset tqp stats */
5343 hclge_reset_tqp_stats(handle);
5344 del_timer_sync(&hdev->service_timer);
5345 cancel_work_sync(&hdev->service_task);
5346 hclge_update_link_status(hdev);
5349 int hclge_vport_start(struct hclge_vport *vport)
5351 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5352 vport->last_active_jiffies = jiffies;
5356 void hclge_vport_stop(struct hclge_vport *vport)
5358 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5361 static int hclge_client_start(struct hnae3_handle *handle)
5363 struct hclge_vport *vport = hclge_get_vport(handle);
5365 return hclge_vport_start(vport);
5368 static void hclge_client_stop(struct hnae3_handle *handle)
5370 struct hclge_vport *vport = hclge_get_vport(handle);
5372 hclge_vport_stop(vport);
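/* hclge_get_mac_vlan_cmd_status - convert the firmware response of a
 * MAC-VLAN table command into an errno. The meaning of @resp_code
 * depends on @op: for ADD, 2 and 3 indicate unicast and multicast
 * table overflow; for REMOVE and LKUP, 1 means the entry was not
 * found.
 */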
5375 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5376 u16 cmdq_resp, u8 resp_code,
5377 enum hclge_mac_vlan_tbl_opcode op)
5379 struct hclge_dev *hdev = vport->back;
5380 int return_status = -EIO;
5383 dev_err(&hdev->pdev->dev,
5384 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5389 if (op == HCLGE_MAC_VLAN_ADD) {
5390 if (!resp_code || resp_code == 1) {
5392 } else if (resp_code == 2) {
5393 return_status = -ENOSPC;
5394 dev_err(&hdev->pdev->dev,
5395 "add mac addr failed for uc_overflow.\n");
5396 } else if (resp_code == 3) {
5397 return_status = -ENOSPC;
5398 dev_err(&hdev->pdev->dev,
5399 "add mac addr failed for mc_overflow.\n");
5401 dev_err(&hdev->pdev->dev,
5402 "add mac addr failed for undefined, code=%d.\n",
5405 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5408 } else if (resp_code == 1) {
5409 return_status = -ENOENT;
5410 dev_dbg(&hdev->pdev->dev,
5411 "remove mac addr failed for miss.\n");
5413 dev_err(&hdev->pdev->dev,
5414 "remove mac addr failed for undefined, code=%d.\n",
5417 } else if (op == HCLGE_MAC_VLAN_LKUP) {
5420 } else if (resp_code == 1) {
5421 return_status = -ENOENT;
5422 dev_dbg(&hdev->pdev->dev,
5423 "lookup mac addr failed for miss.\n");
5425 dev_err(&hdev->pdev->dev,
5426 "lookup mac addr failed for undefined, code=%d.\n",
5430 return_status = -EINVAL;
5431 dev_err(&hdev->pdev->dev,
5432 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5436 return return_status;
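/* hclge_update_desc_vfid - set or clear @vfid in the function bitmap
 * of a multicast MAC-VLAN table entry. The 256 possible function ids
 * are spread across two descriptors: ids 0-191 occupy the six 32-bit
 * words of desc[1], ids 192-255 the words of desc[2].
 */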
5439 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5444 if (vfid > 255 || vfid < 0)
5447 if (vfid >= 0 && vfid <= 191) {
5448 word_num = vfid / 32;
5449 bit_num = vfid % 32;
5451 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5453 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5455 word_num = (vfid - 192) / 32;
5456 bit_num = vfid % 32;
5458 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5460 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5466 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5468 #define HCLGE_DESC_NUMBER 3
5469 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5472 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5473 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5474 if (desc[i].data[j])
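/* hclge_prepare_mac_addr - pack a 6-byte MAC address into the layout
 * expected by the MAC-VLAN table: bytes 0-3 go into mac_addr_hi32 with
 * byte 0 in the least significant position, and bytes 4-5 into
 * mac_addr_lo16.
 */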
5480 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5483 const unsigned char *mac_addr = addr;
5484 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5485 (mac_addr[0]) | (mac_addr[1] << 8);
5486 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
5488 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5489 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5492 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5493 struct hclge_mac_vlan_tbl_entry_cmd *req)
5495 struct hclge_dev *hdev = vport->back;
5496 struct hclge_desc desc;
5501 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5503 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5505 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5507 dev_err(&hdev->pdev->dev,
5508 "del mac addr failed for cmd_send, ret =%d.\n",
5512 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5513 retval = le16_to_cpu(desc.retval);
5515 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5516 HCLGE_MAC_VLAN_REMOVE);
5519 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5520 struct hclge_mac_vlan_tbl_entry_cmd *req,
5521 struct hclge_desc *desc,
5524 struct hclge_dev *hdev = vport->back;
5529 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5531 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5532 memcpy(desc[0].data,
5534 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5535 hclge_cmd_setup_basic_desc(&desc[1],
5536 HCLGE_OPC_MAC_VLAN_ADD,
5538 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5539 hclge_cmd_setup_basic_desc(&desc[2],
5540 HCLGE_OPC_MAC_VLAN_ADD,
5542 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5544 memcpy(desc[0].data,
5546 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5547 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5550 dev_err(&hdev->pdev->dev,
5551 "lookup mac addr failed for cmd_send, ret =%d.\n",
5555 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5556 retval = le16_to_cpu(desc[0].retval);
5558 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5559 HCLGE_MAC_VLAN_LKUP);
5562 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5563 struct hclge_mac_vlan_tbl_entry_cmd *req,
5564 struct hclge_desc *mc_desc)
5566 struct hclge_dev *hdev = vport->back;
5573 struct hclge_desc desc;
5575 hclge_cmd_setup_basic_desc(&desc,
5576 HCLGE_OPC_MAC_VLAN_ADD,
5578 memcpy(desc.data, req,
5579 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5580 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5581 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5582 retval = le16_to_cpu(desc.retval);
5584 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5586 HCLGE_MAC_VLAN_ADD);
5588 hclge_cmd_reuse_desc(&mc_desc[0], false);
5589 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5590 hclge_cmd_reuse_desc(&mc_desc[1], false);
5591 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5592 hclge_cmd_reuse_desc(&mc_desc[2], false);
5593 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5594 memcpy(mc_desc[0].data, req,
5595 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5596 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5597 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5598 retval = le16_to_cpu(mc_desc[0].retval);
5600 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5602 HCLGE_MAC_VLAN_ADD);
5606 dev_err(&hdev->pdev->dev,
5607 "add mac addr failed for cmd_send, ret =%d.\n",
5615 static int hclge_init_umv_space(struct hclge_dev *hdev)
5617 u16 allocated_size = 0;
5620 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5625 if (allocated_size < hdev->wanted_umv_size)
5626 dev_warn(&hdev->pdev->dev,
5627 "Alloc umv space failed, want %d, get %d\n",
5628 hdev->wanted_umv_size, allocated_size);
5630 mutex_init(&hdev->umv_mutex);
5631 hdev->max_umv_size = allocated_size;
5632 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5633 hdev->share_umv_size = hdev->priv_umv_size +
5634 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5639 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5643 if (hdev->max_umv_size > 0) {
5644 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5648 hdev->max_umv_size = 0;
5650 mutex_destroy(&hdev->umv_mutex);
5655 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5656 u16 *allocated_size, bool is_alloc)
5658 struct hclge_umv_spc_alc_cmd *req;
5659 struct hclge_desc desc;
5662 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5663 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5664 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5665 req->space_size = cpu_to_le32(space_size);
5667 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5669 dev_err(&hdev->pdev->dev,
5670 "%s umv space failed for cmd_send, ret =%d\n",
5671 is_alloc ? "allocate" : "free", ret);
5675 if (is_alloc && allocated_size)
5676 *allocated_size = le32_to_cpu(desc.data[1]);
5681 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5683 struct hclge_vport *vport;
5686 for (i = 0; i < hdev->num_alloc_vport; i++) {
5687 vport = &hdev->vport[i];
5688 vport->used_umv_num = 0;
5691 mutex_lock(&hdev->umv_mutex);
5692 hdev->share_umv_size = hdev->priv_umv_size +
5693 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5694 mutex_unlock(&hdev->umv_mutex);
5697 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5699 struct hclge_dev *hdev = vport->back;
5702 mutex_lock(&hdev->umv_mutex);
5703 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5704 hdev->share_umv_size == 0);
5705 mutex_unlock(&hdev->umv_mutex);
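/* hclge_update_umv_space - account a unicast MAC entry against the
 * vport's private UMV quota, drawing from (or giving back to) the
 * shared pool once the private quota is used up.
 */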
5710 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5712 struct hclge_dev *hdev = vport->back;
5714 mutex_lock(&hdev->umv_mutex);
5716 if (vport->used_umv_num > hdev->priv_umv_size)
5717 hdev->share_umv_size++;
5718 vport->used_umv_num--;
5720 if (vport->used_umv_num >= hdev->priv_umv_size)
5721 hdev->share_umv_size--;
5722 vport->used_umv_num++;
5724 mutex_unlock(&hdev->umv_mutex);
5727 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5728 const unsigned char *addr)
5730 struct hclge_vport *vport = hclge_get_vport(handle);
5732 return hclge_add_uc_addr_common(vport, addr);
5735 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5736 const unsigned char *addr)
5738 struct hclge_dev *hdev = vport->back;
5739 struct hclge_mac_vlan_tbl_entry_cmd req;
5740 struct hclge_desc desc;
5741 u16 egress_port = 0;
5744 /* mac addr check */
5745 if (is_zero_ether_addr(addr) ||
5746 is_broadcast_ether_addr(addr) ||
5747 is_multicast_ether_addr(addr)) {
5748 dev_err(&hdev->pdev->dev,
5749 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5751 is_zero_ether_addr(addr),
5752 is_broadcast_ether_addr(addr),
5753 is_multicast_ether_addr(addr));
5757 memset(&req, 0, sizeof(req));
5758 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5760 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5761 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5763 req.egress_port = cpu_to_le16(egress_port);
5765 hclge_prepare_mac_addr(&req, addr);
5767 /* Lookup the mac address in the mac_vlan table, and add
5768 * it if the entry does not exist. Duplicate unicast entries
5769 * are not allowed in the mac_vlan table.
5770 */
5771 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5772 if (ret == -ENOENT) {
5773 if (!hclge_is_umv_space_full(vport)) {
5774 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5776 hclge_update_umv_space(vport, false);
5780 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5781 hdev->priv_umv_size);
5786 /* check if we just hit the duplicate */
5790 dev_err(&hdev->pdev->dev,
5791 "PF failed to add unicast entry(%pM) in the MAC table\n",
5797 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5798 const unsigned char *addr)
5800 struct hclge_vport *vport = hclge_get_vport(handle);
5802 return hclge_rm_uc_addr_common(vport, addr);
5805 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5806 const unsigned char *addr)
5808 struct hclge_dev *hdev = vport->back;
5809 struct hclge_mac_vlan_tbl_entry_cmd req;
5812 /* mac addr check */
5813 if (is_zero_ether_addr(addr) ||
5814 is_broadcast_ether_addr(addr) ||
5815 is_multicast_ether_addr(addr)) {
5816 dev_dbg(&hdev->pdev->dev,
5817 "Remove mac err! invalid mac:%pM.\n",
5822 memset(&req, 0, sizeof(req));
5823 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5824 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5825 hclge_prepare_mac_addr(&req, addr);
5826 ret = hclge_remove_mac_vlan_tbl(vport, &req);
5828 hclge_update_umv_space(vport, true);
5833 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5834 const unsigned char *addr)
5836 struct hclge_vport *vport = hclge_get_vport(handle);
5838 return hclge_add_mc_addr_common(vport, addr);
5841 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5842 const unsigned char *addr)
5844 struct hclge_dev *hdev = vport->back;
5845 struct hclge_mac_vlan_tbl_entry_cmd req;
5846 struct hclge_desc desc[3];
5849 /* mac addr check */
5850 if (!is_multicast_ether_addr(addr)) {
5851 dev_err(&hdev->pdev->dev,
5852 "Add mc mac err! invalid mac:%pM.\n",
5856 memset(&req, 0, sizeof(req));
5857 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5858 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5859 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5860 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5861 hclge_prepare_mac_addr(&req, addr);
5862 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5864 /* This mac addr exists, update the VFID for it */
5865 hclge_update_desc_vfid(desc, vport->vport_id, false);
5866 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5868 /* This mac addr does not exist, add a new entry for it */
5869 memset(desc[0].data, 0, sizeof(desc[0].data));
5870 memset(desc[1].data, 0, sizeof(desc[0].data));
5871 memset(desc[2].data, 0, sizeof(desc[0].data));
5872 hclge_update_desc_vfid(desc, vport->vport_id, false);
5873 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5876 if (status == -ENOSPC)
5877 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
5882 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
5883 const unsigned char *addr)
5885 struct hclge_vport *vport = hclge_get_vport(handle);
5887 return hclge_rm_mc_addr_common(vport, addr);
5890 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
5891 const unsigned char *addr)
5893 struct hclge_dev *hdev = vport->back;
5894 struct hclge_mac_vlan_tbl_entry_cmd req;
5895 enum hclge_cmd_status status;
5896 struct hclge_desc desc[3];
5898 /* mac addr check */
5899 if (!is_multicast_ether_addr(addr)) {
5900 dev_dbg(&hdev->pdev->dev,
5901 "Remove mc mac err! invalid mac:%pM.\n",
5906 memset(&req, 0, sizeof(req));
5907 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5908 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5909 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5910 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5911 hclge_prepare_mac_addr(&req, addr);
5912 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5914 /* This mac addr exists, remove this handle's VFID for it */
5915 hclge_update_desc_vfid(desc, vport->vport_id, true);
5917 if (hclge_is_all_function_id_zero(desc))
5918 /* All the vfids are zero, so delete this entry */
5919 status = hclge_remove_mac_vlan_tbl(vport, &req);
5921 /* Not all the vfids are zero, just update the vfid */
5922 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5925 /* Maybe this mac address is in mta table, but it cannot be
5926 * deleted here because an entry of mta represents an address
5927 * range rather than a specific address. The delete action on
5928 * all entries will take effect in update_mta_status, called by
5929 * hns3_nic_set_rx_mode.
5930 */
5937 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
5938 u16 cmdq_resp, u8 resp_code)
5940 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
5941 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
5942 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
5943 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
5948 dev_err(&hdev->pdev->dev,
5949 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
5954 switch (resp_code) {
5955 case HCLGE_ETHERTYPE_SUCCESS_ADD:
5956 case HCLGE_ETHERTYPE_ALREADY_ADD:
5959 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
5960 dev_err(&hdev->pdev->dev,
5961 "add mac ethertype failed for manager table overflow.\n");
5962 return_status = -EIO;
5964 case HCLGE_ETHERTYPE_KEY_CONFLICT:
5965 dev_err(&hdev->pdev->dev,
5966 "add mac ethertype failed for key conflict.\n");
5967 return_status = -EIO;
5970 dev_err(&hdev->pdev->dev,
5971 "add mac ethertype failed for undefined, code=%d.\n",
5973 return_status = -EIO;
5976 return return_status;
5979 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
5980 const struct hclge_mac_mgr_tbl_entry_cmd *req)
5982 struct hclge_desc desc;
5987 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
5988 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
5990 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5992 dev_err(&hdev->pdev->dev,
5993 "add mac ethertype failed for cmd_send, ret =%d.\n",
5998 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5999 retval = le16_to_cpu(desc.retval);
6001 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6004 static int init_mgr_tbl(struct hclge_dev *hdev)
6009 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6010 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6012 dev_err(&hdev->pdev->dev,
6013 "add mac ethertype failed, ret =%d.\n",
6022 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6024 struct hclge_vport *vport = hclge_get_vport(handle);
6025 struct hclge_dev *hdev = vport->back;
6027 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6030 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6033 const unsigned char *new_addr = (const unsigned char *)p;
6034 struct hclge_vport *vport = hclge_get_vport(handle);
6035 struct hclge_dev *hdev = vport->back;
6038 /* mac addr check */
6039 if (is_zero_ether_addr(new_addr) ||
6040 is_broadcast_ether_addr(new_addr) ||
6041 is_multicast_ether_addr(new_addr)) {
6042 dev_err(&hdev->pdev->dev,
6043 "Change uc mac err! invalid mac:%p.\n",
6048 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6049 dev_warn(&hdev->pdev->dev,
6050 "remove old uc mac address fail.\n");
6052 ret = hclge_add_uc_addr(handle, new_addr);
6054 dev_err(&hdev->pdev->dev,
6055 "add uc mac address fail, ret =%d.\n",
6059 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6060 dev_err(&hdev->pdev->dev,
6061 "restore uc mac address fail.\n");
6066 ret = hclge_pause_addr_cfg(hdev, new_addr);
6068 dev_err(&hdev->pdev->dev,
6069 "configure mac pause address fail, ret =%d.\n",
6074 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6079 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6082 struct hclge_vport *vport = hclge_get_vport(handle);
6083 struct hclge_dev *hdev = vport->back;
6085 if (!hdev->hw.mac.phydev)
6088 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6091 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6092 u8 fe_type, bool filter_en)
6094 struct hclge_vlan_filter_ctrl_cmd *req;
6095 struct hclge_desc desc;
6098 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6100 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6101 req->vlan_type = vlan_type;
6102 req->vlan_fe = filter_en ? fe_type : 0;
6104 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6106 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6112 #define HCLGE_FILTER_TYPE_VF 0
6113 #define HCLGE_FILTER_TYPE_PORT 1
6114 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6115 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6116 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6117 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6118 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6119 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6120 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6121 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6122 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6124 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6126 struct hclge_vport *vport = hclge_get_vport(handle);
6127 struct hclge_dev *hdev = vport->back;
6129 if (hdev->pdev->revision >= 0x21) {
6130 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6131 HCLGE_FILTER_FE_EGRESS, enable);
6132 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6133 HCLGE_FILTER_FE_INGRESS, enable);
6135 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6136 HCLGE_FILTER_FE_EGRESS_V1_B, enable);
6139 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6141 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
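/* hclge_set_vf_vlan_common - add or kill @vlan in the VF VLAN table on
 * behalf of @vfid. The target function is selected via a 256-bit
 * bitmap split across two descriptors of HCLGE_MAX_VF_BYTES bytes
 * each, so vfid / 8 decides which descriptor carries the bit.
 */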
6144 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6145 bool is_kill, u16 vlan, u8 qos,
6148 #define HCLGE_MAX_VF_BYTES 16
6149 struct hclge_vlan_filter_vf_cfg_cmd *req0;
6150 struct hclge_vlan_filter_vf_cfg_cmd *req1;
6151 struct hclge_desc desc[2];
6156 hclge_cmd_setup_basic_desc(&desc[0],
6157 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6158 hclge_cmd_setup_basic_desc(&desc[1],
6159 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6161 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6163 vf_byte_off = vfid / 8;
6164 vf_byte_val = 1 << (vfid % 8);
6166 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6167 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6169 req0->vlan_id = cpu_to_le16(vlan);
6170 req0->vlan_cfg = is_kill;
6172 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6173 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6175 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6177 ret = hclge_cmd_send(&hdev->hw, desc, 2);
6179 dev_err(&hdev->pdev->dev,
6180 "Send vf vlan command fail, ret =%d.\n",
6186 #define HCLGE_VF_VLAN_NO_ENTRY 2
6187 if (!req0->resp_code || req0->resp_code == 1)
6190 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6191 dev_warn(&hdev->pdev->dev,
6192 "vf vlan table is full, vf vlan filter is disabled\n");
6196 dev_err(&hdev->pdev->dev,
6197 "Add vf vlan filter fail, ret =%d.\n",
6200 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
6201 if (!req0->resp_code)
6204 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6205 dev_warn(&hdev->pdev->dev,
6206 "vlan %d filter is not in vf vlan table\n",
6211 dev_err(&hdev->pdev->dev,
6212 "Kill vf vlan filter fail, ret =%d.\n",
6219 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6220 u16 vlan_id, bool is_kill)
6222 struct hclge_vlan_filter_pf_cfg_cmd *req;
6223 struct hclge_desc desc;
6224 u8 vlan_offset_byte_val;
6225 u8 vlan_offset_byte;
6229 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6231 vlan_offset_160 = vlan_id / 160;
6232 vlan_offset_byte = (vlan_id % 160) / 8;
6233 vlan_offset_byte_val = 1 << (vlan_id % 8);
6235 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6236 req->vlan_offset = vlan_offset_160;
6237 req->vlan_cfg = is_kill;
6238 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6240 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6242 dev_err(&hdev->pdev->dev,
6243 "port vlan command, send fail, ret =%d.\n", ret);
6247 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6248 u16 vport_id, u16 vlan_id, u8 qos,
6251 u16 vport_idx, vport_num = 0;
6254 if (is_kill && !vlan_id)
6257 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6260 dev_err(&hdev->pdev->dev,
6261 "Set %d vport vlan filter config fail, ret =%d.\n",
6266 /* vlan 0 may be added twice when 8021q module is enabled */
6267 if (!is_kill && !vlan_id &&
6268 test_bit(vport_id, hdev->vlan_table[vlan_id]))
6271 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6272 dev_err(&hdev->pdev->dev,
6273 "Add port vlan failed, vport %d is already in vlan %d\n",
6279 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6280 dev_err(&hdev->pdev->dev,
6281 "Delete port vlan failed, vport %d is not in vlan %d\n",
6286 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6289 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6290 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6296 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6297 u16 vlan_id, bool is_kill)
6299 struct hclge_vport *vport = hclge_get_vport(handle);
6300 struct hclge_dev *hdev = vport->back;
6302 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6306 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6307 u16 vlan, u8 qos, __be16 proto)
6309 struct hclge_vport *vport = hclge_get_vport(handle);
6310 struct hclge_dev *hdev = vport->back;
6312 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6314 if (proto != htons(ETH_P_8021Q))
6315 return -EPROTONOSUPPORT;
6317 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6320 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6322 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6323 struct hclge_vport_vtag_tx_cfg_cmd *req;
6324 struct hclge_dev *hdev = vport->back;
6325 struct hclge_desc desc;
6328 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6330 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6331 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6332 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6333 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6334 vcfg->accept_tag1 ? 1 : 0);
6335 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6336 vcfg->accept_untag1 ? 1 : 0);
6337 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6338 vcfg->accept_tag2 ? 1 : 0);
6339 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6340 vcfg->accept_untag2 ? 1 : 0);
6341 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6342 vcfg->insert_tag1_en ? 1 : 0);
6343 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6344 vcfg->insert_tag2_en ? 1 : 0);
6345 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6347 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6348 req->vf_bitmap[req->vf_offset] =
6349 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6351 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6353 dev_err(&hdev->pdev->dev,
6354 "Send port txvlan cfg command fail, ret =%d\n",
6360 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6362 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6363 struct hclge_vport_vtag_rx_cfg_cmd *req;
6364 struct hclge_dev *hdev = vport->back;
6365 struct hclge_desc desc;
6368 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6370 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6371 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6372 vcfg->strip_tag1_en ? 1 : 0);
6373 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6374 vcfg->strip_tag2_en ? 1 : 0);
6375 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6376 vcfg->vlan1_vlan_prionly ? 1 : 0);
6377 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6378 vcfg->vlan2_vlan_prionly ? 1 : 0);
6380 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6381 req->vf_bitmap[req->vf_offset] =
6382 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6384 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6386 dev_err(&hdev->pdev->dev,
6387 "Send port rxvlan cfg command fail, ret =%d\n",
6393 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6395 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6396 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6397 struct hclge_desc desc;
6400 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6401 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6402 rx_req->ot_fst_vlan_type =
6403 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6404 rx_req->ot_sec_vlan_type =
6405 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6406 rx_req->in_fst_vlan_type =
6407 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6408 rx_req->in_sec_vlan_type =
6409 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6411 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6413 dev_err(&hdev->pdev->dev,
6414 "Send rxvlan protocol type command fail, ret =%d\n",
6419 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6421 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6422 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6423 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6425 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6427 dev_err(&hdev->pdev->dev,
6428 "Send txvlan protocol type command fail, ret =%d\n",
6434 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6436 #define HCLGE_DEF_VLAN_TYPE 0x8100
6438 struct hnae3_handle *handle = &hdev->vport[0].nic;
6439 struct hclge_vport *vport;
6443 if (hdev->pdev->revision >= 0x21) {
6444 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6445 HCLGE_FILTER_FE_EGRESS, true);
6449 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6450 HCLGE_FILTER_FE_INGRESS, true);
6454 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6455 HCLGE_FILTER_FE_EGRESS_V1_B,
6461 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6463 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6464 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6465 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6466 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6467 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6468 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6470 ret = hclge_set_vlan_protocol_type(hdev);
6474 for (i = 0; i < hdev->num_alloc_vport; i++) {
6475 vport = &hdev->vport[i];
6476 vport->txvlan_cfg.accept_tag1 = true;
6477 vport->txvlan_cfg.accept_untag1 = true;
6479 /* accept_tag2 and accept_untag2 are not supported on
6480 * pdev revision(0x20); newer revisions support them. Setting
6481 * these two fields does not return an error when the driver
6482 * sends the command to the firmware on revision(0x20).
6483 * These two fields cannot be configured by the user.
6484 */
6485 vport->txvlan_cfg.accept_tag2 = true;
6486 vport->txvlan_cfg.accept_untag2 = true;
6488 vport->txvlan_cfg.insert_tag1_en = false;
6489 vport->txvlan_cfg.insert_tag2_en = false;
6490 vport->txvlan_cfg.default_tag1 = 0;
6491 vport->txvlan_cfg.default_tag2 = 0;
6493 ret = hclge_set_vlan_tx_offload_cfg(vport);
6497 vport->rxvlan_cfg.strip_tag1_en = false;
6498 vport->rxvlan_cfg.strip_tag2_en = true;
6499 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6500 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6502 ret = hclge_set_vlan_rx_offload_cfg(vport);
6507 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6510 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6512 struct hclge_vport *vport = hclge_get_vport(handle);
6514 vport->rxvlan_cfg.strip_tag1_en = false;
6515 vport->rxvlan_cfg.strip_tag2_en = enable;
6516 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6517 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6519 return hclge_set_vlan_rx_offload_cfg(vport);
6522 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6524 struct hclge_config_max_frm_size_cmd *req;
6525 struct hclge_desc desc;
6527 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6529 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6530 req->max_frm_size = cpu_to_le16(new_mps);
6531 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6533 return hclge_cmd_send(&hdev->hw, &desc, 1);
6536 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6538 struct hclge_vport *vport = hclge_get_vport(handle);
6540 return hclge_set_vport_mtu(vport, new_mtu);
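/* hclge_set_vport_mtu - convert an MTU into a maximum frame size (MTU
 * plus Ethernet header, FCS and two VLAN tags) and apply it. A VF's
 * frame size must fit within the PF's current mps, and the PF may not
 * shrink below any VF's setting; a PF change also triggers a packet
 * buffer reallocation.
 */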
6543 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6545 struct hclge_dev *hdev = vport->back;
6546 int i, max_frm_size, ret = 0;
6548 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6549 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6550 max_frm_size > HCLGE_MAC_MAX_FRAME)
6553 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6554 mutex_lock(&hdev->vport_lock);
6555 /* VF's mps must fit within hdev->mps */
6556 if (vport->vport_id && max_frm_size > hdev->mps) {
6557 mutex_unlock(&hdev->vport_lock);
6559 } else if (vport->vport_id) {
6560 vport->mps = max_frm_size;
6561 mutex_unlock(&hdev->vport_lock);
6565 /* PF's mps must be no smaller than any VF's mps */
6566 for (i = 1; i < hdev->num_alloc_vport; i++)
6567 if (max_frm_size < hdev->vport[i].mps) {
6568 mutex_unlock(&hdev->vport_lock);
6572 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6574 ret = hclge_set_mac_mtu(hdev, max_frm_size);
6576 dev_err(&hdev->pdev->dev,
6577 "Change mtu fail, ret =%d\n", ret);
6581 hdev->mps = max_frm_size;
6582 vport->mps = max_frm_size;
6584 ret = hclge_buffer_alloc(hdev);
6586 dev_err(&hdev->pdev->dev,
6587 "Allocate buffer fail, ret =%d\n", ret);
6590 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6591 mutex_unlock(&hdev->vport_lock);
6595 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6598 struct hclge_reset_tqp_queue_cmd *req;
6599 struct hclge_desc desc;
6602 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6604 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6605 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6606 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6608 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6610 dev_err(&hdev->pdev->dev,
6611 "Send tqp reset cmd error, status =%d\n", ret);
6618 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6620 struct hclge_reset_tqp_queue_cmd *req;
6621 struct hclge_desc desc;
6624 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6626 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6627 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6629 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6631 dev_err(&hdev->pdev->dev,
6632 "Get reset status error, status =%d\n", ret);
6636 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
6639 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
6641 struct hnae3_queue *queue;
6642 struct hclge_tqp *tqp;
6644 queue = handle->kinfo.tqp[queue_id];
6645 tqp = container_of(queue, struct hclge_tqp, q);
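/* hclge_reset_tqp - reset a single queue pair: disable the queue,
 * assert the TQP reset through firmware, poll the reset status up to
 * HCLGE_TQP_RESET_TRY_TIMES times, then deassert the soft reset.
 */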
6650 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
6652 struct hclge_vport *vport = hclge_get_vport(handle);
6653 struct hclge_dev *hdev = vport->back;
6654 int reset_try_times = 0;
6659 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
6661 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
6663 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
6667 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6669 dev_err(&hdev->pdev->dev,
6670 "Send reset tqp cmd fail, ret = %d\n", ret);
6674 reset_try_times = 0;
6675 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6676 /* Wait for tqp hw reset */
6678 reset_status = hclge_get_reset_status(hdev, queue_gid);
6683 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6684 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
6688 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6690 dev_err(&hdev->pdev->dev,
6691 "Deassert the soft reset fail, ret = %d\n", ret);
6696 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
6698 struct hclge_dev *hdev = vport->back;
6699 int reset_try_times = 0;
6704 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
6706 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6708 dev_warn(&hdev->pdev->dev,
6709 "Send reset tqp cmd fail, ret = %d\n", ret);
6713 reset_try_times = 0;
6714 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6715 /* Wait for tqp hw reset */
6717 reset_status = hclge_get_reset_status(hdev, queue_gid);
6722 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6723 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
6727 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6729 dev_warn(&hdev->pdev->dev,
6730 "Deassert the soft reset fail, ret = %d\n", ret);
6733 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
6735 struct hclge_vport *vport = hclge_get_vport(handle);
6736 struct hclge_dev *hdev = vport->back;
6738 return hdev->fw_version;
6741 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6743 struct phy_device *phydev = hdev->hw.mac.phydev;
6748 phy_set_asym_pause(phydev, rx_en, tx_en);
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n", ret);
		return ret;
	}

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;

	return 0;
}
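
/* Resolve the pause mode negotiated by the PHY (per the IEEE 802.3
 * Annex 28B rules, via mii_resolve_flowctrl_fdx()) into rx/tx pause
 * enables and program the MAC accordingly. Pause is meaningless at half
 * duplex, so both directions are forced off there.
 */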
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	fc_autoneg = hclge_get_autoneg(handle);
	if (auto_neg != fc_autoneg) {
		dev_info(&hdev->pdev->dev,
			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	if (!fc_autoneg)
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	/* Only support flow control negotiation for netdev with
	 * phy attached for now.
	 */
	if (!phydev)
		return -EOPNOTSUPP;

	return phy_start_aneg(phydev);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
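
/* Read the MDI/MDI-X control and status from the copper PHY. The PHY is
 * switched to its MDIX register page for the reads and restored to the
 * copper page afterwards.
 */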
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static int hclge_init_instance_hw(struct hclge_dev *hdev)
{
	return hclge_mac_connect_phy(hdev);
}

static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
{
	hclge_mac_disconnect_phy(hdev);
}
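
/* Bind a KNIC/UNIC/RoCE client to every vport. The RoCE instance is
 * only initialized once both the NIC client and RoCE support are
 * present; on failure the client pointers are rolled back through the
 * clear_nic/clear_roce labels.
 */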
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:

			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			ret = hclge_init_instance_hw(hdev);
			if (ret) {
				client->ops->uninit_instance(&vport->nic,
							     0);
				goto clear_nic;
			}

			hnae3_set_client_init_flag(client, ae_dev, 1);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(client, ae_dev, 1);
			}

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			hclge_uninit_instance_hw(hdev);
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
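
/* Enable the PCI device, set a 64-bit DMA mask (falling back to
 * 32-bit), claim the regions and map BAR2 for register access.
 */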
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}
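
/* Kick off a function-level reset and wait (up to HCLGE_FLR_WAIT_CNT *
 * HCLGE_FLR_WAIT_MS milliseconds) for the reset service task to bring
 * the function down before the FLR proceeds.
 */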
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
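
/* Main probe path for the PF: bring up PCI, the firmware command queue,
 * MSI-X, TQPs/vports, MAC, VLAN, TM, RSS and flow-director state, then
 * arm the service/reset timers and the misc (vector0) interrupt.
 */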
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_destroy_cmd_queue(&hdev->hw);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_start(vport);
		vport++;
	}
}
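
/* Re-initialize the function after a reset: the same hardware blocks as
 * in hclge_init_ae_dev() are reprogrammed, but software state such as
 * timers, workqueues and vport structures is reused, so any failure
 * simply propagates to the reset handler.
 */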
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on core/global reset.
	 */
	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n",
			ret);
		return ret;
	}

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_hw_error_set_state(hdev, false);
	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}
static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = vport->alloc_tqps;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static void hclge_release_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);

		tqp->q.handle = NULL;
		tqp->q.tqp_index = 0;
		tqp->alloced = false;
	}

	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
}
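
/* ethtool -L handler: release the old TQPs, re-create the NIC side with
 * the requested queue count, rebuild the TM schedule, then rewrite the
 * RSS TC mode and indirection table to match the new rss_size.
 */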
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	/* Free old tqps, and reallocate with new tqp number when nic setup */
	hclge_release_tqp(vport);

	ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
	if (ret) {
		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_map_tqp_to_vport(hdev, vport);
	if (ret) {
		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
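
/* Read @regs_num 32-bit registers from firmware into @data. The first
 * descriptor carries two fewer data words than the rest (its leading
 * slots hold the query header), hence the "+ 2" when sizing the
 * descriptor array and the "- 2" when draining descriptor 0.
 */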
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
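
/* ethtool register-dump handler: emit the directly readable PCIe
 * register lists (cmdq, common, per-ring, per-vector), padding each
 * group to a full line with SEPARATOR_VALUE, then append the 32/64-bit
 * register blocks queried from firmware.
 */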
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF registers values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common registers values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
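
/* ethtool -p (identify) handler: drive the locate LED on or off via the
 * LED_STATUS_CFG firmware command.
 */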
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static int hclge_gro_en(struct hnae3_handle *handle, int enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
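
/* Glue between the hardware-agnostic hnae3 layer and this PF driver:
 * every operation the hns3 netdev/ethtool front end may invoke is
 * dispatched through this table.
 */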
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);