// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

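/* Illustrative pairing of the two macros above: g_mac_stats_string (below)
 * records each counter's offset with HCLGE_MAC_STATS_FIELD_OFF(), and
 * hclge_comm_get_stats() then reads the counters generically, e.g.
 *
 *	u32 off = HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num);
 *	u64 val = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats, off);
 */
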
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

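/* The register address lists below enumerate the CMDQ, common, per-ring and
 * per-vector interrupt registers that the driver can dump for debugging
 * (e.g. through the ethtool register-dump path).
 */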
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

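/* The MAC statistics block is returned as HCLGE_MAC_CMD_NUM linked command
 * descriptors. The first descriptor also carries the command header, so it
 * holds two fewer 64-bit data words than the others; every word is
 * accumulated into hdev->hw_stats.mac_stats in struct hclge_mac_stats
 * field order.
 */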
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

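/* Query each TQP's RX and TX packet counters from firmware and accumulate
 * them into the per-queue software stats (rcb_rx/tx_ring_pktnum_rcd).
 */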
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each TQP contributes one TX and one RX counter */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported by all MAC modes (GE/XGE/LGE/CGE)
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

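/* Query how many TQPs, how much packet buffer and how many interrupt
 * vectors firmware assigned to this PF. When RoCE is supported, the first
 * roce_base_msix_offset vectors belong to the NIC and the RoCE vectors
 * follow them, so num_msi covers both ranges.
 */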
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}

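/* The static PF configuration arrives in two command descriptors whose
 * param[] words are little-endian bit-fields. Note the split MAC address:
 * the low 32 bits sit in desc[0] param[2] and the high 16 bits in
 * param[3]; the two-step "(high << 31) << 1" shift places them at
 * bits 32..47 of the 64-bit temporary.
 */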
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length must be in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Currently uncontiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	/* the max word is encoded with the same mask/shift as the min word */
	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < kinfo->num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.desc_num = kinfo->num_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = kinfo->num_tqps;

	return 0;
}

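/* Derive the queue layout for a kNIC handle: rss_size is clamped to both
 * rss_size_max and an even split across TCs, and num_tqps becomes
 * rss_size * num_tc. For example, 16 TQPs with 4 enabled TCs and
 * rss_size_max >= 4 yield rss_size = 4 and one block of 4 contiguous
 * queues per TC (tqp_offset = tc * rss_size).
 */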
static int hclge_knic_setup(struct hclge_vport *vport,
			    u16 num_tqps, u16 num_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if the TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++)	{
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

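/* One vport is created for the PF itself plus one per VMDq instance and
 * one per requested VF. TQPs are split evenly between vports; the
 * remainder of the integer division goes to vport 0 (the PF), hence
 * tqp_main_vport = tqp_per_vport + num_tqps % num_vport below.
 */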
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

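/* Check whether rx_all (total RX buffer space minus the TX allocation)
 * covers the private buffers plus a shared buffer of at least
 *	max(2 * mps + DV, pfc_num * mps + (tc_num - pfc_num) * mps / 2 + mps)
 * and, if it does, fill in the shared-buffer size and per-TC thresholds.
 */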
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf, rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
#define HCLGE_BUF_SIZE_UNIT	128
	u32 rx_all = hdev->pkt_buf_size, aligned_mps;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = aligned_mps;
				priv->wl.high = priv->wl.low + aligned_mps;
				priv->buf_size = priv->wl.high +
						 HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * aligned_mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + aligned_mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = aligned_mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at a time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at a time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	/* 10M and 100M are the only speeds that support half duplex */
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

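/* Firmware speed codes used in the switch below (and decoded again by
 * hclge_parse_speed() above):
 * 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G, 5 = 100G, 6 = 10M, 7 = 100M.
 */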
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	hdev->support_sfp_query = true;
	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
					 hdev->hw.mac.duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}

static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}

static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
	struct hclge_sfp_speed_cmd *resp = NULL;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
	resp = (struct hclge_sfp_speed_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}

	*speed = resp->sfp_speed;

	return 0;
}

static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	u32 speed;
	int ret;

	/* get the speed from SFP cmd when phy
	 * doesn't exist.
	 */
	if (mac.phydev)
		return 0;

	/* if IMP does not support get SFP/qSFP speed, return directly */
	if (!hdev->support_sfp_query)
		return 0;

	ret = hclge_get_sfp_speed(hdev, &speed);
	if (ret == -EOPNOTSUPP) {
		hdev->support_sfp_query = false;
		return ret;
	} else if (ret) {
		return ret;
	}

	if (speed == HCLGE_MAC_SPEED_UNKNOWN)
		return 0; /* do nothing if no SFP */

	/* must config full duplex for SFP */
	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
}

static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}

static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg;
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since, we would have not
	 * cleared RX CMDQ event this time we would receive again another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

2281 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2283 struct hclge_dev *hdev = data;
2287 hclge_enable_vector(&hdev->misc_vector, false);
2288 event_cause = hclge_check_event_cause(hdev, &clearval);
2290 /* vector 0 interrupt is shared with reset and mailbox source events. */
2291 switch (event_cause) {
2292 case HCLGE_VECTOR0_EVENT_RST:
2293 hclge_reset_task_schedule(hdev);
2295 case HCLGE_VECTOR0_EVENT_MBX:
2296 /* If we are here then,
2297 * 1. Either we are not handling any mbx task and none is
2298 * scheduled as well, OR
2300 * 2. We could be handling a mbx task but nothing more is
2301 * scheduled.
2302 * In both cases, we should schedule the mbx task as there are more
2303 * mbx messages reported by this interrupt.
2305 hclge_mbx_task_schedule(hdev);
2308 dev_warn(&hdev->pdev->dev,
2309 "received unknown or unhandled event of vector0\n");
2313 /* clear the source of the interrupt if it is not caused by a reset */
2314 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2315 hclge_clear_event_cause(hdev, event_cause, clearval);
2316 hclge_enable_vector(&hdev->misc_vector, true);
2322 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2324 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2325 dev_warn(&hdev->pdev->dev,
2326 "vector(vector_id %d) has been freed.\n", vector_id);
2330 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2331 hdev->num_msi_left += 1;
2332 hdev->num_msi_used -= 1;
2335 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2337 struct hclge_misc_vector *vector = &hdev->misc_vector;
2339 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2341 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2342 hdev->vector_status[0] = 0;
2344 hdev->num_msi_left -= 1;
2345 hdev->num_msi_used += 1;
2348 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2352 hclge_get_misc_vector(hdev);
2354 /* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
2355 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2356 0, "hclge_misc", hdev);
2358 hclge_free_vector(hdev, 0);
2359 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2360 hdev->misc_vector.vector_irq);
2366 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2368 free_irq(hdev->misc_vector.vector_irq, hdev);
2369 hclge_free_vector(hdev, 0);
2372 static int hclge_notify_client(struct hclge_dev *hdev,
2373 enum hnae3_reset_notify_type type)
2375 struct hnae3_client *client = hdev->nic_client;
2378 if (!client->ops->reset_notify)
2381 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2382 struct hnae3_handle *handle = &hdev->vport[i].nic;
2385 ret = client->ops->reset_notify(handle, type);
2387 dev_err(&hdev->pdev->dev,
2388 "notify nic client failed %d(%d)\n", type, ret);
2396 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2397 enum hnae3_reset_notify_type type)
2399 struct hnae3_client *client = hdev->roce_client;
2406 if (!client->ops->reset_notify)
2409 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2410 struct hnae3_handle *handle = &hdev->vport[i].roce;
2412 ret = client->ops->reset_notify(handle, type);
2414 dev_err(&hdev->pdev->dev,
2415 "notify roce client failed %d(%d)",
2424 static int hclge_reset_wait(struct hclge_dev *hdev)
2426 #define HCLGE_RESET_WAIT_MS 100
2427 #define HCLGE_RESET_WAIT_CNT 200
2428 u32 val, reg, reg_bit;
2431 switch (hdev->reset_type) {
2432 case HNAE3_IMP_RESET:
2433 reg = HCLGE_GLOBAL_RESET_REG;
2434 reg_bit = HCLGE_IMP_RESET_BIT;
2436 case HNAE3_GLOBAL_RESET:
2437 reg = HCLGE_GLOBAL_RESET_REG;
2438 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2440 case HNAE3_CORE_RESET:
2441 reg = HCLGE_GLOBAL_RESET_REG;
2442 reg_bit = HCLGE_CORE_RESET_BIT;
2444 case HNAE3_FUNC_RESET:
2445 reg = HCLGE_FUN_RST_ING;
2446 reg_bit = HCLGE_FUN_RST_ING_B;
2448 case HNAE3_FLR_RESET:
2451 dev_err(&hdev->pdev->dev,
2452 "Wait for unsupported reset type: %d\n",
2457 if (hdev->reset_type == HNAE3_FLR_RESET) {
2458 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2459 cnt++ < HCLGE_RESET_WAIT_CNT)
2460 msleep(HCLGE_RESET_WAIT_MS);
2462 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2463 dev_err(&hdev->pdev->dev,
2464 "flr wait timeout: %d\n", cnt);
2471 val = hclge_read_dev(&hdev->hw, reg);
2472 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2473 msleep(HCLGE_RESET_WAIT_MS);
2474 val = hclge_read_dev(&hdev->hw, reg);
2478 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2479 dev_warn(&hdev->pdev->dev,
2480 "Wait for reset timeout: %d\n", hdev->reset_type);
2487 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2489 struct hclge_vf_rst_cmd *req;
2490 struct hclge_desc desc;
2492 req = (struct hclge_vf_rst_cmd *)desc.data;
2493 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2494 req->dest_vfid = func_id;
2499 return hclge_cmd_send(&hdev->hw, &desc, 1);
2502 int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2506 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2507 struct hclge_vport *vport = &hdev->vport[i];
2510 /* Send cmd to set/clear VF's FUNC_RST_ING */
2511 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2513 dev_err(&hdev->pdev->dev,
2514 "set vf(%d) rst failed %d!\n",
2515 vport->vport_id, ret);
2522 /* Inform VF to process the reset.
2523 * hclge_inform_reset_assert_to_vf may fail if VF
2524 * driver is not loaded.
2526 ret = hclge_inform_reset_assert_to_vf(vport);
2528 dev_warn(&hdev->pdev->dev,
2529 "inform reset to vf(%d) failed %d!\n",
2530 vport->vport_id, ret);
2536 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2538 struct hclge_desc desc;
2539 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2542 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2543 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2544 req->fun_reset_vfid = func_id;
2546 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2548 dev_err(&hdev->pdev->dev,
2549 "send function reset cmd fail, status =%d\n", ret);
2554 static void hclge_do_reset(struct hclge_dev *hdev)
2556 struct pci_dev *pdev = hdev->pdev;
2559 switch (hdev->reset_type) {
2560 case HNAE3_GLOBAL_RESET:
2561 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2562 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2563 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2564 dev_info(&pdev->dev, "Global Reset requested\n");
2566 case HNAE3_CORE_RESET:
2567 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2568 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2569 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2570 dev_info(&pdev->dev, "Core Reset requested\n");
2572 case HNAE3_FUNC_RESET:
2573 dev_info(&pdev->dev, "PF Reset requested\n");
2574 /* schedule again to check later */
2575 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2576 hclge_reset_task_schedule(hdev);
2578 case HNAE3_FLR_RESET:
2579 dev_info(&pdev->dev, "FLR requested\n");
2580 /* schedule again to check later */
2581 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2582 hclge_reset_task_schedule(hdev);
2585 dev_warn(&pdev->dev,
2586 "Unsupported reset type: %d\n", hdev->reset_type);
2591 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2592 unsigned long *addr)
2594 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2596 /* return the highest priority reset level amongst all */
2597 if (test_bit(HNAE3_IMP_RESET, addr)) {
2598 rst_level = HNAE3_IMP_RESET;
2599 clear_bit(HNAE3_IMP_RESET, addr);
2600 clear_bit(HNAE3_GLOBAL_RESET, addr);
2601 clear_bit(HNAE3_CORE_RESET, addr);
2602 clear_bit(HNAE3_FUNC_RESET, addr);
2603 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2604 rst_level = HNAE3_GLOBAL_RESET;
2605 clear_bit(HNAE3_GLOBAL_RESET, addr);
2606 clear_bit(HNAE3_CORE_RESET, addr);
2607 clear_bit(HNAE3_FUNC_RESET, addr);
2608 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
2609 rst_level = HNAE3_CORE_RESET;
2610 clear_bit(HNAE3_CORE_RESET, addr);
2611 clear_bit(HNAE3_FUNC_RESET, addr);
2612 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2613 rst_level = HNAE3_FUNC_RESET;
2614 clear_bit(HNAE3_FUNC_RESET, addr);
2615 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2616 rst_level = HNAE3_FLR_RESET;
2617 clear_bit(HNAE3_FLR_RESET, addr);
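/* Example (illustrative): if both HNAE3_GLOBAL_RESET and HNAE3_FUNC_RESET
 * are pending in *addr, the chain above returns HNAE3_GLOBAL_RESET and
 * clears both bits, because the global reset supersedes the function
 * reset.
 */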
2623 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2627 switch (hdev->reset_type) {
2628 case HNAE3_IMP_RESET:
2629 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2631 case HNAE3_GLOBAL_RESET:
2632 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2634 case HNAE3_CORE_RESET:
2635 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2644 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2645 hclge_enable_vector(&hdev->misc_vector, true);
2648 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2652 switch (hdev->reset_type) {
2653 case HNAE3_FUNC_RESET:
2655 case HNAE3_FLR_RESET:
2656 ret = hclge_set_all_vf_rst(hdev, true);
2665 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2670 switch (hdev->reset_type) {
2671 case HNAE3_FUNC_RESET:
2672 /* There is no mechanism for the PF to know whether the VF has
2673 * stopped IO for now, so just wait 100 ms for the VF to stop IO
2676 ret = hclge_func_reset_cmd(hdev, 0);
2678 dev_err(&hdev->pdev->dev,
2679 "asserting function reset fail %d!\n", ret);
2683 /* After performing a PF reset, it is not necessary to do any
2684 * mailbox handling or send any command to the firmware, because
2685 * mailbox handling and firmware commands are only valid
2686 * after hclge_cmd_init is called.
2688 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2690 case HNAE3_FLR_RESET:
2691 /* There is no mechanism for the PF to know whether the VF has
2692 * stopped IO for now, so just wait 100 ms for the VF to stop IO
2695 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2696 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2698 case HNAE3_IMP_RESET:
2699 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2700 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2701 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2707 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2712 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2714 #define MAX_RESET_FAIL_CNT 5
2715 #define RESET_UPGRADE_DELAY_SEC 10
2717 if (hdev->reset_pending) {
2718 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2719 hdev->reset_pending);
2721 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2722 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2723 BIT(HCLGE_IMP_RESET_BIT))) {
2724 dev_info(&hdev->pdev->dev,
2725 "reset failed because IMP Reset is pending\n");
2726 hclge_clear_reset_cause(hdev);
2728 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2729 hdev->reset_fail_cnt++;
2731 set_bit(hdev->reset_type, &hdev->reset_pending);
2732 dev_info(&hdev->pdev->dev,
2733 "re-schedule to wait for hw reset done\n");
2737 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2738 hclge_clear_reset_cause(hdev);
2739 mod_timer(&hdev->reset_timer,
2740 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
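/* Illustrative timeline: after MAX_RESET_FAIL_CNT (5) failed attempts the
 * reset cause is cleared and reset_timer is armed RESET_UPGRADE_DELAY_SEC
 * (10 s) ahead; when it fires, hclge_reset_timer() below requests a
 * global reset as the upgraded level.
 */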
2745 hclge_clear_reset_cause(hdev);
2746 dev_err(&hdev->pdev->dev, "Reset fail!\n");
2750 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2754 switch (hdev->reset_type) {
2755 case HNAE3_FUNC_RESET:
2757 case HNAE3_FLR_RESET:
2758 ret = hclge_set_all_vf_rst(hdev, false);
2767 static void hclge_reset(struct hclge_dev *hdev)
2769 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2770 bool is_timeout = false;
2773 /* Initialize the ae_dev reset status as well, in case the enet layer
2774 * wants to know whether the device is undergoing reset
2776 ae_dev->reset_type = hdev->reset_type;
2777 hdev->reset_count++;
2778 hdev->last_reset_time = jiffies;
2779 /* perform reset of the stack & ae device for a client */
2780 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2784 ret = hclge_reset_prepare_down(hdev);
2789 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2791 goto err_reset_lock;
2795 ret = hclge_reset_prepare_wait(hdev);
2799 if (hclge_reset_wait(hdev)) {
2804 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2809 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2811 goto err_reset_lock;
2813 ret = hclge_reset_ae_dev(hdev->ae_dev);
2815 goto err_reset_lock;
2817 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2819 goto err_reset_lock;
2821 hclge_clear_reset_cause(hdev);
2823 ret = hclge_reset_prepare_up(hdev);
2825 goto err_reset_lock;
2827 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2829 goto err_reset_lock;
2833 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
2837 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
2846 if (hclge_reset_err_handle(hdev, is_timeout))
2847 hclge_reset_task_schedule(hdev);
2850 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
2852 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
2853 struct hclge_dev *hdev = ae_dev->priv;
2855 /* We might end up getting called broadly because of the two cases below:
2856 * 1. A recoverable error was conveyed through APEI and the only way to
2857 * bring back normalcy is to reset.
2858 * 2. A new reset request from the stack due to a timeout.
2860 * For the first case, the error event might not have an ae handle
2861 * available. Check whether this is a new reset request and we are not
2862 * here just because the last reset attempt did not succeed and the
2863 * watchdog hit us again. We know it is new if the last reset request
2864 * did not occur very recently (watchdog timer = 5*HZ, so check after a
2865 * sufficiently large time, say 4*5*HZ). For a new request we reset the
2866 * "reset level" to PF reset. If it is a repeat of the most recent
2867 * request, we want to make sure we throttle it; therefore we will not
2868 * allow another reset before 3*HZ has elapsed.
2871 handle = &hdev->vport[0].nic;
2873 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
2875 else if (hdev->default_reset_request)
2877 hclge_get_reset_level(hdev,
2878 &hdev->default_reset_request);
2879 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
2880 hdev->reset_level = HNAE3_FUNC_RESET;
2882 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
2885 /* request reset & schedule reset task */
2886 set_bit(hdev->reset_level, &hdev->reset_request);
2887 hclge_reset_task_schedule(hdev);
2889 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
2890 hdev->reset_level++;
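/* Example escalation (illustrative): a first stack timeout requests
 * HNAE3_FUNC_RESET; reset_level is then bumped here, so if that reset
 * does not help and the stack times out again, the next request is one
 * level higher, capped at HNAE3_GLOBAL_RESET.
 */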
2893 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
2894 enum hnae3_reset_type rst_type)
2896 struct hclge_dev *hdev = ae_dev->priv;
2898 set_bit(rst_type, &hdev->default_reset_request);
2901 static void hclge_reset_timer(struct timer_list *t)
2903 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
2905 dev_info(&hdev->pdev->dev,
2906 "triggering global reset in reset timer\n");
2907 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
2908 hclge_reset_event(hdev->pdev, NULL);
2911 static void hclge_reset_subtask(struct hclge_dev *hdev)
2913 /* check if there is any ongoing reset in the hardware. This status can
2914 * be checked from reset_pending. If there is, we need to wait for the
2915 * hardware to complete the reset.
2916 * a. If we are able to figure out in reasonable time that the hardware
2917 * has fully reset, we can proceed with the driver and client reset.
2919 * b. Else, we can come back later to check this status, so re-schedule now.
2922 hdev->last_reset_time = jiffies;
2923 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
2924 if (hdev->reset_type != HNAE3_NONE_RESET)
2927 /* check if we got any *new* reset requests to be honored */
2928 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
2929 if (hdev->reset_type != HNAE3_NONE_RESET)
2930 hclge_do_reset(hdev);
2932 hdev->reset_type = HNAE3_NONE_RESET;
2935 static void hclge_reset_service_task(struct work_struct *work)
2937 struct hclge_dev *hdev =
2938 container_of(work, struct hclge_dev, rst_service_task);
2940 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
2943 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
2945 hclge_reset_subtask(hdev);
2947 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
2950 static void hclge_mailbox_service_task(struct work_struct *work)
2952 struct hclge_dev *hdev =
2953 container_of(work, struct hclge_dev, mbx_service_task);
2955 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
2958 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
2960 hclge_mbx_handler(hdev);
2962 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
2965 static void hclge_update_vport_alive(struct hclge_dev *hdev)
2969 /* start from vport 1, since the PF (vport 0) is always alive */
2970 for (i = 1; i < hdev->num_alloc_vport; i++) {
2971 struct hclge_vport *vport = &hdev->vport[i];
2973 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
2974 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
2976 /* If the vf is not alive, reset its mps to the default value */
2977 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2978 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
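/* Illustrative: a VF that has not refreshed last_active_jiffies for more
 * than 8 * HZ (8 seconds) is marked not alive and its mps falls back to
 * HCLGE_MAC_DEFAULT_FRAME.
 */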
2982 static void hclge_service_task(struct work_struct *work)
2984 struct hclge_dev *hdev =
2985 container_of(work, struct hclge_dev, service_task);
2987 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
2988 hclge_update_stats_for_all(hdev);
2989 hdev->hw_stats.stats_timer = 0;
2992 hclge_update_speed_duplex(hdev);
2993 hclge_update_link_status(hdev);
2994 hclge_update_vport_alive(hdev);
2995 hclge_service_complete(hdev);
2998 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3000 /* VF handle has no client */
3001 if (!handle->client)
3002 return container_of(handle, struct hclge_vport, nic);
3003 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3004 return container_of(handle, struct hclge_vport, roce);
3006 return container_of(handle, struct hclge_vport, nic);
3009 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3010 struct hnae3_vector_info *vector_info)
3012 struct hclge_vport *vport = hclge_get_vport(handle);
3013 struct hnae3_vector_info *vector = vector_info;
3014 struct hclge_dev *hdev = vport->back;
3018 vector_num = min(hdev->num_msi_left, vector_num);
3020 for (j = 0; j < vector_num; j++) {
3021 for (i = 1; i < hdev->num_msi; i++) {
3022 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3023 vector->vector = pci_irq_vector(hdev->pdev, i);
3024 vector->io_addr = hdev->hw.io_base +
3025 HCLGE_VECTOR_REG_BASE +
3026 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3028 HCLGE_VECTOR_VF_OFFSET;
3029 hdev->vector_status[i] = vport->vport_id;
3030 hdev->vector_irq[i] = vector->vector;
3039 hdev->num_msi_left -= alloc;
3040 hdev->num_msi_used += alloc;
3045 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3049 for (i = 0; i < hdev->num_msi; i++)
3050 if (vector == hdev->vector_irq[i])
3056 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3058 struct hclge_vport *vport = hclge_get_vport(handle);
3059 struct hclge_dev *hdev = vport->back;
3062 vector_id = hclge_get_vector_index(hdev, vector);
3063 if (vector_id < 0) {
3064 dev_err(&hdev->pdev->dev,
3065 "Get vector index fail. vector_id =%d\n", vector_id);
3069 hclge_free_vector(hdev, vector_id);
3074 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3076 return HCLGE_RSS_KEY_SIZE;
3079 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3081 return HCLGE_RSS_IND_TBL_SIZE;
3084 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3085 const u8 hfunc, const u8 *key)
3087 struct hclge_rss_config_cmd *req;
3088 struct hclge_desc desc;
3093 req = (struct hclge_rss_config_cmd *)desc.data;
3095 for (key_offset = 0; key_offset < 3; key_offset++) {
3096 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3099 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3100 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3102 if (key_offset == 2)
3104 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3106 key_size = HCLGE_RSS_HASH_KEY_NUM;
3108 memcpy(req->hash_key,
3109 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
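/* Key split sketch (assuming HCLGE_RSS_KEY_SIZE = 40 and
 * HCLGE_RSS_HASH_KEY_NUM = 16, as this driver defines them): the 40-byte
 * hash key is spread over three descriptors as 16 + 16 + 8 bytes; the
 * final chunk size comes from the key_offset == 2 branch above,
 * 40 - 16 * 2 = 8.
 */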
3111 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3113 dev_err(&hdev->pdev->dev,
3114 "Configure RSS config fail, status = %d\n",
3122 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3124 struct hclge_rss_indirection_table_cmd *req;
3125 struct hclge_desc desc;
3129 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3131 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3132 hclge_cmd_setup_basic_desc
3133 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3135 req->start_table_index =
3136 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3137 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3139 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3140 req->rss_result[j] =
3141 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
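/* Illustrative: assuming HCLGE_RSS_IND_TBL_SIZE = 512 and
 * HCLGE_RSS_CFG_TBL_SIZE = 16, this loop issues
 * HCLGE_RSS_CFG_TBL_NUM = 512 / 16 = 32 commands, each programming 16
 * consecutive indirection entries starting at start_table_index = i * 16.
 */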
3143 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3145 dev_err(&hdev->pdev->dev,
3146 "Configure rss indir table fail,status = %d\n",
3154 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3155 u16 *tc_size, u16 *tc_offset)
3157 struct hclge_rss_tc_mode_cmd *req;
3158 struct hclge_desc desc;
3162 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3163 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3165 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3168 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3169 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3170 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3171 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3172 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3174 req->rss_tc_mode[i] = cpu_to_le16(mode);
3177 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3179 dev_err(&hdev->pdev->dev,
3180 "Configure rss tc mode fail, status = %d\n", ret);
3185 static void hclge_get_rss_type(struct hclge_vport *vport)
3187 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3188 vport->rss_tuple_sets.ipv4_udp_en ||
3189 vport->rss_tuple_sets.ipv4_sctp_en ||
3190 vport->rss_tuple_sets.ipv6_tcp_en ||
3191 vport->rss_tuple_sets.ipv6_udp_en ||
3192 vport->rss_tuple_sets.ipv6_sctp_en)
3193 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3194 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3195 vport->rss_tuple_sets.ipv6_fragment_en)
3196 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3198 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3201 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3203 struct hclge_rss_input_tuple_cmd *req;
3204 struct hclge_desc desc;
3207 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3209 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3211 /* Get the tuple cfg from pf */
3212 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3213 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3214 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3215 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3216 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3217 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3218 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3219 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3220 hclge_get_rss_type(&hdev->vport[0]);
3221 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3223 dev_err(&hdev->pdev->dev,
3224 "Configure rss input fail, status = %d\n", ret);
3228 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3231 struct hclge_vport *vport = hclge_get_vport(handle);
3234 /* Get hash algorithm */
3236 switch (vport->rss_algo) {
3237 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3238 *hfunc = ETH_RSS_HASH_TOP;
3240 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3241 *hfunc = ETH_RSS_HASH_XOR;
3244 *hfunc = ETH_RSS_HASH_UNKNOWN;
3249 /* Get the RSS Key required by the user */
3251 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3253 /* Get indirect table */
3255 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3256 indir[i] = vport->rss_indirection_tbl[i];
3261 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3262 const u8 *key, const u8 hfunc)
3264 struct hclge_vport *vport = hclge_get_vport(handle);
3265 struct hclge_dev *hdev = vport->back;
3269 /* Set the RSS hash key if specified by the user */
3272 case ETH_RSS_HASH_TOP:
3273 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3275 case ETH_RSS_HASH_XOR:
3276 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3278 case ETH_RSS_HASH_NO_CHANGE:
3279 hash_algo = vport->rss_algo;
3285 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3289 /* Update the shadow RSS key with the user specified key */
3290 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3291 vport->rss_algo = hash_algo;
3294 /* Update the shadow RSS table with user specified qids */
3295 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3296 vport->rss_indirection_tbl[i] = indir[i];
3298 /* Update the hardware */
3299 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3302 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3304 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3306 if (nfc->data & RXH_L4_B_2_3)
3307 hash_sets |= HCLGE_D_PORT_BIT;
3309 hash_sets &= ~HCLGE_D_PORT_BIT;
3311 if (nfc->data & RXH_IP_SRC)
3312 hash_sets |= HCLGE_S_IP_BIT;
3314 hash_sets &= ~HCLGE_S_IP_BIT;
3316 if (nfc->data & RXH_IP_DST)
3317 hash_sets |= HCLGE_D_IP_BIT;
3319 hash_sets &= ~HCLGE_D_IP_BIT;
3321 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3322 hash_sets |= HCLGE_V_TAG_BIT;
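/* Worked example (illustrative): with nfc->data = RXH_IP_SRC | RXH_IP_DST |
 * RXH_L4_B_0_1 | RXH_L4_B_2_3 on a TCP_V4_FLOW, the result above is
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT,
 * i.e. hashing over the full 4-tuple; SCTP flows additionally get
 * HCLGE_V_TAG_BIT for the verification tag.
 */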
3327 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3328 struct ethtool_rxnfc *nfc)
3330 struct hclge_vport *vport = hclge_get_vport(handle);
3331 struct hclge_dev *hdev = vport->back;
3332 struct hclge_rss_input_tuple_cmd *req;
3333 struct hclge_desc desc;
3337 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3338 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3341 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3342 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3344 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3345 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3346 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3347 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3348 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3349 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3350 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3351 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3353 tuple_sets = hclge_get_rss_hash_bits(nfc);
3354 switch (nfc->flow_type) {
3356 req->ipv4_tcp_en = tuple_sets;
3359 req->ipv6_tcp_en = tuple_sets;
3362 req->ipv4_udp_en = tuple_sets;
3365 req->ipv6_udp_en = tuple_sets;
3368 req->ipv4_sctp_en = tuple_sets;
3371 if ((nfc->data & RXH_L4_B_0_1) ||
3372 (nfc->data & RXH_L4_B_2_3))
3375 req->ipv6_sctp_en = tuple_sets;
3378 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3381 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3387 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3389 dev_err(&hdev->pdev->dev,
3390 "Set rss tuple fail, status = %d\n", ret);
3394 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3395 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3396 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3397 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3398 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3399 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3400 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3401 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3402 hclge_get_rss_type(vport);
3406 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3407 struct ethtool_rxnfc *nfc)
3409 struct hclge_vport *vport = hclge_get_vport(handle);
3414 switch (nfc->flow_type) {
3416 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3419 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3422 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3425 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3428 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3431 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3435 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3444 if (tuple_sets & HCLGE_D_PORT_BIT)
3445 nfc->data |= RXH_L4_B_2_3;
3446 if (tuple_sets & HCLGE_S_PORT_BIT)
3447 nfc->data |= RXH_L4_B_0_1;
3448 if (tuple_sets & HCLGE_D_IP_BIT)
3449 nfc->data |= RXH_IP_DST;
3450 if (tuple_sets & HCLGE_S_IP_BIT)
3451 nfc->data |= RXH_IP_SRC;
3456 static int hclge_get_tc_size(struct hnae3_handle *handle)
3458 struct hclge_vport *vport = hclge_get_vport(handle);
3459 struct hclge_dev *hdev = vport->back;
3461 return hdev->rss_size_max;
3464 int hclge_rss_init_hw(struct hclge_dev *hdev)
3466 struct hclge_vport *vport = hdev->vport;
3467 u8 *rss_indir = vport[0].rss_indirection_tbl;
3468 u16 rss_size = vport[0].alloc_rss_size;
3469 u8 *key = vport[0].rss_hash_key;
3470 u8 hfunc = vport[0].rss_algo;
3471 u16 tc_offset[HCLGE_MAX_TC_NUM];
3472 u16 tc_valid[HCLGE_MAX_TC_NUM];
3473 u16 tc_size[HCLGE_MAX_TC_NUM];
3477 ret = hclge_set_rss_indir_table(hdev, rss_indir);
3481 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3485 ret = hclge_set_rss_input_tuple(hdev);
3489 /* Each TC has the same queue size, and the tc_size set to hardware is
3490 * the log2 of the roundup power of two of rss_size; the actual queue
3491 * size is limited by the indirection table.
3493 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3494 dev_err(&hdev->pdev->dev,
3495 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3500 roundup_size = roundup_pow_of_two(rss_size);
3501 roundup_size = ilog2(roundup_size);
3503 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3506 if (!(hdev->hw_tc_map & BIT(i)))
3510 tc_size[i] = roundup_size;
3511 tc_offset[i] = rss_size * i;
3514 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
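/* Worked example (illustrative): with rss_size = 24,
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so every enabled TC is
 * given tc_size = 5 (2^5 = 32 hash buckets) and tc_offset = 24 * i; the
 * buckets beyond the real rss_size are masked off by the indirection
 * table.
 */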
3517 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3519 struct hclge_vport *vport = hdev->vport;
3522 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3523 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3524 vport[j].rss_indirection_tbl[i] =
3525 i % vport[j].alloc_rss_size;
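/* Illustrative: with alloc_rss_size = 16 the default indirection table
 * becomes 0, 1, ..., 15, 0, 1, ..., spreading hash buckets round-robin
 * across the vport's allocated RSS queues.
 */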
3529 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3531 struct hclge_vport *vport = hdev->vport;
3534 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3535 vport[i].rss_tuple_sets.ipv4_tcp_en =
3536 HCLGE_RSS_INPUT_TUPLE_OTHER;
3537 vport[i].rss_tuple_sets.ipv4_udp_en =
3538 HCLGE_RSS_INPUT_TUPLE_OTHER;
3539 vport[i].rss_tuple_sets.ipv4_sctp_en =
3540 HCLGE_RSS_INPUT_TUPLE_SCTP;
3541 vport[i].rss_tuple_sets.ipv4_fragment_en =
3542 HCLGE_RSS_INPUT_TUPLE_OTHER;
3543 vport[i].rss_tuple_sets.ipv6_tcp_en =
3544 HCLGE_RSS_INPUT_TUPLE_OTHER;
3545 vport[i].rss_tuple_sets.ipv6_udp_en =
3546 HCLGE_RSS_INPUT_TUPLE_OTHER;
3547 vport[i].rss_tuple_sets.ipv6_sctp_en =
3548 HCLGE_RSS_INPUT_TUPLE_SCTP;
3549 vport[i].rss_tuple_sets.ipv6_fragment_en =
3550 HCLGE_RSS_INPUT_TUPLE_OTHER;
3552 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3554 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
3557 hclge_rss_indir_init_cfg(hdev);
3560 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3561 int vector_id, bool en,
3562 struct hnae3_ring_chain_node *ring_chain)
3564 struct hclge_dev *hdev = vport->back;
3565 struct hnae3_ring_chain_node *node;
3566 struct hclge_desc desc;
3567 struct hclge_ctrl_vector_chain_cmd *req
3568 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3569 enum hclge_cmd_status status;
3570 enum hclge_opcode_type op;
3571 u16 tqp_type_and_id;
3574 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3575 hclge_cmd_setup_basic_desc(&desc, op, false);
3576 req->int_vector_id = vector_id;
3579 for (node = ring_chain; node; node = node->next) {
3580 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3581 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
3583 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3584 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3585 HCLGE_TQP_ID_S, node->tqp_index);
3586 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3588 hnae3_get_field(node->int_gl_idx,
3589 HNAE3_RING_GL_IDX_M,
3590 HNAE3_RING_GL_IDX_S));
3591 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3592 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3593 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3594 req->vfid = vport->vport_id;
3596 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3598 dev_err(&hdev->pdev->dev,
3599 "Map TQP fail, status is %d.\n",
3605 hclge_cmd_setup_basic_desc(&desc,
3608 req->int_vector_id = vector_id;
3613 req->int_cause_num = i;
3614 req->vfid = vport->vport_id;
3615 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3617 dev_err(&hdev->pdev->dev,
3618 "Map TQP fail, status is %d.\n", status);
3626 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3628 struct hnae3_ring_chain_node *ring_chain)
3630 struct hclge_vport *vport = hclge_get_vport(handle);
3631 struct hclge_dev *hdev = vport->back;
3634 vector_id = hclge_get_vector_index(hdev, vector);
3635 if (vector_id < 0) {
3636 dev_err(&hdev->pdev->dev,
3637 "Get vector index fail. vector_id =%d\n", vector_id);
3641 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3644 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3646 struct hnae3_ring_chain_node *ring_chain)
3648 struct hclge_vport *vport = hclge_get_vport(handle);
3649 struct hclge_dev *hdev = vport->back;
3652 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3655 vector_id = hclge_get_vector_index(hdev, vector);
3656 if (vector_id < 0) {
3657 dev_err(&handle->pdev->dev,
3658 "Get vector index fail. ret =%d\n", vector_id);
3662 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3664 dev_err(&handle->pdev->dev,
3665 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3672 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3673 struct hclge_promisc_param *param)
3675 struct hclge_promisc_cfg_cmd *req;
3676 struct hclge_desc desc;
3679 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3681 req = (struct hclge_promisc_cfg_cmd *)desc.data;
3682 req->vf_id = param->vf_id;
3684 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3685 * pdev revision(0x20); newer revisions support them. Setting
3686 * these two fields does not cause an error when the driver
3687 * sends the command to the firmware on revision(0x20).
3689 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3690 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3692 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3694 dev_err(&hdev->pdev->dev,
3695 "Set promisc mode fail, status is %d.\n", ret);
3700 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3701 bool en_mc, bool en_bc, int vport_id)
3706 memset(param, 0, sizeof(struct hclge_promisc_param));
3708 param->enable = HCLGE_PROMISC_EN_UC;
3710 param->enable |= HCLGE_PROMISC_EN_MC;
3712 param->enable |= HCLGE_PROMISC_EN_BC;
3713 param->vf_id = vport_id;
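/* Usage sketch (illustrative): hclge_set_promisc_mode() below always
 * passes en_bc = true, so enabling unicast promiscuous mode on vport 0
 * builds param->enable = HCLGE_PROMISC_EN_UC | HCLGE_PROMISC_EN_BC with
 * param->vf_id = 0.
 */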
3716 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3719 struct hclge_vport *vport = hclge_get_vport(handle);
3720 struct hclge_dev *hdev = vport->back;
3721 struct hclge_promisc_param param;
3723 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true,
3725 return hclge_cmd_set_promisc_mode(hdev, ¶m);
3728 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3730 struct hclge_get_fd_mode_cmd *req;
3731 struct hclge_desc desc;
3734 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3736 req = (struct hclge_get_fd_mode_cmd *)desc.data;
3738 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3740 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3744 *fd_mode = req->mode;
3749 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3750 u32 *stage1_entry_num,
3751 u32 *stage2_entry_num,
3752 u16 *stage1_counter_num,
3753 u16 *stage2_counter_num)
3755 struct hclge_get_fd_allocation_cmd *req;
3756 struct hclge_desc desc;
3759 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3761 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3763 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3765 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3770 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3771 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3772 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3773 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3778 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3780 struct hclge_set_fd_key_config_cmd *req;
3781 struct hclge_fd_key_cfg *stage;
3782 struct hclge_desc desc;
3785 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
3787 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
3788 stage = &hdev->fd_cfg.key_cfg[stage_num];
3789 req->stage = stage_num;
3790 req->key_select = stage->key_sel;
3791 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
3792 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
3793 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
3794 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
3795 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
3796 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
3798 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3800 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
3805 static int hclge_init_fd_config(struct hclge_dev *hdev)
3807 #define LOW_2_WORDS 0x03
3808 struct hclge_fd_key_cfg *key_cfg;
3811 if (!hnae3_dev_fd_supported(hdev))
3814 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
3818 switch (hdev->fd_cfg.fd_mode) {
3819 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
3820 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
3822 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
3823 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
3826 dev_err(&hdev->pdev->dev,
3827 "Unsupported flow director mode %d\n",
3828 hdev->fd_cfg.fd_mode);
3832 hdev->fd_cfg.fd_en = true;
3833 hdev->fd_cfg.proto_support =
3834 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
3835 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
3836 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
3837 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
3838 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
3839 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
3840 key_cfg->outer_sipv6_word_en = 0;
3841 key_cfg->outer_dipv6_word_en = 0;
3843 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
3844 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
3845 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
3846 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
3848 /* If using the max 400-bit key, we can also support tuples for the ether type */
3849 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
3850 hdev->fd_cfg.proto_support |= ETHER_FLOW;
3851 key_cfg->tuple_active |=
3852 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
3855 /* roce_type is used to filter roce frames
3856 * dst_vport is used to specify the rule
3858 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
3860 ret = hclge_get_fd_allocation(hdev,
3861 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
3862 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
3863 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
3864 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
3868 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
3871 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
3872 int loc, u8 *key, bool is_add)
3874 struct hclge_fd_tcam_config_1_cmd *req1;
3875 struct hclge_fd_tcam_config_2_cmd *req2;
3876 struct hclge_fd_tcam_config_3_cmd *req3;
3877 struct hclge_desc desc[3];
3880 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
3881 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3882 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
3883 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3884 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
3886 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
3887 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
3888 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
3890 req1->stage = stage;
3891 req1->xy_sel = sel_x ? 1 : 0;
3892 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
3893 req1->index = cpu_to_le32(loc);
3894 req1->entry_vld = sel_x ? is_add : 0;
3897 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
3898 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
3899 sizeof(req2->tcam_data));
3900 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
3901 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
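/* Illustrative: one TCAM entry is written via a three-descriptor chained
 * command (HCLGE_CMD_FLAG_NEXT); the key bytes are simply split across
 * req1/req2/req3 tcam_data in order, and entry_vld is set only when
 * writing the x-part of a rule being added (sel_x && is_add).
 */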
3904 ret = hclge_cmd_send(&hdev->hw, desc, 3);
3906 dev_err(&hdev->pdev->dev,
3907 "config tcam key fail, ret=%d\n",
3913 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
3914 struct hclge_fd_ad_data *action)
3916 struct hclge_fd_ad_config_cmd *req;
3917 struct hclge_desc desc;
3921 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
3923 req = (struct hclge_fd_ad_config_cmd *)desc.data;
3924 req->index = cpu_to_le32(loc);
3927 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
3928 action->write_rule_id_to_bd);
3929 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
3932 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
3933 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
3934 action->forward_to_direct_queue);
3935 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
3937 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
3938 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
3939 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
3940 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
3941 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
3942 action->next_input_key);
3944 req->ad_data = cpu_to_le64(ad_data);
3945 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3947 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
3952 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
3953 struct hclge_fd_rule *rule)
3955 u16 tmp_x_s, tmp_y_s;
3956 u32 tmp_x_l, tmp_y_l;
3959 if (rule->unused_tuple & tuple_bit)
3962 switch (tuple_bit) {
3965 case BIT(INNER_DST_MAC):
3966 for (i = 0; i < 6; i++) {
3967 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
3968 rule->tuples_mask.dst_mac[i]);
3969 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
3970 rule->tuples_mask.dst_mac[i]);
3974 case BIT(INNER_SRC_MAC):
3975 for (i = 0; i < 6; i++) {
3976 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
3977 rule->tuples_mask.src_mac[i]);
3978 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
3979 rule->tuples_mask.src_mac[i]);
3983 case BIT(INNER_VLAN_TAG_FST):
3984 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
3985 rule->tuples_mask.vlan_tag1);
3986 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
3987 rule->tuples_mask.vlan_tag1);
3988 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3989 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
3992 case BIT(INNER_ETH_TYPE):
3993 calc_x(tmp_x_s, rule->tuples.ether_proto,
3994 rule->tuples_mask.ether_proto);
3995 calc_y(tmp_y_s, rule->tuples.ether_proto,
3996 rule->tuples_mask.ether_proto);
3997 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
3998 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4001 case BIT(INNER_IP_TOS):
4002 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4003 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4006 case BIT(INNER_IP_PROTO):
4007 calc_x(*key_x, rule->tuples.ip_proto,
4008 rule->tuples_mask.ip_proto);
4009 calc_y(*key_y, rule->tuples.ip_proto,
4010 rule->tuples_mask.ip_proto);
4013 case BIT(INNER_SRC_IP):
4014 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4015 rule->tuples_mask.src_ip[3]);
4016 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4017 rule->tuples_mask.src_ip[3]);
4018 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4019 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4022 case BIT(INNER_DST_IP):
4023 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4024 rule->tuples_mask.dst_ip[3]);
4025 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4026 rule->tuples_mask.dst_ip[3]);
4027 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4028 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4031 case BIT(INNER_SRC_PORT):
4032 calc_x(tmp_x_s, rule->tuples.src_port,
4033 rule->tuples_mask.src_port);
4034 calc_y(tmp_y_s, rule->tuples.src_port,
4035 rule->tuples_mask.src_port);
4036 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4037 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4040 case BIT(INNER_DST_PORT):
4041 calc_x(tmp_x_s, rule->tuples.dst_port,
4042 rule->tuples_mask.dst_port);
4043 calc_y(tmp_y_s, rule->tuples.dst_port,
4044 rule->tuples_mask.dst_port);
4045 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4046 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4054 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4055 u8 vf_id, u8 network_port_id)
4057 u32 port_number = 0;
4059 if (port_type == HOST_PORT) {
4060 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4062 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4064 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4066 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4067 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4068 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4074 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4075 __le32 *key_x, __le32 *key_y,
4076 struct hclge_fd_rule *rule)
4078 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4079 u8 cur_pos = 0, tuple_size, shift_bits;
4082 for (i = 0; i < MAX_META_DATA; i++) {
4083 tuple_size = meta_data_key_info[i].key_length;
4084 tuple_bit = key_cfg->meta_data_active & BIT(i);
4086 switch (tuple_bit) {
4087 case BIT(ROCE_TYPE):
4088 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4089 cur_pos += tuple_size;
4091 case BIT(DST_VPORT):
4092 port_number = hclge_get_port_number(HOST_PORT, 0,
4094 hnae3_set_field(meta_data,
4095 GENMASK(cur_pos + tuple_size - 1, cur_pos),
4096 cur_pos, port_number);
4097 cur_pos += tuple_size;
4104 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4105 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4106 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4108 *key_x = cpu_to_le32(tmp_x << shift_bits);
4109 *key_y = cpu_to_le32(tmp_y << shift_bits);
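/* Packing sketch (illustrative): meta data fields are accumulated from
 * bit 0 upward in cur_pos order and then shifted left so they land in
 * the MSB end of the 32-bit word; since the mask passed to calc_x/calc_y
 * above is all ones, the resulting x/y pair encodes an exact match on
 * meta_data in the TCAM.
 */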
4112 /* A complete key consists of a meta data key and a tuple key.
4113 * The meta data key is stored in the MSB region, the tuple key in
4114 * the LSB region, and unused bits are filled with 0.
4116 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4117 struct hclge_fd_rule *rule)
4119 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4120 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4121 u8 *cur_key_x, *cur_key_y;
4122 int i, ret, tuple_size;
4123 u8 meta_data_region;
4125 memset(key_x, 0, sizeof(key_x));
4126 memset(key_y, 0, sizeof(key_y));
4130 for (i = 0; i < MAX_TUPLE; i++) {
4134 tuple_size = tuple_key_info[i].key_length / 8;
4135 check_tuple = key_cfg->tuple_active & BIT(i);
4137 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4140 cur_key_x += tuple_size;
4141 cur_key_y += tuple_size;
4145 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4146 MAX_META_DATA_LENGTH / 8;
4148 hclge_fd_convert_meta_data(key_cfg,
4149 (__le32 *)(key_x + meta_data_region),
4150 (__le32 *)(key_y + meta_data_region),
4153 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4156 dev_err(&hdev->pdev->dev,
4157 "fd key_y config fail, loc=%d, ret=%d\n",
4158 rule->location, ret);
4162 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4165 dev_err(&hdev->pdev->dev,
4166 "fd key_x config fail, loc=%d, ret=%d\n",
4167 rule->location, ret);
4171 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4172 struct hclge_fd_rule *rule)
4174 struct hclge_fd_ad_data ad_data;
4176 ad_data.ad_id = rule->location;
4178 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4179 ad_data.drop_packet = true;
4180 ad_data.forward_to_direct_queue = false;
4181 ad_data.queue_id = 0;
4183 ad_data.drop_packet = false;
4184 ad_data.forward_to_direct_queue = true;
4185 ad_data.queue_id = rule->queue_id;
4188 ad_data.use_counter = false;
4189 ad_data.counter_id = 0;
4191 ad_data.use_next_stage = false;
4192 ad_data.next_input_key = 0;
4194 ad_data.write_rule_id_to_bd = true;
4195 ad_data.rule_id = rule->location;
4197 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
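/* Example (illustrative): a drop rule programs HCLGE_FD_AD_DROP_B with
 * queue_id forced to 0, while an accept rule forwards directly to
 * rule->queue_id; in both cases the TCAM location doubles as the rule id
 * written back to the BD, so a match can be attributed to its rule.
 */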
4200 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4201 struct ethtool_rx_flow_spec *fs, u32 *unused)
4203 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4204 struct ethtool_usrip4_spec *usr_ip4_spec;
4205 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4206 struct ethtool_usrip6_spec *usr_ip6_spec;
4207 struct ethhdr *ether_spec;
4209 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4212 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4215 if ((fs->flow_type & FLOW_EXT) &&
4216 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4217 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4221 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4225 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4226 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4228 if (!tcp_ip4_spec->ip4src)
4229 *unused |= BIT(INNER_SRC_IP);
4231 if (!tcp_ip4_spec->ip4dst)
4232 *unused |= BIT(INNER_DST_IP);
4234 if (!tcp_ip4_spec->psrc)
4235 *unused |= BIT(INNER_SRC_PORT);
4237 if (!tcp_ip4_spec->pdst)
4238 *unused |= BIT(INNER_DST_PORT);
4240 if (!tcp_ip4_spec->tos)
4241 *unused |= BIT(INNER_IP_TOS);
4245 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4246 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4247 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4249 if (!usr_ip4_spec->ip4src)
4250 *unused |= BIT(INNER_SRC_IP);
4252 if (!usr_ip4_spec->ip4dst)
4253 *unused |= BIT(INNER_DST_IP);
4255 if (!usr_ip4_spec->tos)
4256 *unused |= BIT(INNER_IP_TOS);
4258 if (!usr_ip4_spec->proto)
4259 *unused |= BIT(INNER_IP_PROTO);
4261 if (usr_ip4_spec->l4_4_bytes)
4264 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4271 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4272 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4275 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4276 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4277 *unused |= BIT(INNER_SRC_IP);
4279 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4280 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4281 *unused |= BIT(INNER_DST_IP);
4283 if (!tcp_ip6_spec->psrc)
4284 *unused |= BIT(INNER_SRC_PORT);
4286 if (!tcp_ip6_spec->pdst)
4287 *unused |= BIT(INNER_DST_PORT);
4289 if (tcp_ip6_spec->tclass)
4293 case IPV6_USER_FLOW:
4294 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4295 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4296 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4297 BIT(INNER_DST_PORT);
4299 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4300 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4301 *unused |= BIT(INNER_SRC_IP);
4303 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4304 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4305 *unused |= BIT(INNER_DST_IP);
4307 if (!usr_ip6_spec->l4_proto)
4308 *unused |= BIT(INNER_IP_PROTO);
4310 if (usr_ip6_spec->tclass)
4313 if (usr_ip6_spec->l4_4_bytes)
4318 ether_spec = &fs->h_u.ether_spec;
4319 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4320 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4321 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4323 if (is_zero_ether_addr(ether_spec->h_source))
4324 *unused |= BIT(INNER_SRC_MAC);
4326 if (is_zero_ether_addr(ether_spec->h_dest))
4327 *unused |= BIT(INNER_DST_MAC);
4329 if (!ether_spec->h_proto)
4330 *unused |= BIT(INNER_ETH_TYPE);
4337 if ((fs->flow_type & FLOW_EXT)) {
4338 if (fs->h_ext.vlan_etype)
4340 if (!fs->h_ext.vlan_tci)
4341 *unused |= BIT(INNER_VLAN_TAG_FST);
4343 if (fs->m_ext.vlan_tci) {
4344 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4348 *unused |= BIT(INNER_VLAN_TAG_FST);
4351 if (fs->flow_type & FLOW_MAC_EXT) {
4352 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4355 if (is_zero_ether_addr(fs->h_ext.h_dest))
4356 *unused |= BIT(INNER_DST_MAC);
4358 *unused &= ~(BIT(INNER_DST_MAC));
4364 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4366 struct hclge_fd_rule *rule = NULL;
4367 struct hlist_node *node2;
4369 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4370 if (rule->location >= location)
4374 return rule && rule->location == location;
4377 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4378 struct hclge_fd_rule *new_rule,
4382 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4383 struct hlist_node *node2;
4385 if (is_add && !new_rule)
4388 hlist_for_each_entry_safe(rule, node2,
4389 &hdev->fd_rule_list, rule_node) {
4390 if (rule->location >= location)
4395 if (rule && rule->location == location) {
4396 hlist_del(&rule->rule_node);
4398 hdev->hclge_fd_rule_num--;
4403 } else if (!is_add) {
4404 dev_err(&hdev->pdev->dev,
4405 "delete fail, rule %d is inexistent\n",
4410 INIT_HLIST_NODE(&new_rule->rule_node);
4413 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4415 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4417 hdev->hclge_fd_rule_num++;
4422 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4423 struct ethtool_rx_flow_spec *fs,
4424 struct hclge_fd_rule *rule)
4426 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4428 switch (flow_type) {
4432 rule->tuples.src_ip[3] =
4433 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4434 rule->tuples_mask.src_ip[3] =
4435 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4437 rule->tuples.dst_ip[3] =
4438 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4439 rule->tuples_mask.dst_ip[3] =
4440 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4442 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4443 rule->tuples_mask.src_port =
4444 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4446 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4447 rule->tuples_mask.dst_port =
4448 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4450 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4451 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4453 rule->tuples.ether_proto = ETH_P_IP;
4454 rule->tuples_mask.ether_proto = 0xFFFF;
4458 rule->tuples.src_ip[3] =
4459 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4460 rule->tuples_mask.src_ip[3] =
4461 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4463 rule->tuples.dst_ip[3] =
4464 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4465 rule->tuples_mask.dst_ip[3] =
4466 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4468 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4469 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4471 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4472 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4474 rule->tuples.ether_proto = ETH_P_IP;
4475 rule->tuples_mask.ether_proto = 0xFFFF;
4481 be32_to_cpu_array(rule->tuples.src_ip,
4482 fs->h_u.tcp_ip6_spec.ip6src, 4);
4483 be32_to_cpu_array(rule->tuples_mask.src_ip,
4484 fs->m_u.tcp_ip6_spec.ip6src, 4);
4486 be32_to_cpu_array(rule->tuples.dst_ip,
4487 fs->h_u.tcp_ip6_spec.ip6dst, 4);
4488 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4489 fs->m_u.tcp_ip6_spec.ip6dst, 4);
4491 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4492 rule->tuples_mask.src_port =
4493 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4495 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4496 rule->tuples_mask.dst_port =
4497 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4499 rule->tuples.ether_proto = ETH_P_IPV6;
4500 rule->tuples_mask.ether_proto = 0xFFFF;
4503 case IPV6_USER_FLOW:
4504 be32_to_cpu_array(rule->tuples.src_ip,
4505 fs->h_u.usr_ip6_spec.ip6src, 4);
4506 be32_to_cpu_array(rule->tuples_mask.src_ip,
4507 fs->m_u.usr_ip6_spec.ip6src, 4);
4509 be32_to_cpu_array(rule->tuples.dst_ip,
4510 fs->h_u.usr_ip6_spec.ip6dst, 4);
4511 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4512 fs->m_u.usr_ip6_spec.ip6dst, 4);
4514 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4515 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4517 rule->tuples.ether_proto = ETH_P_IPV6;
4518 rule->tuples_mask.ether_proto = 0xFFFF;
4522 ether_addr_copy(rule->tuples.src_mac,
4523 fs->h_u.ether_spec.h_source);
4524 ether_addr_copy(rule->tuples_mask.src_mac,
4525 fs->m_u.ether_spec.h_source);
4527 ether_addr_copy(rule->tuples.dst_mac,
4528 fs->h_u.ether_spec.h_dest);
4529 ether_addr_copy(rule->tuples_mask.dst_mac,
4530 fs->m_u.ether_spec.h_dest);
4532 rule->tuples.ether_proto =
4533 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4534 rule->tuples_mask.ether_proto =
4535 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4542 switch (flow_type) {
4545 rule->tuples.ip_proto = IPPROTO_SCTP;
4546 rule->tuples_mask.ip_proto = 0xFF;
4550 rule->tuples.ip_proto = IPPROTO_TCP;
4551 rule->tuples_mask.ip_proto = 0xFF;
4555 rule->tuples.ip_proto = IPPROTO_UDP;
4556 rule->tuples_mask.ip_proto = 0xFF;
4562 if (fs->flow_type & FLOW_EXT) {
4563 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4564 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4567 if (fs->flow_type & FLOW_MAC_EXT) {
4568 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4569 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
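/* ethtool -N entry point: check the flow spec, resolve the action and
 * destination vport/queue from the ring cookie, then program the rule
 * into stage 1 of the flow director and track it in fd_rule_list.
 */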
4575 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4576 struct ethtool_rxnfc *cmd)
4578 struct hclge_vport *vport = hclge_get_vport(handle);
4579 struct hclge_dev *hdev = vport->back;
4580 u16 dst_vport_id = 0, q_index = 0;
4581 struct ethtool_rx_flow_spec *fs;
4582 struct hclge_fd_rule *rule;
4587 if (!hnae3_dev_fd_supported(hdev))
4590 if (!hdev->fd_cfg.fd_en) {
4591 dev_warn(&hdev->pdev->dev,
4592 "Please enable flow director first\n");
4596 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4598 ret = hclge_fd_check_spec(hdev, fs, &unused);
4600 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4604 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4605 action = HCLGE_FD_ACTION_DROP_PACKET;
4607 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4608 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4611 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4612 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4615 dev_err(&hdev->pdev->dev,
4616 "Error: queue id (%d) > max tqp num (%d)\n",
4621 if (vf > hdev->num_req_vfs) {
4622 dev_err(&hdev->pdev->dev,
4623 "Error: vf id (%d) > max vf num (%d)\n",
4624 vf, hdev->num_req_vfs);
4628 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4632 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4636 ret = hclge_fd_get_tuple(hdev, fs, rule);
4640 rule->flow_type = fs->flow_type;
4642 rule->location = fs->location;
4643 rule->unused_tuple = unused;
4644 rule->vf_id = dst_vport_id;
4645 rule->queue_id = q_index;
4646 rule->action = action;
4648 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4652 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4656 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4667 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4668 struct ethtool_rxnfc *cmd)
4670 struct hclge_vport *vport = hclge_get_vport(handle);
4671 struct hclge_dev *hdev = vport->back;
4672 struct ethtool_rx_flow_spec *fs;
4675 if (!hnae3_dev_fd_supported(hdev))
4678 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4680 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4683 if (!hclge_fd_rule_exist(hdev, fs->location)) {
4684 dev_err(&hdev->pdev->dev,
4685 "Delete fail, rule %d is inexistent\n",
4690 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4691 fs->location, NULL, false);
4695 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4699 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4702 struct hclge_vport *vport = hclge_get_vport(handle);
4703 struct hclge_dev *hdev = vport->back;
4704 struct hclge_fd_rule *rule;
4705 struct hlist_node *node;
4707 if (!hnae3_dev_fd_supported(hdev))
4711 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4713 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4714 rule->location, NULL, false);
4715 hlist_del(&rule->rule_node);
4717 hdev->hclge_fd_rule_num--;
4720 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4722 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4723 rule->location, NULL, false);
4727 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4729 struct hclge_vport *vport = hclge_get_vport(handle);
4730 struct hclge_dev *hdev = vport->back;
4731 struct hclge_fd_rule *rule;
4732 struct hlist_node *node;
4735 /* Return ok here, because reset error handling will check this
4736 * return value. If error is returned here, the reset process will
4737 * fail.
4738 */
4739 if (!hnae3_dev_fd_supported(hdev))
4742 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4743 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4745 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4748 dev_warn(&hdev->pdev->dev,
4749 "Restore rule %d failed, remove it\n",
4751 hlist_del(&rule->rule_node);
4753 hdev->hclge_fd_rule_num--;
4759 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4760 struct ethtool_rxnfc *cmd)
4762 struct hclge_vport *vport = hclge_get_vport(handle);
4763 struct hclge_dev *hdev = vport->back;
4765 if (!hnae3_dev_fd_supported(hdev))
4768 cmd->rule_cnt = hdev->hclge_fd_rule_num;
4769 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
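/* ethtool -n: find the rule at fs->location and translate its stored
 * tuples and masks back into an ethtool_rx_flow_spec.
 */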
4774 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4775 struct ethtool_rxnfc *cmd)
4777 struct hclge_vport *vport = hclge_get_vport(handle);
4778 struct hclge_fd_rule *rule = NULL;
4779 struct hclge_dev *hdev = vport->back;
4780 struct ethtool_rx_flow_spec *fs;
4781 struct hlist_node *node2;
4783 if (!hnae3_dev_fd_supported(hdev))
4786 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4788 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4789 if (rule->location >= fs->location)
4793 if (!rule || fs->location != rule->location)
4796 fs->flow_type = rule->flow_type;
4797 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4801 fs->h_u.tcp_ip4_spec.ip4src =
4802 cpu_to_be32(rule->tuples.src_ip[3]);
4803 fs->m_u.tcp_ip4_spec.ip4src =
4804 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4805 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4807 fs->h_u.tcp_ip4_spec.ip4dst =
4808 cpu_to_be32(rule->tuples.dst_ip[3]);
4809 fs->m_u.tcp_ip4_spec.ip4dst =
4810 rule->unused_tuple & BIT(INNER_DST_IP) ?
4811 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4813 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4814 fs->m_u.tcp_ip4_spec.psrc =
4815 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4816 0 : cpu_to_be16(rule->tuples_mask.src_port);
4818 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4819 fs->m_u.tcp_ip4_spec.pdst =
4820 rule->unused_tuple & BIT(INNER_DST_PORT) ?
4821 0 : cpu_to_be16(rule->tuples_mask.dst_port);
4823 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
4824 fs->m_u.tcp_ip4_spec.tos =
4825 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4826 0 : rule->tuples_mask.ip_tos;
4830 fs->h_u.usr_ip4_spec.ip4src =
4831 cpu_to_be32(rule->tuples.src_ip[3]);
4832 fs->m_u.usr_ip4_spec.ip4src =
4833 rule->unused_tuple & BIT(INNER_SRC_IP) ?
4834 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
4836 fs->h_u.usr_ip4_spec.ip4dst =
4837 cpu_to_be32(rule->tuples.dst_ip[3]);
4838 fs->m_u.usr_ip4_spec.ip4dst =
4839 rule->unused_tuple & BIT(INNER_DST_IP) ?
4840 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
4842 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
4843 fs->m_u.usr_ip4_spec.tos =
4844 rule->unused_tuple & BIT(INNER_IP_TOS) ?
4845 0 : rule->tuples_mask.ip_tos;
4847 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
4848 fs->m_u.usr_ip4_spec.proto =
4849 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4850 0 : rule->tuples_mask.ip_proto;
4852 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
4858 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
4859 rule->tuples.src_ip, 4);
4860 if (rule->unused_tuple & BIT(INNER_SRC_IP))
4861 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
4863 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
4864 rule->tuples_mask.src_ip, 4);
4866 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
4867 rule->tuples.dst_ip, 4);
4868 if (rule->unused_tuple & BIT(INNER_DST_IP))
4869 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4871 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
4872 rule->tuples_mask.dst_ip, 4);
4874 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
4875 fs->m_u.tcp_ip6_spec.psrc =
4876 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
4877 0 : cpu_to_be16(rule->tuples_mask.src_port);
4879 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
4880 fs->m_u.tcp_ip6_spec.pdst =
4881 rule->unused_tuple & BIT(INNER_DST_PORT) ?
4882 0 : cpu_to_be16(rule->tuples_mask.dst_port);
4885 case IPV6_USER_FLOW:
4886 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
4887 rule->tuples.src_ip, 4);
4888 if (rule->unused_tuple & BIT(INNER_SRC_IP))
4889 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
4891 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
4892 rule->tuples_mask.src_ip, 4);
4894 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
4895 rule->tuples.dst_ip, 4);
4896 if (rule->unused_tuple & BIT(INNER_DST_IP))
4897 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
4899 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
4900 rule->tuples_mask.dst_ip, 4);
4902 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
4903 fs->m_u.usr_ip6_spec.l4_proto =
4904 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
4905 0 : rule->tuples_mask.ip_proto;
4909 ether_addr_copy(fs->h_u.ether_spec.h_source,
4910 rule->tuples.src_mac);
4911 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
4912 eth_zero_addr(fs->m_u.ether_spec.h_source);
4914 ether_addr_copy(fs->m_u.ether_spec.h_source,
4915 rule->tuples_mask.src_mac);
4917 ether_addr_copy(fs->h_u.ether_spec.h_dest,
4918 rule->tuples.dst_mac);
4919 if (rule->unused_tuple & BIT(INNER_DST_MAC))
4920 eth_zero_addr(fs->m_u.ether_spec.h_dest);
4922 ether_addr_copy(fs->m_u.ether_spec.h_dest,
4923 rule->tuples_mask.dst_mac);
4925 fs->h_u.ether_spec.h_proto =
4926 cpu_to_be16(rule->tuples.ether_proto);
4927 fs->m_u.ether_spec.h_proto =
4928 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
4929 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
4936 if (fs->flow_type & FLOW_EXT) {
4937 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
4938 fs->m_ext.vlan_tci =
4939 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
4940 cpu_to_be16(VLAN_VID_MASK) :
4941 cpu_to_be16(rule->tuples_mask.vlan_tag1);
4944 if (fs->flow_type & FLOW_MAC_EXT) {
4945 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
4946 if (rule->unused_tuple & BIT(INNER_DST_MAC))
4947 eth_zero_addr(fs->m_ext.h_dest);
4949 ether_addr_copy(fs->m_ext.h_dest,
4950 rule->tuples_mask.dst_mac);
4953 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4954 fs->ring_cookie = RX_CLS_FLOW_DISC;
4958 fs->ring_cookie = rule->queue_id;
4959 vf_id = rule->vf_id;
4960 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
4961 fs->ring_cookie |= vf_id;
4967 static int hclge_get_all_rules(struct hnae3_handle *handle,
4968 struct ethtool_rxnfc *cmd, u32 *rule_locs)
4970 struct hclge_vport *vport = hclge_get_vport(handle);
4971 struct hclge_dev *hdev = vport->back;
4972 struct hclge_fd_rule *rule;
4973 struct hlist_node *node2;
4976 if (!hnae3_dev_fd_supported(hdev))
4979 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4981 hlist_for_each_entry_safe(rule, node2,
4982 &hdev->fd_rule_list, rule_node) {
4983 if (cnt == cmd->rule_cnt)
4986 rule_locs[cnt] = rule->location;
4990 cmd->rule_cnt = cnt;
4995 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
4997 struct hclge_vport *vport = hclge_get_vport(handle);
4998 struct hclge_dev *hdev = vport->back;
5000 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5001 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5004 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5006 struct hclge_vport *vport = hclge_get_vport(handle);
5007 struct hclge_dev *hdev = vport->back;
5009 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5012 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5014 struct hclge_vport *vport = hclge_get_vport(handle);
5015 struct hclge_dev *hdev = vport->back;
5017 return hdev->reset_count;
5020 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5022 struct hclge_vport *vport = hclge_get_vport(handle);
5023 struct hclge_dev *hdev = vport->back;
5025 hdev->fd_cfg.fd_en = enable;
5027 hclge_del_all_fd_entries(handle, false);
5029 hclge_restore_fd_entries(handle);
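/* Enable or disable the MAC: TX/RX, padding, FCS insertion/stripping
 * and oversize truncation are toggled together, while the loopback and
 * 1588 bits are always cleared.
 */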
5032 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5034 struct hclge_desc desc;
5035 struct hclge_config_mac_mode_cmd *req =
5036 (struct hclge_config_mac_mode_cmd *)desc.data;
5040 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5041 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5042 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5043 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5044 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5045 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5046 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5047 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5048 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5049 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5050 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5051 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5052 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5053 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5054 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5055 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5057 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5059 dev_err(&hdev->pdev->dev,
5060 "mac enable fail, ret =%d.\n", ret);
5063 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5065 struct hclge_config_mac_mode_cmd *req;
5066 struct hclge_desc desc;
5070 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5071 /* 1 Read out the MAC mode config at first */
5072 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5073 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5075 dev_err(&hdev->pdev->dev,
5076 "mac loopback get fail, ret =%d.\n", ret);
5080 /* 2 Then setup the loopback flag */
5081 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5082 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5083 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5084 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5086 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5088 /* 3 Config mac work mode with loopback flag
5089 * and its original configure parameters
5091 hclge_cmd_reuse_desc(&desc, false);
5092 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5094 dev_err(&hdev->pdev->dev,
5095 "mac loopback set fail, ret =%d.\n", ret);
5099 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5100 enum hnae3_loop loop_mode)
5102 #define HCLGE_SERDES_RETRY_MS 10
5103 #define HCLGE_SERDES_RETRY_NUM 100
5104 struct hclge_serdes_lb_cmd *req;
5105 struct hclge_desc desc;
5109 req = (struct hclge_serdes_lb_cmd *)desc.data;
5110 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5112 switch (loop_mode) {
5113 case HNAE3_LOOP_SERIAL_SERDES:
5114 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5116 case HNAE3_LOOP_PARALLEL_SERDES:
5117 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5120 dev_err(&hdev->pdev->dev,
5121 "unsupported serdes loopback mode %d\n", loop_mode);
5126 req->enable = loop_mode_b;
5127 req->mask = loop_mode_b;
5129 req->mask = loop_mode_b;
5132 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5134 dev_err(&hdev->pdev->dev,
5135 "serdes loopback set fail, ret = %d\n", ret);
5140 msleep(HCLGE_SERDES_RETRY_MS);
5141 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5143 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5145 dev_err(&hdev->pdev->dev,
5146 "serdes loopback get, ret = %d\n", ret);
5149 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5150 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5152 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5153 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5155 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5156 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5160 hclge_cfg_mac_mode(hdev, en);
5164 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5165 int stream_id, bool enable)
5167 struct hclge_desc desc;
5168 struct hclge_cfg_com_tqp_queue_cmd *req =
5169 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5172 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5173 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5174 req->stream_id = cpu_to_le16(stream_id);
5175 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5177 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5179 dev_err(&hdev->pdev->dev,
5180 "Tqp enable fail, status =%d.\n", ret);
5184 static int hclge_set_loopback(struct hnae3_handle *handle,
5185 enum hnae3_loop loop_mode, bool en)
5187 struct hclge_vport *vport = hclge_get_vport(handle);
5188 struct hclge_dev *hdev = vport->back;
5191 switch (loop_mode) {
5192 case HNAE3_LOOP_APP:
5193 ret = hclge_set_app_loopback(hdev, en);
5195 case HNAE3_LOOP_SERIAL_SERDES:
5196 case HNAE3_LOOP_PARALLEL_SERDES:
5197 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5201 dev_err(&hdev->pdev->dev,
5202 "loop_mode %d is not supported\n", loop_mode);
5206 for (i = 0; i < vport->alloc_tqps; i++) {
5207 ret = hclge_tqp_enable(hdev, i, 0, en);
5215 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5217 struct hclge_vport *vport = hclge_get_vport(handle);
5218 struct hnae3_queue *queue;
5219 struct hclge_tqp *tqp;
5222 for (i = 0; i < vport->alloc_tqps; i++) {
5223 queue = handle->kinfo.tqp[i];
5224 tqp = container_of(queue, struct hclge_tqp, q);
5225 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5229 static int hclge_ae_start(struct hnae3_handle *handle)
5231 struct hclge_vport *vport = hclge_get_vport(handle);
5232 struct hclge_dev *hdev = vport->back;
5235 hclge_cfg_mac_mode(hdev, true);
5236 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5237 mod_timer(&hdev->service_timer, jiffies + HZ);
5238 hdev->hw.mac.link = 0;
5240 /* reset tqp stats */
5241 hclge_reset_tqp_stats(handle);
5243 hclge_mac_start_phy(hdev);
5248 static void hclge_ae_stop(struct hnae3_handle *handle)
5250 struct hclge_vport *vport = hclge_get_vport(handle);
5251 struct hclge_dev *hdev = vport->back;
5253 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5255 del_timer_sync(&hdev->service_timer);
5256 cancel_work_sync(&hdev->service_task);
5257 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5259 /* If it is not a PF reset, the firmware will disable the MAC,
5260 * so it only needs to stop the phy here.
5262 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5263 hdev->reset_type != HNAE3_FUNC_RESET) {
5264 hclge_mac_stop_phy(hdev);
5269 hclge_cfg_mac_mode(hdev, false);
5271 hclge_mac_stop_phy(hdev);
5273 /* reset tqp stats */
5274 hclge_reset_tqp_stats(handle);
5275 del_timer_sync(&hdev->service_timer);
5276 cancel_work_sync(&hdev->service_task);
5277 hclge_update_link_status(hdev);
5280 int hclge_vport_start(struct hclge_vport *vport)
5282 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5283 vport->last_active_jiffies = jiffies;
5287 void hclge_vport_stop(struct hclge_vport *vport)
5289 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5292 static int hclge_client_start(struct hnae3_handle *handle)
5294 struct hclge_vport *vport = hclge_get_vport(handle);
5296 return hclge_vport_start(vport);
5299 static void hclge_client_stop(struct hnae3_handle *handle)
5301 struct hclge_vport *vport = hclge_get_vport(handle);
5303 hclge_vport_stop(vport);
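/* Map the MAC_VLAN table command response code to an errno; the
 * meaning of the code depends on whether the op was an add, remove or
 * lookup.
 */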
5306 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5307 u16 cmdq_resp, u8 resp_code,
5308 enum hclge_mac_vlan_tbl_opcode op)
5310 struct hclge_dev *hdev = vport->back;
5311 int return_status = -EIO;
5314 dev_err(&hdev->pdev->dev,
5315 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5320 if (op == HCLGE_MAC_VLAN_ADD) {
5321 if (!resp_code || resp_code == 1) {
5323 } else if (resp_code == 2) {
5324 return_status = -ENOSPC;
5325 dev_err(&hdev->pdev->dev,
5326 "add mac addr failed for uc_overflow.\n");
5327 } else if (resp_code == 3) {
5328 return_status = -ENOSPC;
5329 dev_err(&hdev->pdev->dev,
5330 "add mac addr failed for mc_overflow.\n");
5332 dev_err(&hdev->pdev->dev,
5333 "add mac addr failed for undefined, code=%d.\n",
5336 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5339 } else if (resp_code == 1) {
5340 return_status = -ENOENT;
5341 dev_dbg(&hdev->pdev->dev,
5342 "remove mac addr failed for miss.\n");
5344 dev_err(&hdev->pdev->dev,
5345 "remove mac addr failed for undefined, code=%d.\n",
5348 } else if (op == HCLGE_MAC_VLAN_LKUP) {
5351 } else if (resp_code == 1) {
5352 return_status = -ENOENT;
5353 dev_dbg(&hdev->pdev->dev,
5354 "lookup mac addr failed for miss.\n");
5356 dev_err(&hdev->pdev->dev,
5357 "lookup mac addr failed for undefined, code=%d.\n",
5361 return_status = -EINVAL;
5362 dev_err(&hdev->pdev->dev,
5363 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5367 return return_status;
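/* Set or clear one function's bit in the 256-bit VF bitmap of a
 * multicast entry: vfid 0-191 live in desc[1], vfid 192-255 in
 * desc[2].
 */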
5370 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5375 if (vfid > 255 || vfid < 0)
5378 if (vfid >= 0 && vfid <= 191) {
5379 word_num = vfid / 32;
5380 bit_num = vfid % 32;
5382 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5384 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5386 word_num = (vfid - 192) / 32;
5387 bit_num = vfid % 32;
5389 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5391 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5397 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5399 #define HCLGE_DESC_NUMBER 3
5400 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5403 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5404 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5405 if (desc[i].data[j])
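/* Pack the 6-byte MAC address into the table entry: bytes 0-3 go into
 * mac_addr_hi32 and bytes 4-5 into mac_addr_lo16, both little endian.
 */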
5411 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5414 const unsigned char *mac_addr = addr;
5415 u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
5416 (mac_addr[2] << 16) | (mac_addr[3] << 24);
5417 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
5419 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5420 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5423 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5424 struct hclge_mac_vlan_tbl_entry_cmd *req)
5426 struct hclge_dev *hdev = vport->back;
5427 struct hclge_desc desc;
5432 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5434 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5436 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5438 dev_err(&hdev->pdev->dev,
5439 "del mac addr failed for cmd_send, ret =%d.\n",
5443 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5444 retval = le16_to_cpu(desc.retval);
5446 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5447 HCLGE_MAC_VLAN_REMOVE);
5450 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5451 struct hclge_mac_vlan_tbl_entry_cmd *req,
5452 struct hclge_desc *desc,
5455 struct hclge_dev *hdev = vport->back;
5460 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5462 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5463 memcpy(desc[0].data,
5465 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5466 hclge_cmd_setup_basic_desc(&desc[1],
5467 HCLGE_OPC_MAC_VLAN_ADD,
5469 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5470 hclge_cmd_setup_basic_desc(&desc[2],
5471 HCLGE_OPC_MAC_VLAN_ADD,
5473 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5475 memcpy(desc[0].data,
5477 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5478 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5481 dev_err(&hdev->pdev->dev,
5482 "lookup mac addr failed for cmd_send, ret =%d.\n",
5486 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5487 retval = le16_to_cpu(desc[0].retval);
5489 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5490 HCLGE_MAC_VLAN_LKUP);
5493 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5494 struct hclge_mac_vlan_tbl_entry_cmd *req,
5495 struct hclge_desc *mc_desc)
5497 struct hclge_dev *hdev = vport->back;
5504 struct hclge_desc desc;
5506 hclge_cmd_setup_basic_desc(&desc,
5507 HCLGE_OPC_MAC_VLAN_ADD,
5509 memcpy(desc.data, req,
5510 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5511 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5512 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5513 retval = le16_to_cpu(desc.retval);
5515 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5517 HCLGE_MAC_VLAN_ADD);
5519 hclge_cmd_reuse_desc(&mc_desc[0], false);
5520 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5521 hclge_cmd_reuse_desc(&mc_desc[1], false);
5522 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5523 hclge_cmd_reuse_desc(&mc_desc[2], false);
5524 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5525 memcpy(mc_desc[0].data, req,
5526 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5527 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5528 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5529 retval = le16_to_cpu(mc_desc[0].retval);
5531 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5533 HCLGE_MAC_VLAN_ADD);
5537 dev_err(&hdev->pdev->dev,
5538 "add mac addr failed for cmd_send, ret =%d.\n",
5546 static int hclge_init_umv_space(struct hclge_dev *hdev)
5548 u16 allocated_size = 0;
5551 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5556 if (allocated_size < hdev->wanted_umv_size)
5557 dev_warn(&hdev->pdev->dev,
5558 "Alloc umv space failed, want %d, get %d\n",
5559 hdev->wanted_umv_size, allocated_size);
5561 mutex_init(&hdev->umv_mutex);
5562 hdev->max_umv_size = allocated_size;
5563 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5564 hdev->share_umv_size = hdev->priv_umv_size +
5565 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5570 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5574 if (hdev->max_umv_size > 0) {
5575 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5579 hdev->max_umv_size = 0;
5581 mutex_destroy(&hdev->umv_mutex);
5586 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5587 u16 *allocated_size, bool is_alloc)
5589 struct hclge_umv_spc_alc_cmd *req;
5590 struct hclge_desc desc;
5593 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5594 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5595 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5596 req->space_size = cpu_to_le32(space_size);
5598 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5600 dev_err(&hdev->pdev->dev,
5601 "%s umv space failed for cmd_send, ret =%d\n",
5602 is_alloc ? "allocate" : "free", ret);
5606 if (is_alloc && allocated_size)
5607 *allocated_size = le32_to_cpu(desc.data[1]);
5612 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5614 struct hclge_vport *vport;
5617 for (i = 0; i < hdev->num_alloc_vport; i++) {
5618 vport = &hdev->vport[i];
5619 vport->used_umv_num = 0;
5622 mutex_lock(&hdev->umv_mutex);
5623 hdev->share_umv_size = hdev->priv_umv_size +
5624 hdev->max_umv_size % (hdev->num_req_vfs + 2);
5625 mutex_unlock(&hdev->umv_mutex);
5628 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5630 struct hclge_dev *hdev = vport->back;
5633 mutex_lock(&hdev->umv_mutex);
5634 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5635 hdev->share_umv_size == 0);
5636 mutex_unlock(&hdev->umv_mutex);
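/* Account one unicast entry for the vport: entries within the private
 * quota only touch used_umv_num, anything beyond it consumes from (or
 * releases back to) the shared pool.
 */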
5641 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5643 struct hclge_dev *hdev = vport->back;
5645 mutex_lock(&hdev->umv_mutex);
5647 if (vport->used_umv_num > hdev->priv_umv_size)
5648 hdev->share_umv_size++;
5649 vport->used_umv_num--;
5651 if (vport->used_umv_num >= hdev->priv_umv_size)
5652 hdev->share_umv_size--;
5653 vport->used_umv_num++;
5655 mutex_unlock(&hdev->umv_mutex);
5658 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5659 const unsigned char *addr)
5661 struct hclge_vport *vport = hclge_get_vport(handle);
5663 return hclge_add_uc_addr_common(vport, addr);
5666 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5667 const unsigned char *addr)
5669 struct hclge_dev *hdev = vport->back;
5670 struct hclge_mac_vlan_tbl_entry_cmd req;
5671 struct hclge_desc desc;
5672 u16 egress_port = 0;
5675 /* mac addr check */
5676 if (is_zero_ether_addr(addr) ||
5677 is_broadcast_ether_addr(addr) ||
5678 is_multicast_ether_addr(addr)) {
5679 dev_err(&hdev->pdev->dev,
5680 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5682 is_zero_ether_addr(addr),
5683 is_broadcast_ether_addr(addr),
5684 is_multicast_ether_addr(addr));
5688 memset(&req, 0, sizeof(req));
5689 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5691 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5692 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5694 req.egress_port = cpu_to_le16(egress_port);
5696 hclge_prepare_mac_addr(&req, addr);
5698 /* Look up the mac address in the mac_vlan table, and add
5699 * it if the entry does not exist. Duplicate unicast entries
5700 * are not allowed in the mac_vlan table.
5702 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5703 if (ret == -ENOENT) {
5704 if (!hclge_is_umv_space_full(vport)) {
5705 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5707 hclge_update_umv_space(vport, false);
5711 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5712 hdev->priv_umv_size);
5717 /* check if we just hit the duplicate */
5721 dev_err(&hdev->pdev->dev,
5722 "PF failed to add unicast entry(%pM) in the MAC table\n",
5728 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5729 const unsigned char *addr)
5731 struct hclge_vport *vport = hclge_get_vport(handle);
5733 return hclge_rm_uc_addr_common(vport, addr);
5736 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
5737 const unsigned char *addr)
5739 struct hclge_dev *hdev = vport->back;
5740 struct hclge_mac_vlan_tbl_entry_cmd req;
5743 /* mac addr check */
5744 if (is_zero_ether_addr(addr) ||
5745 is_broadcast_ether_addr(addr) ||
5746 is_multicast_ether_addr(addr)) {
5747 dev_dbg(&hdev->pdev->dev,
5748 "Remove mac err! invalid mac:%pM.\n",
5753 memset(&req, 0, sizeof(req));
5754 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5755 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5756 hclge_prepare_mac_addr(&req, addr);
5757 ret = hclge_remove_mac_vlan_tbl(vport, &req);
5759 hclge_update_umv_space(vport, true);
5764 static int hclge_add_mc_addr(struct hnae3_handle *handle,
5765 const unsigned char *addr)
5767 struct hclge_vport *vport = hclge_get_vport(handle);
5769 return hclge_add_mc_addr_common(vport, addr);
5772 int hclge_add_mc_addr_common(struct hclge_vport *vport,
5773 const unsigned char *addr)
5775 struct hclge_dev *hdev = vport->back;
5776 struct hclge_mac_vlan_tbl_entry_cmd req;
5777 struct hclge_desc desc[3];
5780 /* mac addr check */
5781 if (!is_multicast_ether_addr(addr)) {
5782 dev_err(&hdev->pdev->dev,
5783 "Add mc mac err! invalid mac:%pM.\n",
5787 memset(&req, 0, sizeof(req));
5788 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5789 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5790 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5791 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5792 hclge_prepare_mac_addr(&req, addr);
5793 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5795 /* This mac addr exists, update the VFID for it */
5796 hclge_update_desc_vfid(desc, vport->vport_id, false);
5797 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5799 /* This mac addr does not exist, add a new entry for it */
5800 memset(desc[0].data, 0, sizeof(desc[0].data));
5801 memset(desc[1].data, 0, sizeof(desc[0].data));
5802 memset(desc[2].data, 0, sizeof(desc[0].data));
5803 hclge_update_desc_vfid(desc, vport->vport_id, false);
5804 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5807 if (status == -ENOSPC)
5808 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
5813 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
5814 const unsigned char *addr)
5816 struct hclge_vport *vport = hclge_get_vport(handle);
5818 return hclge_rm_mc_addr_common(vport, addr);
5821 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
5822 const unsigned char *addr)
5824 struct hclge_dev *hdev = vport->back;
5825 struct hclge_mac_vlan_tbl_entry_cmd req;
5826 enum hclge_cmd_status status;
5827 struct hclge_desc desc[3];
5829 /* mac addr check */
5830 if (!is_multicast_ether_addr(addr)) {
5831 dev_dbg(&hdev->pdev->dev,
5832 "Remove mc mac err! invalid mac:%pM.\n",
5837 memset(&req, 0, sizeof(req));
5838 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5839 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
5840 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5841 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5842 hclge_prepare_mac_addr(&req, addr);
5843 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
5845 /* This mac addr exists, remove this handle's VFID for it */
5846 hclge_update_desc_vfid(desc, vport->vport_id, true);
5848 if (hclge_is_all_function_id_zero(desc))
5849 /* All the vfids are zero, so delete this entry */
5850 status = hclge_remove_mac_vlan_tbl(vport, &req);
5852 /* Not all the vfids are zero, so just update the vfid */
5853 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
5856 /* This mac address may be in the mta table, but it cannot be
5857 * deleted here because an mta entry represents an address
5858 * range rather than a specific address. The delete action on
5859 * all entries will take effect in update_mta_status, called by
5860 * hns3_nic_set_rx_mode.
5868 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
5869 u16 cmdq_resp, u8 resp_code)
5871 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
5872 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
5873 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
5874 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
5879 dev_err(&hdev->pdev->dev,
5880 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
5885 switch (resp_code) {
5886 case HCLGE_ETHERTYPE_SUCCESS_ADD:
5887 case HCLGE_ETHERTYPE_ALREADY_ADD:
5890 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
5891 dev_err(&hdev->pdev->dev,
5892 "add mac ethertype failed for manager table overflow.\n");
5893 return_status = -EIO;
5895 case HCLGE_ETHERTYPE_KEY_CONFLICT:
5896 dev_err(&hdev->pdev->dev,
5897 "add mac ethertype failed for key conflict.\n");
5898 return_status = -EIO;
5901 dev_err(&hdev->pdev->dev,
5902 "add mac ethertype failed for undefined, code=%d.\n",
5904 return_status = -EIO;
5907 return return_status;
5910 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
5911 const struct hclge_mac_mgr_tbl_entry_cmd *req)
5913 struct hclge_desc desc;
5918 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
5919 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
5921 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5923 dev_err(&hdev->pdev->dev,
5924 "add mac ethertype failed for cmd_send, ret =%d.\n",
5929 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5930 retval = le16_to_cpu(desc.retval);
5932 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
5935 static int init_mgr_tbl(struct hclge_dev *hdev)
5940 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
5941 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
5943 dev_err(&hdev->pdev->dev,
5944 "add mac ethertype failed, ret =%d.\n",
5953 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
5955 struct hclge_vport *vport = hclge_get_vport(handle);
5956 struct hclge_dev *hdev = vport->back;
5958 ether_addr_copy(p, hdev->hw.mac.mac_addr);
5961 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
5964 const unsigned char *new_addr = (const unsigned char *)p;
5965 struct hclge_vport *vport = hclge_get_vport(handle);
5966 struct hclge_dev *hdev = vport->back;
5969 /* mac addr check */
5970 if (is_zero_ether_addr(new_addr) ||
5971 is_broadcast_ether_addr(new_addr) ||
5972 is_multicast_ether_addr(new_addr)) {
5973 dev_err(&hdev->pdev->dev,
5974 "Change uc mac err! invalid mac:%p.\n",
5979 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
5980 dev_warn(&hdev->pdev->dev,
5981 "remove old uc mac address fail.\n");
5983 ret = hclge_add_uc_addr(handle, new_addr);
5985 dev_err(&hdev->pdev->dev,
5986 "add uc mac address fail, ret =%d.\n",
5990 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
5991 dev_err(&hdev->pdev->dev,
5992 "restore uc mac address fail.\n");
5997 ret = hclge_pause_addr_cfg(hdev, new_addr);
5999 dev_err(&hdev->pdev->dev,
6000 "configure mac pause address fail, ret =%d.\n",
6005 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6010 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6013 struct hclge_vport *vport = hclge_get_vport(handle);
6014 struct hclge_dev *hdev = vport->back;
6016 if (!hdev->hw.mac.phydev)
6019 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6022 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6023 u8 fe_type, bool filter_en)
6025 struct hclge_vlan_filter_ctrl_cmd *req;
6026 struct hclge_desc desc;
6029 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6031 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6032 req->vlan_type = vlan_type;
6033 req->vlan_fe = filter_en ? fe_type : 0;
6035 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6037 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6043 #define HCLGE_FILTER_TYPE_VF 0
6044 #define HCLGE_FILTER_TYPE_PORT 1
6045 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6046 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6047 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6048 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6049 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6050 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6051 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6052 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6053 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6055 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6057 struct hclge_vport *vport = hclge_get_vport(handle);
6058 struct hclge_dev *hdev = vport->back;
6060 if (hdev->pdev->revision >= 0x21) {
6061 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6062 HCLGE_FILTER_FE_EGRESS, enable);
6063 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6064 HCLGE_FILTER_FE_INGRESS, enable);
6066 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6067 HCLGE_FILTER_FE_EGRESS_V1_B, enable);
6070 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6072 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
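/* Add or kill a VLAN entry in the per-function VLAN filter. The
 * 256-bit VF bitmap spans two descriptors, and the response code in
 * desc[0] distinguishes success, a full table and a missing entry.
 */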
6075 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6076 bool is_kill, u16 vlan, u8 qos,
6079 #define HCLGE_MAX_VF_BYTES 16
6080 struct hclge_vlan_filter_vf_cfg_cmd *req0;
6081 struct hclge_vlan_filter_vf_cfg_cmd *req1;
6082 struct hclge_desc desc[2];
6087 hclge_cmd_setup_basic_desc(&desc[0],
6088 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6089 hclge_cmd_setup_basic_desc(&desc[1],
6090 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6092 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6094 vf_byte_off = vfid / 8;
6095 vf_byte_val = 1 << (vfid % 8);
6097 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6098 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6100 req0->vlan_id = cpu_to_le16(vlan);
6101 req0->vlan_cfg = is_kill;
6103 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6104 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6106 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6108 ret = hclge_cmd_send(&hdev->hw, desc, 2);
6110 dev_err(&hdev->pdev->dev,
6111 "Send vf vlan command fail, ret =%d.\n",
6117 #define HCLGE_VF_VLAN_NO_ENTRY 2
6118 if (!req0->resp_code || req0->resp_code == 1)
6121 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6122 dev_warn(&hdev->pdev->dev,
6123 "vf vlan table is full, vf vlan filter is disabled\n");
6127 dev_err(&hdev->pdev->dev,
6128 "Add vf vlan filter fail, ret =%d.\n",
6131 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
6132 if (!req0->resp_code)
6135 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6136 dev_warn(&hdev->pdev->dev,
6137 "vlan %d filter is not in vf vlan table\n",
6142 dev_err(&hdev->pdev->dev,
6143 "Kill vf vlan filter fail, ret =%d.\n",
6150 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6151 u16 vlan_id, bool is_kill)
6153 struct hclge_vlan_filter_pf_cfg_cmd *req;
6154 struct hclge_desc desc;
6155 u8 vlan_offset_byte_val;
6156 u8 vlan_offset_byte;
6160 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6162 vlan_offset_160 = vlan_id / 160;
6163 vlan_offset_byte = (vlan_id % 160) / 8;
6164 vlan_offset_byte_val = 1 << (vlan_id % 8);
6166 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6167 req->vlan_offset = vlan_offset_160;
6168 req->vlan_cfg = is_kill;
6169 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6171 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6173 dev_err(&hdev->pdev->dev,
6174 "port vlan command, send fail, ret =%d.\n", ret);
6178 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6179 u16 vport_id, u16 vlan_id, u8 qos,
6182 u16 vport_idx, vport_num = 0;
6185 if (is_kill && !vlan_id)
6188 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6191 dev_err(&hdev->pdev->dev,
6192 "Set %d vport vlan filter config fail, ret =%d.\n",
6197 /* vlan 0 may be added twice when 8021q module is enabled */
6198 if (!is_kill && !vlan_id &&
6199 test_bit(vport_id, hdev->vlan_table[vlan_id]))
6202 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6203 dev_err(&hdev->pdev->dev,
6204 "Add port vlan failed, vport %d is already in vlan %d\n",
6210 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6211 dev_err(&hdev->pdev->dev,
6212 "Delete port vlan failed, vport %d is not in vlan %d\n",
6217 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6220 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6221 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6227 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6228 u16 vlan_id, bool is_kill)
6230 struct hclge_vport *vport = hclge_get_vport(handle);
6231 struct hclge_dev *hdev = vport->back;
6233 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6237 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6238 u16 vlan, u8 qos, __be16 proto)
6240 struct hclge_vport *vport = hclge_get_vport(handle);
6241 struct hclge_dev *hdev = vport->back;
6243 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6245 if (proto != htons(ETH_P_8021Q))
6246 return -EPROTONOSUPPORT;
6248 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6251 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6253 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6254 struct hclge_vport_vtag_tx_cfg_cmd *req;
6255 struct hclge_dev *hdev = vport->back;
6256 struct hclge_desc desc;
6259 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6261 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6262 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6263 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6264 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6265 vcfg->accept_tag1 ? 1 : 0);
6266 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6267 vcfg->accept_untag1 ? 1 : 0);
6268 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6269 vcfg->accept_tag2 ? 1 : 0);
6270 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6271 vcfg->accept_untag2 ? 1 : 0);
6272 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6273 vcfg->insert_tag1_en ? 1 : 0);
6274 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6275 vcfg->insert_tag2_en ? 1 : 0);
6276 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6278 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6279 req->vf_bitmap[req->vf_offset] =
6280 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6282 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6284 dev_err(&hdev->pdev->dev,
6285 "Send port txvlan cfg command fail, ret =%d\n",
6291 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6293 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6294 struct hclge_vport_vtag_rx_cfg_cmd *req;
6295 struct hclge_dev *hdev = vport->back;
6296 struct hclge_desc desc;
6299 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6301 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6302 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6303 vcfg->strip_tag1_en ? 1 : 0);
6304 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6305 vcfg->strip_tag2_en ? 1 : 0);
6306 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6307 vcfg->vlan1_vlan_prionly ? 1 : 0);
6308 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6309 vcfg->vlan2_vlan_prionly ? 1 : 0);
6311 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6312 req->vf_bitmap[req->vf_offset] =
6313 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6315 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6317 dev_err(&hdev->pdev->dev,
6318 "Send port rxvlan cfg command fail, ret =%d\n",
6324 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6326 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6327 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6328 struct hclge_desc desc;
6331 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6332 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6333 rx_req->ot_fst_vlan_type =
6334 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6335 rx_req->ot_sec_vlan_type =
6336 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6337 rx_req->in_fst_vlan_type =
6338 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6339 rx_req->in_sec_vlan_type =
6340 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6342 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6344 dev_err(&hdev->pdev->dev,
6345 "Send rxvlan protocol type command fail, ret =%d\n",
6350 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6352 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6353 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6354 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6356 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6358 dev_err(&hdev->pdev->dev,
6359 "Send txvlan protocol type command fail, ret =%d\n",
6365 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6367 #define HCLGE_DEF_VLAN_TYPE 0x8100
6369 struct hnae3_handle *handle = &hdev->vport[0].nic;
6370 struct hclge_vport *vport;
6374 if (hdev->pdev->revision >= 0x21) {
6375 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6376 HCLGE_FILTER_FE_EGRESS, true);
6380 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6381 HCLGE_FILTER_FE_INGRESS, true);
6385 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6386 HCLGE_FILTER_FE_EGRESS_V1_B,
6392 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6394 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6395 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6396 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6397 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6398 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6399 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6401 ret = hclge_set_vlan_protocol_type(hdev);
6405 for (i = 0; i < hdev->num_alloc_vport; i++) {
6406 vport = &hdev->vport[i];
6407 vport->txvlan_cfg.accept_tag1 = true;
6408 vport->txvlan_cfg.accept_untag1 = true;
6410 /* accept_tag2 and accept_untag2 are not supported on
6411 * pdev revision(0x20); newer revisions support them. The
6412 * values of these two fields will not return an error when the
6413 * driver sends the command to firmware on revision(0x20).
6414 * These two fields cannot be configured by the user.
6415 */
6416 vport->txvlan_cfg.accept_tag2 = true;
6417 vport->txvlan_cfg.accept_untag2 = true;
6419 vport->txvlan_cfg.insert_tag1_en = false;
6420 vport->txvlan_cfg.insert_tag2_en = false;
6421 vport->txvlan_cfg.default_tag1 = 0;
6422 vport->txvlan_cfg.default_tag2 = 0;
6424 ret = hclge_set_vlan_tx_offload_cfg(vport);
6428 vport->rxvlan_cfg.strip_tag1_en = false;
6429 vport->rxvlan_cfg.strip_tag2_en = true;
6430 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6431 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6433 ret = hclge_set_vlan_rx_offload_cfg(vport);
6438 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6441 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6443 struct hclge_vport *vport = hclge_get_vport(handle);
6445 vport->rxvlan_cfg.strip_tag1_en = false;
6446 vport->rxvlan_cfg.strip_tag2_en = enable;
6447 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6448 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6450 return hclge_set_vlan_rx_offload_cfg(vport);
6453 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6455 struct hclge_config_max_frm_size_cmd *req;
6456 struct hclge_desc desc;
6458 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6460 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6461 req->max_frm_size = cpu_to_le16(new_mps);
6462 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6464 return hclge_cmd_send(&hdev->hw, &desc, 1);
6467 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6469 struct hclge_vport *vport = hclge_get_vport(handle);
6471 return hclge_set_vport_mtu(vport, new_mtu);
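/* Change a vport's max packet size (MPS). A VF only records the new
 * value (it must fit within the PF's MPS); for the PF the MAC frame
 * size is reprogrammed and buffers are reallocated with the client
 * paused.
 */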
6474 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6476 struct hclge_dev *hdev = vport->back;
6477 int i, max_frm_size, ret = 0;
6479 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6480 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6481 max_frm_size > HCLGE_MAC_MAX_FRAME)
6484 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6485 mutex_lock(&hdev->vport_lock);
6486 /* VF's mps must fit within hdev->mps */
6487 if (vport->vport_id && max_frm_size > hdev->mps) {
6488 mutex_unlock(&hdev->vport_lock);
6490 } else if (vport->vport_id) {
6491 vport->mps = max_frm_size;
6492 mutex_unlock(&hdev->vport_lock);
6496 /* PF's mps must be greater than the VFs' mps */
6497 for (i = 1; i < hdev->num_alloc_vport; i++)
6498 if (max_frm_size < hdev->vport[i].mps) {
6499 mutex_unlock(&hdev->vport_lock);
6503 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6505 ret = hclge_set_mac_mtu(hdev, max_frm_size);
6507 dev_err(&hdev->pdev->dev,
6508 "Change mtu fail, ret =%d\n", ret);
6512 hdev->mps = max_frm_size;
6513 vport->mps = max_frm_size;
6515 ret = hclge_buffer_alloc(hdev);
6517 dev_err(&hdev->pdev->dev,
6518 "Allocate buffer fail, ret =%d\n", ret);
6521 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6522 mutex_unlock(&hdev->vport_lock);
6526 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
6529 struct hclge_reset_tqp_queue_cmd *req;
6530 struct hclge_desc desc;
6533 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
6535 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6536 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6537 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
6539 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6541 dev_err(&hdev->pdev->dev,
6542 "Send tqp reset cmd error, status =%d\n", ret);
6549 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
6551 struct hclge_reset_tqp_queue_cmd *req;
6552 struct hclge_desc desc;
6555 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
6557 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
6558 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
6560 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6562 dev_err(&hdev->pdev->dev,
6563 "Get reset status error, status =%d\n", ret);
6567 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
6570 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
6573 struct hnae3_queue *queue;
6574 struct hclge_tqp *tqp;
6576 queue = handle->kinfo.tqp[queue_id];
6577 tqp = container_of(queue, struct hclge_tqp, q);
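/* Reset a single TQP: disable it, request the reset through firmware,
 * poll the ready-to-reset status, then de-assert the soft reset.
 */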
6582 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
6584 struct hclge_vport *vport = hclge_get_vport(handle);
6585 struct hclge_dev *hdev = vport->back;
6586 int reset_try_times = 0;
6591 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
6593 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
6595 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
6599 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6601 dev_err(&hdev->pdev->dev,
6602 "Send reset tqp cmd fail, ret = %d\n", ret);
6606 reset_try_times = 0;
6607 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6608 /* Wait for tqp hw reset */
6610 reset_status = hclge_get_reset_status(hdev, queue_gid);
6615 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6616 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
6620 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6622 dev_err(&hdev->pdev->dev,
6623 "Deassert the soft reset fail, ret = %d\n", ret);
6628 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
6630 struct hclge_dev *hdev = vport->back;
6631 int reset_try_times = 0;
6636 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
6638 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
6640 dev_warn(&hdev->pdev->dev,
6641 "Send reset tqp cmd fail, ret = %d\n", ret);
6645 reset_try_times = 0;
6646 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
6647 /* Wait for tqp hw reset */
6649 reset_status = hclge_get_reset_status(hdev, queue_gid);
6654 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
6655 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
6659 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
6661 dev_warn(&hdev->pdev->dev,
6662 "Deassert the soft reset fail, ret = %d\n", ret);
6665 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
6667 struct hclge_vport *vport = hclge_get_vport(handle);
6668 struct hclge_dev *hdev = vport->back;
6670 return hdev->fw_version;
6673 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6675 struct phy_device *phydev = hdev->hw.mac.phydev;
6680 phy_set_asym_pause(phydev, rx_en, tx_en);
6683 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
6688 hdev->fc_mode_last_time = HCLGE_FC_FULL;
6689 else if (rx_en && !tx_en)
6690 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
6691 else if (!rx_en && tx_en)
6692 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
6694 hdev->fc_mode_last_time = HCLGE_FC_NONE;
6696 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
6699 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
6701 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
6706 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
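/* Resolve flow control after phy autoneg: combine local and link
 * partner pause advertisements, drop pause entirely on half duplex,
 * and apply the resolved rx/tx pause setting to the MAC.
 */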
6711 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
6713 struct phy_device *phydev = hdev->hw.mac.phydev;
6714 u16 remote_advertising = 0;
6715 u16 local_advertising = 0;
6716 u32 rx_pause, tx_pause;
6719 if (!phydev->link || !phydev->autoneg)
6722 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
6725 remote_advertising = LPA_PAUSE_CAP;
6727 if (phydev->asym_pause)
6728 remote_advertising |= LPA_PAUSE_ASYM;
6730 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
6731 remote_advertising);
6732 tx_pause = flowctl & FLOW_CTRL_TX;
6733 rx_pause = flowctl & FLOW_CTRL_RX;
6735 if (phydev->duplex == HCLGE_MAC_HALF) {
6740 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
6743 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
6744 u32 *rx_en, u32 *tx_en)
6746 struct hclge_vport *vport = hclge_get_vport(handle);
6747 struct hclge_dev *hdev = vport->back;
6749 *auto_neg = hclge_get_autoneg(handle);
6751 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6757 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
6760 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
6763 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
6772 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
6773 u32 rx_en, u32 tx_en)
6775 struct hclge_vport *vport = hclge_get_vport(handle);
6776 struct hclge_dev *hdev = vport->back;
6777 struct phy_device *phydev = hdev->hw.mac.phydev;
6780 fc_autoneg = hclge_get_autoneg(handle);
6781 if (auto_neg != fc_autoneg) {
6782 dev_info(&hdev->pdev->dev,
6783 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
6787 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
6788 dev_info(&hdev->pdev->dev,
6789 "Priority flow control enabled. Cannot set link flow control.\n");
6793 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
6796 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
6798 /* Only support flow control negotiation for netdevs with
6799 * a phy attached for now.
6804 return phy_start_aneg(phydev);
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
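
/* Report MDI/MDI-X state from the PHY: the PHY is temporarily switched to
 * its MDIX register page, the control/status registers are sampled, and
 * the copper page is restored before the raw fields are translated to the
 * ETH_TP_MDI* values ethtool expects.
 */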
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static int hclge_init_instance_hw(struct hclge_dev *hdev)
{
	return hclge_mac_connect_phy(hdev);
}

static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
{
	hclge_mac_disconnect_phy(hdev);
}
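
/* A client (KNIC for ethernet, UNIC, or RoCE) is bound to every vport.
 * NIC and RoCE clients may coexist on the same function; the RoCE instance
 * is only initialized once both the RoCE and the NIC client have
 * registered and the device actually reports RoCE support.
 */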
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:

			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			ret = hclge_init_instance_hw(hdev);
			if (ret) {
				client->ops->uninit_instance(&vport->nic,
							     0);
				goto clear_nic;
			}

			hnae3_set_client_init_flag(client, ae_dev, 1);

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(hdev->roce_client,
							   ae_dev, 1);
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto clear_nic;

			hnae3_set_client_init_flag(client, ae_dev, 1);

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto clear_roce;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto clear_roce;

				hnae3_set_client_init_flag(client, ae_dev, 1);
			}

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			hclge_uninit_instance_hw(hdev);
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
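
/* Bring up the PCI function: enable the device, prefer a 64-bit DMA mask
 * with a 32-bit fallback, claim the regions, and map BAR2 as the register
 * space (hw->io_base) used by the command queue and ring accessors.
 */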
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
}
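
/* FLR handshake with the reset service task: request a function-level
 * reset, then poll for the FLR_DOWN flag.  With HCLGE_FLR_WAIT_MS = 100
 * and HCLGE_FLR_WAIT_CNT = 50, the prepare step waits at most ~5 seconds
 * before reporting a timeout.
 */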
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_WAIT_MS	100
#define HCLGE_FLR_WAIT_CNT	50
	struct hclge_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclge_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGE_FLR_WAIT_CNT)
		msleep(HCLGE_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}
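
/* Main bring-up path for the PF.  Ordering matters: the firmware command
 * queue and command channel come first, then interrupt vectors, TQP and
 * vport resources, and finally MAC, VLAN, TM scheduler, RSS and
 * flow-director configuration.  Each failure unwinds through the matching
 * err_* label at the bottom of the function.
 */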
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_hw_error_set_state(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"hw error interrupts enable failed, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);
	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);

	hclge_clear_all_event_cause(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_destroy_cmd_queue(&hdev->hw);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_start(vport);
		vport++;
	}
}
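
/* Re-initialization path used after a function/core/global reset.  Unlike
 * hclge_init_ae_dev(), it keeps the already-allocated software structures
 * (vports, MSI vectors, work items) and only rebuilds the hardware state
 * that the reset wiped out.
 */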
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Re-enable the TM hw error interrupts because
	 * they get disabled on core/global reset.
	 */
	if (hclge_enable_tm_hw_error(hdev, true))
		dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");

	hclge_reset_vport_state(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	hclge_hw_error_set_state(hdev, false);
	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}
static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = vport->alloc_tqps;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static void hclge_release_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);

		tqp->q.handle = NULL;
		tqp->q.tqp_index = 0;
		tqp->alloced = false;
	}

	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
	kinfo->tqp = NULL;
}
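
/* Change the number of combined channels: the old TQPs are released and
 * reacquired with the new count, the TQP-to-vport mapping and the TM
 * scheduler are re-initialized, and the RSS TC mode plus indirection table
 * are recomputed to match the new rss_size.
 */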
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	/* Free old tqps, and reallocate with new tqp number when nic setup */
	hclge_release_tqp(vport);

	ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
	if (ret) {
		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_map_tqp_to_vport(hdev, vport);
	if (ret) {
		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
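
/* The register dump is returned in-band in the command descriptors: the
 * first descriptor carries fewer values because its command header is not
 * reusable as data, while every following descriptor is consumed wholesale.
 * That is why cmd_num is sized from regs_num plus the header slots, and the
 * first loop iteration reads HCLGE_*_REG_RTN_DATANUM minus the header share.
 */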
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
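
/* ethtool register-dump layout: each block of direct-read addresses is
 * padded with SEPARATOR_VALUE words so that every block ends on a
 * REG_NUM_PER_LINE (4-word) boundary, followed by the firmware-provided
 * 32-bit and 64-bit register sets.  hclge_get_regs_len() must therefore
 * mirror the exact layout produced by hclge_get_regs().
 */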
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, j, reg_um, separator_num;
	u32 *reg = data;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	/* fetching per-PF registers values from PF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	/* fetching PF common registers values from firmware */
	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	reg += regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static int hclge_gro_en(struct hnae3_handle *handle, int enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.process_hw_error = hclge_process_ras_hw_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);