1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
24 #include "hclge_err.h"
26 #include "hclge_devlink.h"
28 #define HCLGE_NAME "hclge"
29 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
30 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
32 #define HCLGE_BUF_SIZE_UNIT 256U
33 #define HCLGE_BUF_MUL_BY 2
34 #define HCLGE_BUF_DIV_BY 2
35 #define NEED_RESERVE_TC_NUM 2
36 #define BUF_MAX_PERCENT 100
37 #define BUF_RESERVE_PERCENT 90
39 #define HCLGE_RESET_MAX_FAIL_CNT 5
40 #define HCLGE_RESET_SYNC_TIME 100
41 #define HCLGE_PF_RESET_SYNC_TIME 20
42 #define HCLGE_PF_RESET_SYNC_CNT 1500
44 /* Get DFX BD number offset */
45 #define HCLGE_DFX_BIOS_BD_OFFSET 1
46 #define HCLGE_DFX_SSU_0_BD_OFFSET 2
47 #define HCLGE_DFX_SSU_1_BD_OFFSET 3
48 #define HCLGE_DFX_IGU_BD_OFFSET 4
49 #define HCLGE_DFX_RPU_0_BD_OFFSET 5
50 #define HCLGE_DFX_RPU_1_BD_OFFSET 6
51 #define HCLGE_DFX_NCSI_BD_OFFSET 7
52 #define HCLGE_DFX_RTC_BD_OFFSET 8
53 #define HCLGE_DFX_PPP_BD_OFFSET 9
54 #define HCLGE_DFX_RCB_BD_OFFSET 10
55 #define HCLGE_DFX_TQP_BD_OFFSET 11
56 #define HCLGE_DFX_SSU_2_BD_OFFSET 12
58 #define HCLGE_LINK_STATUS_MS 10
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 static void hclge_sync_fd_table(struct hclge_dev *hdev);
76 static struct hnae3_ae_algo ae_algo;
78 static struct workqueue_struct *hclge_wq;
80 static const struct pci_device_id ae_algo_pci_tbl[] = {
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
85 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
88 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
89 /* required last entry */
93 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
95 static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
96 HCLGE_NIC_CSQ_BASEADDR_H_REG,
97 HCLGE_NIC_CSQ_DEPTH_REG,
98 HCLGE_NIC_CSQ_TAIL_REG,
99 HCLGE_NIC_CSQ_HEAD_REG,
100 HCLGE_NIC_CRQ_BASEADDR_L_REG,
101 HCLGE_NIC_CRQ_BASEADDR_H_REG,
102 HCLGE_NIC_CRQ_DEPTH_REG,
103 HCLGE_NIC_CRQ_TAIL_REG,
104 HCLGE_NIC_CRQ_HEAD_REG,
105 HCLGE_VECTOR0_CMDQ_SRC_REG,
106 HCLGE_CMDQ_INTR_STS_REG,
107 HCLGE_CMDQ_INTR_EN_REG,
108 HCLGE_CMDQ_INTR_GEN_REG};
110 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
111 HCLGE_PF_OTHER_INT_REG,
112 HCLGE_MISC_RESET_STS_REG,
113 HCLGE_MISC_VECTOR_INT_STS,
114 HCLGE_GLOBAL_RESET_REG,
118 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
119 HCLGE_RING_RX_ADDR_H_REG,
120 HCLGE_RING_RX_BD_NUM_REG,
121 HCLGE_RING_RX_BD_LENGTH_REG,
122 HCLGE_RING_RX_MERGE_EN_REG,
123 HCLGE_RING_RX_TAIL_REG,
124 HCLGE_RING_RX_HEAD_REG,
125 HCLGE_RING_RX_FBD_NUM_REG,
126 HCLGE_RING_RX_OFFSET_REG,
127 HCLGE_RING_RX_FBD_OFFSET_REG,
128 HCLGE_RING_RX_STASH_REG,
129 HCLGE_RING_RX_BD_ERR_REG,
130 HCLGE_RING_TX_ADDR_L_REG,
131 HCLGE_RING_TX_ADDR_H_REG,
132 HCLGE_RING_TX_BD_NUM_REG,
133 HCLGE_RING_TX_PRIORITY_REG,
134 HCLGE_RING_TX_TC_REG,
135 HCLGE_RING_TX_MERGE_EN_REG,
136 HCLGE_RING_TX_TAIL_REG,
137 HCLGE_RING_TX_HEAD_REG,
138 HCLGE_RING_TX_FBD_NUM_REG,
139 HCLGE_RING_TX_OFFSET_REG,
140 HCLGE_RING_TX_EBD_NUM_REG,
141 HCLGE_RING_TX_EBD_OFFSET_REG,
142 HCLGE_RING_TX_BD_ERR_REG,
145 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
146 HCLGE_TQP_INTR_GL0_REG,
147 HCLGE_TQP_INTR_GL1_REG,
148 HCLGE_TQP_INTR_GL2_REG,
149 HCLGE_TQP_INTR_RL_REG};
151 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
153 "Serdes serial Loopback test",
154 "Serdes parallel Loopback test",
158 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
159 {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
160 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
161 {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
162 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
163 {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
164 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
165 {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
166 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
167 {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
169 {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
170 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
171 {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
173 {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
175 {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
177 {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
179 {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
181 {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
183 {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
184 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
185 {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
186 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
187 {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
188 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
189 {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
190 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
191 {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
192 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
193 {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
194 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
195 {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
196 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
197 {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
198 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
199 {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
201 {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
203 {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
205 {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
206 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
207 {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
208 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
209 {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
210 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
211 {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
212 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
213 {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
214 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
215 {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
216 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
217 {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
218 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
219 {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
220 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
221 {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
222 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
223 {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
224 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
225 {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
226 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
227 {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
228 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
229 {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
230 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
231 {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
232 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
233 {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
234 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
235 {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
236 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
237 {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
238 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
239 {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
240 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
241 {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
242 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
243 {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
244 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
245 {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
246 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
247 {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
248 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
249 {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
250 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
251 {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
252 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
253 {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
254 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
255 {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
256 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
257 {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
258 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
259 {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
260 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
261 {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
262 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
263 {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
264 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
265 {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
266 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
267 {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
268 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
269 {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
270 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
271 {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
272 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
273 {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
274 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
275 {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
276 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
277 {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
278 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
279 {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
280 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
281 {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
282 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
283 {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
284 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
285 {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
286 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
287 {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
288 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
289 {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
291 {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
293 {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
295 {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
296 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
297 {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
298 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
299 {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
300 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
301 {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
302 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
303 {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
304 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
305 {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
306 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
307 {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
308 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
309 {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
310 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
311 {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
312 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
313 {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
314 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
315 {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
316 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
317 {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
318 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
319 {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
320 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
321 {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
322 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
323 {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
324 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
325 {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
326 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
327 {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
328 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
329 {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
330 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
331 {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
332 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
333 {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
334 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
335 {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
336 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
337 {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
338 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
340 {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
341 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
342 {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
343 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
344 {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
345 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
346 {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
347 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
348 {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
349 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
350 {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
351 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
352 {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
353 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
354 {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
355 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
356 {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
357 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
358 {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
359 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
360 {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
361 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
362 {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
363 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
366 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
368 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
369 .ethter_type = cpu_to_le16(ETH_P_LLDP),
370 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
371 .i_port_bitmap = 0x1,
375 static const u8 hclge_hash_key[] = {
376 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
377 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
378 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
379 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
380 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
383 static const u32 hclge_dfx_bd_offset_list[] = {
384 HCLGE_DFX_BIOS_BD_OFFSET,
385 HCLGE_DFX_SSU_0_BD_OFFSET,
386 HCLGE_DFX_SSU_1_BD_OFFSET,
387 HCLGE_DFX_IGU_BD_OFFSET,
388 HCLGE_DFX_RPU_0_BD_OFFSET,
389 HCLGE_DFX_RPU_1_BD_OFFSET,
390 HCLGE_DFX_NCSI_BD_OFFSET,
391 HCLGE_DFX_RTC_BD_OFFSET,
392 HCLGE_DFX_PPP_BD_OFFSET,
393 HCLGE_DFX_RCB_BD_OFFSET,
394 HCLGE_DFX_TQP_BD_OFFSET,
395 HCLGE_DFX_SSU_2_BD_OFFSET
398 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
399 HCLGE_OPC_DFX_BIOS_COMMON_REG,
400 HCLGE_OPC_DFX_SSU_REG_0,
401 HCLGE_OPC_DFX_SSU_REG_1,
402 HCLGE_OPC_DFX_IGU_EGU_REG,
403 HCLGE_OPC_DFX_RPU_REG_0,
404 HCLGE_OPC_DFX_RPU_REG_1,
405 HCLGE_OPC_DFX_NCSI_REG,
406 HCLGE_OPC_DFX_RTC_REG,
407 HCLGE_OPC_DFX_PPP_REG,
408 HCLGE_OPC_DFX_RCB_REG,
409 HCLGE_OPC_DFX_TQP_REG,
410 HCLGE_OPC_DFX_SSU_REG_2
413 static const struct key_info meta_data_key_info[] = {
414 { PACKET_TYPE_ID, 6 },
421 { TUNNEL_PACKET, 1 },
424 static const struct key_info tuple_key_info[] = {
425 { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
426 { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
427 { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
428 { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
429 { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
430 { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
431 { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
432 { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
433 { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
434 { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
435 { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
436 { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
437 { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
438 { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
439 { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
440 { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
441 { INNER_DST_MAC, 48, KEY_OPT_MAC,
442 offsetof(struct hclge_fd_rule, tuples.dst_mac),
443 offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
444 { INNER_SRC_MAC, 48, KEY_OPT_MAC,
445 offsetof(struct hclge_fd_rule, tuples.src_mac),
446 offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
447 { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
448 offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
449 offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
450 { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
451 { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
452 offsetof(struct hclge_fd_rule, tuples.ether_proto),
453 offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
454 { INNER_L2_RSV, 16, KEY_OPT_LE16,
455 offsetof(struct hclge_fd_rule, tuples.l2_user_def),
456 offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
457 { INNER_IP_TOS, 8, KEY_OPT_U8,
458 offsetof(struct hclge_fd_rule, tuples.ip_tos),
459 offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
460 { INNER_IP_PROTO, 8, KEY_OPT_U8,
461 offsetof(struct hclge_fd_rule, tuples.ip_proto),
462 offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
463 { INNER_SRC_IP, 32, KEY_OPT_IP,
464 offsetof(struct hclge_fd_rule, tuples.src_ip),
465 offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
466 { INNER_DST_IP, 32, KEY_OPT_IP,
467 offsetof(struct hclge_fd_rule, tuples.dst_ip),
468 offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
469 { INNER_L3_RSV, 16, KEY_OPT_LE16,
470 offsetof(struct hclge_fd_rule, tuples.l3_user_def),
471 offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
472 { INNER_SRC_PORT, 16, KEY_OPT_LE16,
473 offsetof(struct hclge_fd_rule, tuples.src_port),
474 offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
475 { INNER_DST_PORT, 16, KEY_OPT_LE16,
476 offsetof(struct hclge_fd_rule, tuples.dst_port),
477 offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
478 { INNER_L4_RSV, 32, KEY_OPT_LE32,
479 offsetof(struct hclge_fd_rule, tuples.l4_user_def),
480 offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
483 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
485 #define HCLGE_MAC_CMD_NUM 21
487 u64 *data = (u64 *)(&hdev->mac_stats);
488 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
494 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
495 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
497 dev_err(&hdev->pdev->dev,
498 "Get MAC pkt stats fail, status = %d.\n", ret);
503 /* The first desc has a 64-bit header, so data size need to minus 1 */
504 data_size = sizeof(desc) / (sizeof(u64)) - 1;
506 desc_data = (__le64 *)(&desc[0].data[0]);
507 for (i = 0; i < data_size; i++) {
508 /* data memory is continuous becase only the first desc has a
509 * header in this command
511 *data += le64_to_cpu(*desc_data);
519 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
521 #define HCLGE_REG_NUM_PER_DESC 4
523 u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
524 u64 *data = (u64 *)(&hdev->mac_stats);
525 struct hclge_desc *desc;
532 /* The first desc has a 64-bit header, so need to consider it */
533 desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
535 /* This may be called inside atomic sections,
536 * so GFP_ATOMIC is more suitalbe here
538 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
542 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
543 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
549 data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
551 desc_data = (__le64 *)(&desc[0].data[0]);
552 for (i = 0; i < data_size; i++) {
553 /* data memory is continuous becase only the first desc has a
554 * header in this command
556 *data += le64_to_cpu(*desc_data);
566 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
568 struct hclge_desc desc;
571 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
572 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
574 dev_err(&hdev->pdev->dev,
575 "failed to query mac statistic reg number, ret = %d\n",
580 *reg_num = le32_to_cpu(desc.data[0]);
582 dev_err(&hdev->pdev->dev,
583 "mac statistic reg number is invalid!\n");
590 static int hclge_mac_update_stats(struct hclge_dev *hdev)
592 /* The firmware supports the new statistics acquisition method */
593 if (hdev->ae_dev->dev_specs.mac_stats_num)
594 return hclge_mac_update_stats_complete(hdev);
596 return hclge_mac_update_stats_defective(hdev);
599 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
601 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
602 struct hclge_vport *vport = hclge_get_vport(handle);
603 struct hclge_dev *hdev = vport->back;
604 struct hnae3_queue *queue;
605 struct hclge_desc desc[1];
606 struct hclge_tqp *tqp;
609 for (i = 0; i < kinfo->num_tqps; i++) {
610 queue = handle->kinfo.tqp[i];
611 tqp = container_of(queue, struct hclge_tqp, q);
612 /* command : HCLGE_OPC_QUERY_IGU_STAT */
613 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
616 desc[0].data[0] = cpu_to_le32(tqp->index);
617 ret = hclge_cmd_send(&hdev->hw, desc, 1);
619 dev_err(&hdev->pdev->dev,
620 "Query tqp stat fail, status = %d,queue = %d\n",
624 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
625 le32_to_cpu(desc[0].data[1]);
628 for (i = 0; i < kinfo->num_tqps; i++) {
629 queue = handle->kinfo.tqp[i];
630 tqp = container_of(queue, struct hclge_tqp, q);
631 /* command : HCLGE_OPC_QUERY_IGU_STAT */
632 hclge_cmd_setup_basic_desc(&desc[0],
633 HCLGE_OPC_QUERY_TX_STATS,
636 desc[0].data[0] = cpu_to_le32(tqp->index);
637 ret = hclge_cmd_send(&hdev->hw, desc, 1);
639 dev_err(&hdev->pdev->dev,
640 "Query tqp stat fail, status = %d,queue = %d\n",
644 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
645 le32_to_cpu(desc[0].data[1]);
651 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
653 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
654 struct hclge_tqp *tqp;
658 for (i = 0; i < kinfo->num_tqps; i++) {
659 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
660 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
663 for (i = 0; i < kinfo->num_tqps; i++) {
664 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
665 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
671 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
673 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
675 /* each tqp has TX & RX two queues */
676 return kinfo->num_tqps * (2);
679 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
681 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
685 for (i = 0; i < kinfo->num_tqps; i++) {
686 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
687 struct hclge_tqp, q);
688 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
690 buff = buff + ETH_GSTRING_LEN;
693 for (i = 0; i < kinfo->num_tqps; i++) {
694 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
695 struct hclge_tqp, q);
696 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
698 buff = buff + ETH_GSTRING_LEN;
704 static int hclge_comm_get_count(struct hclge_dev *hdev,
705 const struct hclge_comm_stats_str strs[],
711 for (i = 0; i < size; i++)
712 if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
718 static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
719 const struct hclge_comm_stats_str strs[],
725 for (i = 0; i < size; i++) {
726 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
729 *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
736 static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
737 const struct hclge_comm_stats_str strs[],
740 char *buff = (char *)data;
743 if (stringset != ETH_SS_STATS)
746 for (i = 0; i < size; i++) {
747 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
750 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
751 buff = buff + ETH_GSTRING_LEN;
757 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
759 struct hnae3_handle *handle;
762 handle = &hdev->vport[0].nic;
763 if (handle->client) {
764 status = hclge_tqps_update_stats(handle);
766 dev_err(&hdev->pdev->dev,
767 "Update TQPS stats fail, status = %d.\n",
772 status = hclge_mac_update_stats(hdev);
774 dev_err(&hdev->pdev->dev,
775 "Update MAC stats fail, status = %d.\n", status);
778 static void hclge_update_stats(struct hnae3_handle *handle,
779 struct net_device_stats *net_stats)
781 struct hclge_vport *vport = hclge_get_vport(handle);
782 struct hclge_dev *hdev = vport->back;
785 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
788 status = hclge_mac_update_stats(hdev);
790 dev_err(&hdev->pdev->dev,
791 "Update MAC stats fail, status = %d.\n",
794 status = hclge_tqps_update_stats(handle);
796 dev_err(&hdev->pdev->dev,
797 "Update TQPS stats fail, status = %d.\n",
800 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
803 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
805 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
806 HNAE3_SUPPORT_PHY_LOOPBACK | \
807 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
808 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
810 struct hclge_vport *vport = hclge_get_vport(handle);
811 struct hclge_dev *hdev = vport->back;
814 /* Loopback test support rules:
815 * mac: only GE mode support
816 * serdes: all mac mode will support include GE/XGE/LGE/CGE
817 * phy: only support when phy device exist on board
819 if (stringset == ETH_SS_TEST) {
820 /* clear loopback bit flags at first */
821 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
822 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
823 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
824 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
825 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
827 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
831 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
832 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
834 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
835 hdev->hw.mac.phydev->drv->set_loopback) ||
836 hnae3_dev_phy_imp_supported(hdev)) {
838 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
840 } else if (stringset == ETH_SS_STATS) {
841 count = hclge_comm_get_count(hdev, g_mac_stats_string,
842 ARRAY_SIZE(g_mac_stats_string)) +
843 hclge_tqps_get_sset_count(handle, stringset);
849 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
852 struct hclge_vport *vport = hclge_get_vport(handle);
853 struct hclge_dev *hdev = vport->back;
854 u8 *p = (char *)data;
857 if (stringset == ETH_SS_STATS) {
858 size = ARRAY_SIZE(g_mac_stats_string);
859 p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
861 p = hclge_tqps_get_strings(handle, p);
862 } else if (stringset == ETH_SS_TEST) {
863 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
864 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
866 p += ETH_GSTRING_LEN;
868 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
869 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
871 p += ETH_GSTRING_LEN;
873 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
875 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
877 p += ETH_GSTRING_LEN;
879 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
880 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
882 p += ETH_GSTRING_LEN;
887 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
889 struct hclge_vport *vport = hclge_get_vport(handle);
890 struct hclge_dev *hdev = vport->back;
893 p = hclge_comm_get_stats(hdev, g_mac_stats_string,
894 ARRAY_SIZE(g_mac_stats_string), data);
895 p = hclge_tqps_get_stats(handle, p);
898 static void hclge_get_mac_stat(struct hnae3_handle *handle,
899 struct hns3_mac_stats *mac_stats)
901 struct hclge_vport *vport = hclge_get_vport(handle);
902 struct hclge_dev *hdev = vport->back;
904 hclge_update_stats(handle, NULL);
906 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
907 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
910 static int hclge_parse_func_status(struct hclge_dev *hdev,
911 struct hclge_func_status_cmd *status)
913 #define HCLGE_MAC_ID_MASK 0xF
915 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
918 /* Set the pf to main pf */
919 if (status->pf_state & HCLGE_PF_STATE_MAIN)
920 hdev->flag |= HCLGE_FLAG_MAIN;
922 hdev->flag &= ~HCLGE_FLAG_MAIN;
924 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
928 static int hclge_query_function_status(struct hclge_dev *hdev)
930 #define HCLGE_QUERY_MAX_CNT 5
932 struct hclge_func_status_cmd *req;
933 struct hclge_desc desc;
937 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
938 req = (struct hclge_func_status_cmd *)desc.data;
941 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
943 dev_err(&hdev->pdev->dev,
944 "query function status failed %d.\n", ret);
948 /* Check pf reset is done */
951 usleep_range(1000, 2000);
952 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
954 return hclge_parse_func_status(hdev, req);
957 static int hclge_query_pf_resource(struct hclge_dev *hdev)
959 struct hclge_pf_res_cmd *req;
960 struct hclge_desc desc;
963 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
964 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
966 dev_err(&hdev->pdev->dev,
967 "query pf resource failed %d.\n", ret);
971 req = (struct hclge_pf_res_cmd *)desc.data;
972 hdev->num_tqps = le16_to_cpu(req->tqp_num) +
973 le16_to_cpu(req->ext_tqp_num);
974 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
976 if (req->tx_buf_size)
978 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
980 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
982 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
984 if (req->dv_buf_size)
986 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
988 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
990 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
992 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
993 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
994 dev_err(&hdev->pdev->dev,
995 "only %u msi resources available, not enough for pf(min:2).\n",
1000 if (hnae3_dev_roce_supported(hdev)) {
1001 hdev->num_roce_msi =
1002 le16_to_cpu(req->pf_intr_vector_number_roce);
1004 /* PF should have NIC vectors and Roce vectors,
1005 * NIC vectors are queued before Roce vectors.
1007 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
1009 hdev->num_msi = hdev->num_nic_msi;
1015 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
1017 switch (speed_cmd) {
1018 case HCLGE_FW_MAC_SPEED_10M:
1019 *speed = HCLGE_MAC_SPEED_10M;
1021 case HCLGE_FW_MAC_SPEED_100M:
1022 *speed = HCLGE_MAC_SPEED_100M;
1024 case HCLGE_FW_MAC_SPEED_1G:
1025 *speed = HCLGE_MAC_SPEED_1G;
1027 case HCLGE_FW_MAC_SPEED_10G:
1028 *speed = HCLGE_MAC_SPEED_10G;
1030 case HCLGE_FW_MAC_SPEED_25G:
1031 *speed = HCLGE_MAC_SPEED_25G;
1033 case HCLGE_FW_MAC_SPEED_40G:
1034 *speed = HCLGE_MAC_SPEED_40G;
1036 case HCLGE_FW_MAC_SPEED_50G:
1037 *speed = HCLGE_MAC_SPEED_50G;
1039 case HCLGE_FW_MAC_SPEED_100G:
1040 *speed = HCLGE_MAC_SPEED_100G;
1042 case HCLGE_FW_MAC_SPEED_200G:
1043 *speed = HCLGE_MAC_SPEED_200G;
1052 static const struct hclge_speed_bit_map speed_bit_map[] = {
1053 {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
1054 {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
1055 {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
1056 {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
1057 {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
1058 {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
1059 {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
1060 {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
1061 {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
1064 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
1068 for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
1069 if (speed == speed_bit_map[i].speed) {
1070 *speed_bit = speed_bit_map[i].speed_bit;
1078 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
1080 struct hclge_vport *vport = hclge_get_vport(handle);
1081 struct hclge_dev *hdev = vport->back;
1082 u32 speed_ability = hdev->hw.mac.speed_ability;
1086 ret = hclge_get_speed_bit(speed, &speed_bit);
1090 if (speed_bit & speed_ability)
1096 static void hclge_convert_setting_sr(u16 speed_ability,
1097 unsigned long *link_mode)
1099 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1100 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1102 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1103 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1105 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1106 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1108 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1109 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1111 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1112 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1114 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1115 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1119 static void hclge_convert_setting_lr(u16 speed_ability,
1120 unsigned long *link_mode)
1122 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1123 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1125 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1126 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1128 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1129 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1131 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1132 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1134 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1135 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1137 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1139 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1143 static void hclge_convert_setting_cr(u16 speed_ability,
1144 unsigned long *link_mode)
1146 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1147 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1149 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1150 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1152 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1153 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1155 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1156 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1158 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1159 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1161 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1162 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1166 static void hclge_convert_setting_kr(u16 speed_ability,
1167 unsigned long *link_mode)
1169 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1172 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1173 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1175 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1176 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1178 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1179 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1181 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1182 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1184 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1185 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1187 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1188 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1192 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1194 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1195 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1197 switch (mac->speed) {
1198 case HCLGE_MAC_SPEED_10G:
1199 case HCLGE_MAC_SPEED_40G:
1200 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1203 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1205 case HCLGE_MAC_SPEED_25G:
1206 case HCLGE_MAC_SPEED_50G:
1207 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1210 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1211 BIT(HNAE3_FEC_AUTO);
1213 case HCLGE_MAC_SPEED_100G:
1214 case HCLGE_MAC_SPEED_200G:
1215 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1216 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1219 mac->fec_ability = 0;
1224 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1227 struct hclge_mac *mac = &hdev->hw.mac;
1229 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1230 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1233 hclge_convert_setting_sr(speed_ability, mac->supported);
1234 hclge_convert_setting_lr(speed_ability, mac->supported);
1235 hclge_convert_setting_cr(speed_ability, mac->supported);
1236 if (hnae3_dev_fec_supported(hdev))
1237 hclge_convert_setting_fec(mac);
1239 if (hnae3_dev_pause_supported(hdev))
1240 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1242 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1243 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1246 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1249 struct hclge_mac *mac = &hdev->hw.mac;
1251 hclge_convert_setting_kr(speed_ability, mac->supported);
1252 if (hnae3_dev_fec_supported(hdev))
1253 hclge_convert_setting_fec(mac);
1255 if (hnae3_dev_pause_supported(hdev))
1256 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1258 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1259 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1262 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1265 unsigned long *supported = hdev->hw.mac.supported;
1267 /* default to support all speed for GE port */
1269 speed_ability = HCLGE_SUPPORT_GE;
1271 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1272 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1275 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1276 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1278 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1282 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1283 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1284 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1287 if (hnae3_dev_pause_supported(hdev)) {
1288 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1289 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1292 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1293 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1296 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1298 u8 media_type = hdev->hw.mac.media_type;
1300 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1301 hclge_parse_fiber_link_mode(hdev, speed_ability);
1302 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1303 hclge_parse_copper_link_mode(hdev, speed_ability);
1304 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1305 hclge_parse_backplane_link_mode(hdev, speed_ability);
1308 static u32 hclge_get_max_speed(u16 speed_ability)
1310 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1311 return HCLGE_MAC_SPEED_200G;
1313 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1314 return HCLGE_MAC_SPEED_100G;
1316 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1317 return HCLGE_MAC_SPEED_50G;
1319 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1320 return HCLGE_MAC_SPEED_40G;
1322 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1323 return HCLGE_MAC_SPEED_25G;
1325 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1326 return HCLGE_MAC_SPEED_10G;
1328 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1329 return HCLGE_MAC_SPEED_1G;
1331 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1332 return HCLGE_MAC_SPEED_100M;
1334 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1335 return HCLGE_MAC_SPEED_10M;
1337 return HCLGE_MAC_SPEED_1G;
1340 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1342 #define HCLGE_TX_SPARE_SIZE_UNIT 4096
1343 #define SPEED_ABILITY_EXT_SHIFT 8
1345 struct hclge_cfg_param_cmd *req;
1346 u64 mac_addr_tmp_high;
1347 u16 speed_ability_ext;
1351 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1353 /* get the configuration */
1354 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1355 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1356 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1357 HCLGE_CFG_TQP_DESC_N_M,
1358 HCLGE_CFG_TQP_DESC_N_S);
1360 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1361 HCLGE_CFG_PHY_ADDR_M,
1362 HCLGE_CFG_PHY_ADDR_S);
1363 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1364 HCLGE_CFG_MEDIA_TP_M,
1365 HCLGE_CFG_MEDIA_TP_S);
1366 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1367 HCLGE_CFG_RX_BUF_LEN_M,
1368 HCLGE_CFG_RX_BUF_LEN_S);
1369 /* get mac_address */
1370 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1371 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1372 HCLGE_CFG_MAC_ADDR_H_M,
1373 HCLGE_CFG_MAC_ADDR_H_S);
1375 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1377 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1378 HCLGE_CFG_DEFAULT_SPEED_M,
1379 HCLGE_CFG_DEFAULT_SPEED_S);
1380 cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1381 HCLGE_CFG_RSS_SIZE_M,
1382 HCLGE_CFG_RSS_SIZE_S);
1384 for (i = 0; i < ETH_ALEN; i++)
1385 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1387 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1388 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1390 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1391 HCLGE_CFG_SPEED_ABILITY_M,
1392 HCLGE_CFG_SPEED_ABILITY_S);
1393 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1394 HCLGE_CFG_SPEED_ABILITY_EXT_M,
1395 HCLGE_CFG_SPEED_ABILITY_EXT_S);
1396 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1398 cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1399 HCLGE_CFG_VLAN_FLTR_CAP_M,
1400 HCLGE_CFG_VLAN_FLTR_CAP_S);
1402 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1403 HCLGE_CFG_UMV_TBL_SPACE_M,
1404 HCLGE_CFG_UMV_TBL_SPACE_S);
1406 cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1407 HCLGE_CFG_PF_RSS_SIZE_M,
1408 HCLGE_CFG_PF_RSS_SIZE_S);
1410 /* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a
1411 * power of 2, instead of reading out directly. This would
1412 * be more flexible for future changes and expansions.
1413 * When VF max rss size field is HCLGE_CFG_RSS_SIZE_S,
1414 * it does not make sense if PF's field is 0. In this case, PF and VF
1415 * has the same max rss size filed: HCLGE_CFG_RSS_SIZE_S.
1417 cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1418 1U << cfg->pf_rss_size_max :
1419 cfg->vf_rss_size_max;
1421 /* The unit of the tx spare buffer size queried from configuration
1422 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
1425 cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1426 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1427 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1428 cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1431 /* hclge_get_cfg: query the static parameter from flash
1432 * @hdev: pointer to struct hclge_dev
1433 * @hcfg: the config structure to be getted
1435 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1437 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1438 struct hclge_cfg_param_cmd *req;
1442 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1445 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1446 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1448 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1449 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1450 /* Len should be united by 4 bytes when send to hardware */
1451 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1452 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1453 req->offset = cpu_to_le32(offset);
1456 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1458 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1462 hclge_parse_cfg(hcfg, desc);
1467 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1469 #define HCLGE_MAX_NON_TSO_BD_NUM 8U
1471 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1473 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1474 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1475 ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1476 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1477 ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1478 ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1479 ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1480 ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1483 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1484 struct hclge_desc *desc)
1486 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1487 struct hclge_dev_specs_0_cmd *req0;
1488 struct hclge_dev_specs_1_cmd *req1;
1490 req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1491 req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1493 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1494 ae_dev->dev_specs.rss_ind_tbl_size =
1495 le16_to_cpu(req0->rss_ind_tbl_size);
1496 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1497 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1498 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1499 ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1500 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1501 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1502 ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
1503 ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
1506 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1508 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1510 if (!dev_specs->max_non_tso_bd_num)
1511 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1512 if (!dev_specs->rss_ind_tbl_size)
1513 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1514 if (!dev_specs->rss_key_size)
1515 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1516 if (!dev_specs->max_tm_rate)
1517 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1518 if (!dev_specs->max_qset_num)
1519 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1520 if (!dev_specs->max_int_gl)
1521 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1522 if (!dev_specs->max_frm_size)
1523 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1524 if (!dev_specs->umv_size)
1525 dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1528 static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
1533 ret = hclge_mac_query_reg_num(hdev, ®_num);
1534 if (ret && ret != -EOPNOTSUPP)
1537 hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
1541 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1543 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1547 ret = hclge_query_mac_stats_num(hdev);
1551 /* set default specifications as devices lower than version V3 do not
1552 * support querying specifications from firmware.
1554 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1555 hclge_set_default_dev_specs(hdev);
1559 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1560 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1562 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1564 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1566 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1570 hclge_parse_dev_specs(hdev, desc);
1571 hclge_check_dev_specs(hdev);
1576 static int hclge_get_cap(struct hclge_dev *hdev)
1580 ret = hclge_query_function_status(hdev);
1582 dev_err(&hdev->pdev->dev,
1583 "query function status error %d.\n", ret);
1587 /* get pf resource */
1588 return hclge_query_pf_resource(hdev);
1591 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1593 #define HCLGE_MIN_TX_DESC 64
1594 #define HCLGE_MIN_RX_DESC 64
1596 if (!is_kdump_kernel())
1599 dev_info(&hdev->pdev->dev,
1600 "Running kdump kernel. Using minimal resources\n");
1602 /* minimal queue pairs equals to the number of vports */
1603 hdev->num_tqps = hdev->num_req_vfs + 1;
1604 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1605 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1608 static int hclge_configure(struct hclge_dev *hdev)
1610 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1611 const struct cpumask *cpumask = cpu_online_mask;
1612 struct hclge_cfg cfg;
1616 ret = hclge_get_cfg(hdev, &cfg);
1620 hdev->base_tqp_pid = 0;
1621 hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1622 hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1623 hdev->rx_buf_len = cfg.rx_buf_len;
1624 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1625 hdev->hw.mac.media_type = cfg.media_type;
1626 hdev->hw.mac.phy_addr = cfg.phy_addr;
1627 hdev->num_tx_desc = cfg.tqp_desc_num;
1628 hdev->num_rx_desc = cfg.tqp_desc_num;
1629 hdev->tm_info.num_pg = 1;
1630 hdev->tc_max = cfg.tc_num;
1631 hdev->tm_info.hw_pfc_map = 0;
1633 hdev->wanted_umv_size = cfg.umv_space;
1635 hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
1636 hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1637 hdev->gro_en = true;
1638 if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1639 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1641 if (hnae3_dev_fd_supported(hdev)) {
1643 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1646 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1648 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1649 cfg.default_speed, ret);
1653 hclge_parse_link_mode(hdev, cfg.speed_ability);
1655 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1657 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1658 (hdev->tc_max < 1)) {
1659 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1664 /* Dev does not support DCB */
1665 if (!hnae3_dev_dcb_supported(hdev)) {
1669 hdev->pfc_max = hdev->tc_max;
1672 hdev->tm_info.num_tc = 1;
1674 /* Currently not support uncontiuous tc */
1675 for (i = 0; i < hdev->tm_info.num_tc; i++)
1676 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1678 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1680 hclge_init_kdump_kernel_config(hdev);
1682 /* Set the affinity based on numa node */
1683 node = dev_to_node(&hdev->pdev->dev);
1684 if (node != NUMA_NO_NODE)
1685 cpumask = cpumask_of_node(node);
1687 cpumask_copy(&hdev->affinity_mask, cpumask);
1692 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1695 struct hclge_cfg_tso_status_cmd *req;
1696 struct hclge_desc desc;
1698 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1700 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1701 req->tso_mss_min = cpu_to_le16(tso_mss_min);
1702 req->tso_mss_max = cpu_to_le16(tso_mss_max);
1704 return hclge_cmd_send(&hdev->hw, &desc, 1);
1707 static int hclge_config_gro(struct hclge_dev *hdev)
1709 struct hclge_cfg_gro_status_cmd *req;
1710 struct hclge_desc desc;
1713 if (!hnae3_dev_gro_supported(hdev))
1716 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1717 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1719 req->gro_en = hdev->gro_en ? 1 : 0;
1721 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1723 dev_err(&hdev->pdev->dev,
1724 "GRO hardware config cmd failed, ret = %d\n", ret);
1729 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1731 struct hclge_tqp *tqp;
1734 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1735 sizeof(struct hclge_tqp), GFP_KERNEL);
1741 for (i = 0; i < hdev->num_tqps; i++) {
1742 tqp->dev = &hdev->pdev->dev;
1745 tqp->q.ae_algo = &ae_algo;
1746 tqp->q.buf_size = hdev->rx_buf_len;
1747 tqp->q.tx_desc_num = hdev->num_tx_desc;
1748 tqp->q.rx_desc_num = hdev->num_rx_desc;
1750 /* need an extended offset to configure queues >=
1751 * HCLGE_TQP_MAX_SIZE_DEV_V2
1753 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1754 tqp->q.io_base = hdev->hw.io_base +
1755 HCLGE_TQP_REG_OFFSET +
1756 i * HCLGE_TQP_REG_SIZE;
1758 tqp->q.io_base = hdev->hw.io_base +
1759 HCLGE_TQP_REG_OFFSET +
1760 HCLGE_TQP_EXT_REG_OFFSET +
1761 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1770 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1771 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1773 struct hclge_tqp_map_cmd *req;
1774 struct hclge_desc desc;
1777 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1779 req = (struct hclge_tqp_map_cmd *)desc.data;
1780 req->tqp_id = cpu_to_le16(tqp_pid);
1781 req->tqp_vf = func_id;
1782 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1784 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1785 req->tqp_vid = cpu_to_le16(tqp_vid);
1787 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1789 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1794 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1796 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1797 struct hclge_dev *hdev = vport->back;
1800 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1801 alloced < num_tqps; i++) {
1802 if (!hdev->htqp[i].alloced) {
1803 hdev->htqp[i].q.handle = &vport->nic;
1804 hdev->htqp[i].q.tqp_index = alloced;
1805 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1806 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1807 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1808 hdev->htqp[i].alloced = true;
1812 vport->alloc_tqps = alloced;
1813 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1814 vport->alloc_tqps / hdev->tm_info.num_tc);
1816 /* ensure one to one mapping between irq and queue at default */
1817 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1818 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1823 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1824 u16 num_tx_desc, u16 num_rx_desc)
1827 struct hnae3_handle *nic = &vport->nic;
1828 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1829 struct hclge_dev *hdev = vport->back;
1832 kinfo->num_tx_desc = num_tx_desc;
1833 kinfo->num_rx_desc = num_rx_desc;
1835 kinfo->rx_buf_len = hdev->rx_buf_len;
1836 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1838 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1839 sizeof(struct hnae3_queue *), GFP_KERNEL);
1843 ret = hclge_assign_tqp(vport, num_tqps);
1845 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1850 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1851 struct hclge_vport *vport)
1853 struct hnae3_handle *nic = &vport->nic;
1854 struct hnae3_knic_private_info *kinfo;
1857 kinfo = &nic->kinfo;
1858 for (i = 0; i < vport->alloc_tqps; i++) {
1859 struct hclge_tqp *q =
1860 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1864 is_pf = !(vport->vport_id);
1865 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1874 static int hclge_map_tqp(struct hclge_dev *hdev)
1876 struct hclge_vport *vport = hdev->vport;
1879 num_vport = hdev->num_req_vfs + 1;
1880 for (i = 0; i < num_vport; i++) {
1883 ret = hclge_map_tqp_to_vport(hdev, vport);
1893 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1895 struct hnae3_handle *nic = &vport->nic;
1896 struct hclge_dev *hdev = vport->back;
1899 nic->pdev = hdev->pdev;
1900 nic->ae_algo = &ae_algo;
1901 nic->numa_node_mask = hdev->numa_node_mask;
1902 nic->kinfo.io_base = hdev->hw.io_base;
1904 ret = hclge_knic_setup(vport, num_tqps,
1905 hdev->num_tx_desc, hdev->num_rx_desc);
1907 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1912 static int hclge_alloc_vport(struct hclge_dev *hdev)
1914 struct pci_dev *pdev = hdev->pdev;
1915 struct hclge_vport *vport;
1921 /* We need to alloc a vport for main NIC of PF */
1922 num_vport = hdev->num_req_vfs + 1;
1924 if (hdev->num_tqps < num_vport) {
1925 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1926 hdev->num_tqps, num_vport);
1930 /* Alloc the same number of TQPs for every vport */
1931 tqp_per_vport = hdev->num_tqps / num_vport;
1932 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1934 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1939 hdev->vport = vport;
1940 hdev->num_alloc_vport = num_vport;
1942 if (IS_ENABLED(CONFIG_PCI_IOV))
1943 hdev->num_alloc_vfs = hdev->num_req_vfs;
1945 for (i = 0; i < num_vport; i++) {
1947 vport->vport_id = i;
1948 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1949 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1950 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1951 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1952 vport->req_vlan_fltr_en = true;
1953 INIT_LIST_HEAD(&vport->vlan_list);
1954 INIT_LIST_HEAD(&vport->uc_mac_list);
1955 INIT_LIST_HEAD(&vport->mc_mac_list);
1956 spin_lock_init(&vport->mac_list_lock);
1959 ret = hclge_vport_setup(vport, tqp_main_vport);
1961 ret = hclge_vport_setup(vport, tqp_per_vport);
1964 "vport setup failed for vport %d, %d\n",
1975 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1976 struct hclge_pkt_buf_alloc *buf_alloc)
1978 /* TX buffer size is unit by 128 byte */
1979 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1980 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1981 struct hclge_tx_buff_alloc_cmd *req;
1982 struct hclge_desc desc;
1986 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1988 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1989 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1990 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1992 req->tx_pkt_buff[i] =
1993 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1994 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1997 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1999 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
2005 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
2006 struct hclge_pkt_buf_alloc *buf_alloc)
2008 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
2011 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
2016 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
2021 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2022 if (hdev->hw_tc_map & BIT(i))
2027 /* Get the number of pfc enabled TCs, which have private buffer */
2028 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
2029 struct hclge_pkt_buf_alloc *buf_alloc)
2031 struct hclge_priv_buf *priv;
2035 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2036 priv = &buf_alloc->priv_buf[i];
2037 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
2045 /* Get the number of pfc disabled TCs, which have private buffer */
2046 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
2047 struct hclge_pkt_buf_alloc *buf_alloc)
2049 struct hclge_priv_buf *priv;
2053 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2054 priv = &buf_alloc->priv_buf[i];
2055 if (hdev->hw_tc_map & BIT(i) &&
2056 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
2064 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
2066 struct hclge_priv_buf *priv;
2070 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2071 priv = &buf_alloc->priv_buf[i];
2073 rx_priv += priv->buf_size;
2078 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
2080 u32 i, total_tx_size = 0;
2082 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2083 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2085 return total_tx_size;
2088 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2089 struct hclge_pkt_buf_alloc *buf_alloc,
2092 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2093 u32 tc_num = hclge_get_tc_num(hdev);
2094 u32 shared_buf, aligned_mps;
2098 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2100 if (hnae3_dev_dcb_supported(hdev))
2101 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2104 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2105 + hdev->dv_buf_size;
2107 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2108 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2109 HCLGE_BUF_SIZE_UNIT);
2111 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2112 if (rx_all < rx_priv + shared_std)
2115 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2116 buf_alloc->s_buf.buf_size = shared_buf;
2117 if (hnae3_dev_dcb_supported(hdev)) {
2118 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2119 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2120 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2121 HCLGE_BUF_SIZE_UNIT);
2123 buf_alloc->s_buf.self.high = aligned_mps +
2124 HCLGE_NON_DCB_ADDITIONAL_BUF;
2125 buf_alloc->s_buf.self.low = aligned_mps;
2128 if (hnae3_dev_dcb_supported(hdev)) {
2129 hi_thrd = shared_buf - hdev->dv_buf_size;
2131 if (tc_num <= NEED_RESERVE_TC_NUM)
2132 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2136 hi_thrd = hi_thrd / tc_num;
2138 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2139 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2140 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2142 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2143 lo_thrd = aligned_mps;
2146 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2147 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2148 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2154 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2155 struct hclge_pkt_buf_alloc *buf_alloc)
2159 total_size = hdev->pkt_buf_size;
2161 /* alloc tx buffer for all enabled tc */
2162 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2163 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2165 if (hdev->hw_tc_map & BIT(i)) {
2166 if (total_size < hdev->tx_buf_size)
2169 priv->tx_buf_size = hdev->tx_buf_size;
2171 priv->tx_buf_size = 0;
2174 total_size -= priv->tx_buf_size;
2180 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2181 struct hclge_pkt_buf_alloc *buf_alloc)
2183 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2184 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2187 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2188 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2195 if (!(hdev->hw_tc_map & BIT(i)))
2200 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2201 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2202 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2203 HCLGE_BUF_SIZE_UNIT);
2206 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2210 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2213 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2216 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2217 struct hclge_pkt_buf_alloc *buf_alloc)
2219 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2220 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2223 /* let the last to be cleared first */
2224 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2225 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2226 unsigned int mask = BIT((unsigned int)i);
2228 if (hdev->hw_tc_map & mask &&
2229 !(hdev->tm_info.hw_pfc_map & mask)) {
2230 /* Clear the no pfc TC private buffer */
2238 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2239 no_pfc_priv_num == 0)
2243 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2246 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2247 struct hclge_pkt_buf_alloc *buf_alloc)
2249 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2250 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2253 /* let the last to be cleared first */
2254 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2255 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2256 unsigned int mask = BIT((unsigned int)i);
2258 if (hdev->hw_tc_map & mask &&
2259 hdev->tm_info.hw_pfc_map & mask) {
2260 /* Reduce the number of pfc TC with private buffer */
2268 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2273 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2276 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2277 struct hclge_pkt_buf_alloc *buf_alloc)
2279 #define COMPENSATE_BUFFER 0x3C00
2280 #define COMPENSATE_HALF_MPS_NUM 5
2281 #define PRIV_WL_GAP 0x1800
2283 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2284 u32 tc_num = hclge_get_tc_num(hdev);
2285 u32 half_mps = hdev->mps >> 1;
2290 rx_priv = rx_priv / tc_num;
2292 if (tc_num <= NEED_RESERVE_TC_NUM)
2293 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2295 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2296 COMPENSATE_HALF_MPS_NUM * half_mps;
2297 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2298 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2299 if (rx_priv < min_rx_priv)
2302 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2303 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2310 if (!(hdev->hw_tc_map & BIT(i)))
2314 priv->buf_size = rx_priv;
2315 priv->wl.high = rx_priv - hdev->dv_buf_size;
2316 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2319 buf_alloc->s_buf.buf_size = 0;
2324 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2325 * @hdev: pointer to struct hclge_dev
2326 * @buf_alloc: pointer to buffer calculation data
2327 * @return: 0: calculate successful, negative: fail
2329 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2330 struct hclge_pkt_buf_alloc *buf_alloc)
2332 /* When DCB is not supported, rx private buffer is not allocated. */
2333 if (!hnae3_dev_dcb_supported(hdev)) {
2334 u32 rx_all = hdev->pkt_buf_size;
2336 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2337 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2343 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2346 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2349 /* try to decrease the buffer size */
2350 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2353 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2356 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2362 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2363 struct hclge_pkt_buf_alloc *buf_alloc)
2365 struct hclge_rx_priv_buff_cmd *req;
2366 struct hclge_desc desc;
2370 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2371 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2373 /* Alloc private buffer TCs */
2374 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2375 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2378 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2380 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2384 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2385 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2387 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2389 dev_err(&hdev->pdev->dev,
2390 "rx private buffer alloc cmd failed %d\n", ret);
2395 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2396 struct hclge_pkt_buf_alloc *buf_alloc)
2398 struct hclge_rx_priv_wl_buf *req;
2399 struct hclge_priv_buf *priv;
2400 struct hclge_desc desc[2];
2404 for (i = 0; i < 2; i++) {
2405 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2407 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2409 /* The first descriptor set the NEXT bit to 1 */
2411 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2413 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2415 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2416 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2418 priv = &buf_alloc->priv_buf[idx];
2419 req->tc_wl[j].high =
2420 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2421 req->tc_wl[j].high |=
2422 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2424 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2425 req->tc_wl[j].low |=
2426 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2430 /* Send 2 descriptor at one time */
2431 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2433 dev_err(&hdev->pdev->dev,
2434 "rx private waterline config cmd failed %d\n",
2439 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2440 struct hclge_pkt_buf_alloc *buf_alloc)
2442 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2443 struct hclge_rx_com_thrd *req;
2444 struct hclge_desc desc[2];
2445 struct hclge_tc_thrd *tc;
2449 for (i = 0; i < 2; i++) {
2450 hclge_cmd_setup_basic_desc(&desc[i],
2451 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2452 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2454 /* The first descriptor set the NEXT bit to 1 */
2456 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2458 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2460 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2461 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2463 req->com_thrd[j].high =
2464 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2465 req->com_thrd[j].high |=
2466 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2467 req->com_thrd[j].low =
2468 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2469 req->com_thrd[j].low |=
2470 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2474 /* Send 2 descriptors at one time */
2475 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2477 dev_err(&hdev->pdev->dev,
2478 "common threshold config cmd failed %d\n", ret);
2482 static int hclge_common_wl_config(struct hclge_dev *hdev,
2483 struct hclge_pkt_buf_alloc *buf_alloc)
2485 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2486 struct hclge_rx_com_wl *req;
2487 struct hclge_desc desc;
2490 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2492 req = (struct hclge_rx_com_wl *)desc.data;
2493 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2494 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2496 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2497 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2499 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2501 dev_err(&hdev->pdev->dev,
2502 "common waterline config cmd failed %d\n", ret);
2507 int hclge_buffer_alloc(struct hclge_dev *hdev)
2509 struct hclge_pkt_buf_alloc *pkt_buf;
2512 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2516 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2518 dev_err(&hdev->pdev->dev,
2519 "could not calc tx buffer size for all TCs %d\n", ret);
2523 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2525 dev_err(&hdev->pdev->dev,
2526 "could not alloc tx buffers %d\n", ret);
2530 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2532 dev_err(&hdev->pdev->dev,
2533 "could not calc rx priv buffer size for all TCs %d\n",
2538 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2540 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2545 if (hnae3_dev_dcb_supported(hdev)) {
2546 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2548 dev_err(&hdev->pdev->dev,
2549 "could not configure rx private waterline %d\n",
2554 ret = hclge_common_thrd_config(hdev, pkt_buf);
2556 dev_err(&hdev->pdev->dev,
2557 "could not configure common threshold %d\n",
2563 ret = hclge_common_wl_config(hdev, pkt_buf);
2565 dev_err(&hdev->pdev->dev,
2566 "could not configure common waterline %d\n", ret);
2573 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2575 struct hnae3_handle *roce = &vport->roce;
2576 struct hnae3_handle *nic = &vport->nic;
2577 struct hclge_dev *hdev = vport->back;
2579 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2581 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2584 roce->rinfo.base_vector = hdev->roce_base_vector;
2586 roce->rinfo.netdev = nic->kinfo.netdev;
2587 roce->rinfo.roce_io_base = hdev->hw.io_base;
2588 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2590 roce->pdev = nic->pdev;
2591 roce->ae_algo = nic->ae_algo;
2592 roce->numa_node_mask = nic->numa_node_mask;
2597 static int hclge_init_msi(struct hclge_dev *hdev)
2599 struct pci_dev *pdev = hdev->pdev;
2603 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2605 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2608 "failed(%d) to allocate MSI/MSI-X vectors\n",
2612 if (vectors < hdev->num_msi)
2613 dev_warn(&hdev->pdev->dev,
2614 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2615 hdev->num_msi, vectors);
2617 hdev->num_msi = vectors;
2618 hdev->num_msi_left = vectors;
2620 hdev->base_msi_vector = pdev->irq;
2621 hdev->roce_base_vector = hdev->base_msi_vector +
2624 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2625 sizeof(u16), GFP_KERNEL);
2626 if (!hdev->vector_status) {
2627 pci_free_irq_vectors(pdev);
2631 for (i = 0; i < hdev->num_msi; i++)
2632 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2634 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2635 sizeof(int), GFP_KERNEL);
2636 if (!hdev->vector_irq) {
2637 pci_free_irq_vectors(pdev);
2644 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2646 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2647 duplex = HCLGE_MAC_FULL;
2652 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2655 struct hclge_config_mac_speed_dup_cmd *req;
2656 struct hclge_desc desc;
2659 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2661 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2664 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2667 case HCLGE_MAC_SPEED_10M:
2668 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2669 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2671 case HCLGE_MAC_SPEED_100M:
2672 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2673 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2675 case HCLGE_MAC_SPEED_1G:
2676 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2677 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2679 case HCLGE_MAC_SPEED_10G:
2680 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2681 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2683 case HCLGE_MAC_SPEED_25G:
2684 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2685 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2687 case HCLGE_MAC_SPEED_40G:
2688 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2689 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2691 case HCLGE_MAC_SPEED_50G:
2692 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2693 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2695 case HCLGE_MAC_SPEED_100G:
2696 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2697 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2699 case HCLGE_MAC_SPEED_200G:
2700 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2701 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2704 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2708 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2711 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2713 dev_err(&hdev->pdev->dev,
2714 "mac speed/duplex config cmd failed %d.\n", ret);
2721 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2723 struct hclge_mac *mac = &hdev->hw.mac;
2726 duplex = hclge_check_speed_dup(duplex, speed);
2727 if (!mac->support_autoneg && mac->speed == speed &&
2728 mac->duplex == duplex)
2731 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2735 hdev->hw.mac.speed = speed;
2736 hdev->hw.mac.duplex = duplex;
2741 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2744 struct hclge_vport *vport = hclge_get_vport(handle);
2745 struct hclge_dev *hdev = vport->back;
2747 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2750 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2752 struct hclge_config_auto_neg_cmd *req;
2753 struct hclge_desc desc;
2757 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2759 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2761 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2762 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2764 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2766 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2772 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2774 struct hclge_vport *vport = hclge_get_vport(handle);
2775 struct hclge_dev *hdev = vport->back;
2777 if (!hdev->hw.mac.support_autoneg) {
2779 dev_err(&hdev->pdev->dev,
2780 "autoneg is not supported by current port\n");
2787 return hclge_set_autoneg_en(hdev, enable);
2790 static int hclge_get_autoneg(struct hnae3_handle *handle)
2792 struct hclge_vport *vport = hclge_get_vport(handle);
2793 struct hclge_dev *hdev = vport->back;
2794 struct phy_device *phydev = hdev->hw.mac.phydev;
2797 return phydev->autoneg;
2799 return hdev->hw.mac.autoneg;
2802 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2804 struct hclge_vport *vport = hclge_get_vport(handle);
2805 struct hclge_dev *hdev = vport->back;
2808 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2810 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2813 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2816 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2818 struct hclge_vport *vport = hclge_get_vport(handle);
2819 struct hclge_dev *hdev = vport->back;
2821 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2822 return hclge_set_autoneg_en(hdev, !halt);
2827 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2829 struct hclge_config_fec_cmd *req;
2830 struct hclge_desc desc;
2833 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2835 req = (struct hclge_config_fec_cmd *)desc.data;
2836 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2837 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2838 if (fec_mode & BIT(HNAE3_FEC_RS))
2839 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2840 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2841 if (fec_mode & BIT(HNAE3_FEC_BASER))
2842 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2843 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2845 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2847 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2852 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2854 struct hclge_vport *vport = hclge_get_vport(handle);
2855 struct hclge_dev *hdev = vport->back;
2856 struct hclge_mac *mac = &hdev->hw.mac;
2859 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2860 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2864 ret = hclge_set_fec_hw(hdev, fec_mode);
2868 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2872 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2875 struct hclge_vport *vport = hclge_get_vport(handle);
2876 struct hclge_dev *hdev = vport->back;
2877 struct hclge_mac *mac = &hdev->hw.mac;
2880 *fec_ability = mac->fec_ability;
2882 *fec_mode = mac->fec_mode;
2885 static int hclge_mac_init(struct hclge_dev *hdev)
2887 struct hclge_mac *mac = &hdev->hw.mac;
2890 hdev->support_sfp_query = true;
2891 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2892 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2893 hdev->hw.mac.duplex);
2897 if (hdev->hw.mac.support_autoneg) {
2898 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2905 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2906 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2911 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2913 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2917 ret = hclge_set_default_loopback(hdev);
2921 ret = hclge_buffer_alloc(hdev);
2923 dev_err(&hdev->pdev->dev,
2924 "allocate buffer fail, ret=%d\n", ret);
2929 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2931 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2932 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2933 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2934 hclge_wq, &hdev->service_task, 0);
2937 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2939 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2940 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2941 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2942 hclge_wq, &hdev->service_task, 0);
2945 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2947 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2948 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2949 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2950 hclge_wq, &hdev->service_task, 0);
2953 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2955 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2956 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2957 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2958 hclge_wq, &hdev->service_task,
2962 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2964 struct hclge_link_status_cmd *req;
2965 struct hclge_desc desc;
2968 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2969 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2971 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2976 req = (struct hclge_link_status_cmd *)desc.data;
2977 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2978 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2983 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2985 struct phy_device *phydev = hdev->hw.mac.phydev;
2987 *link_status = HCLGE_LINK_STATUS_DOWN;
2989 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2992 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2995 return hclge_get_mac_link_status(hdev, link_status);
2998 static void hclge_push_link_status(struct hclge_dev *hdev)
3000 struct hclge_vport *vport;
3004 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
3005 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
3007 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
3008 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
3011 ret = hclge_push_vf_link_status(vport);
3013 dev_err(&hdev->pdev->dev,
3014 "failed to push link status to vf%u, ret = %d\n",
3020 static void hclge_update_link_status(struct hclge_dev *hdev)
3022 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
3023 struct hnae3_handle *handle = &hdev->vport[0].nic;
3024 struct hnae3_client *rclient = hdev->roce_client;
3025 struct hnae3_client *client = hdev->nic_client;
3032 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
3035 ret = hclge_get_mac_phy_link(hdev, &state);
3037 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3041 if (state != hdev->hw.mac.link) {
3042 hdev->hw.mac.link = state;
3043 client->ops->link_status_change(handle, state);
3044 hclge_config_mac_tnl_int(hdev, state);
3045 if (rclient && rclient->ops->link_status_change)
3046 rclient->ops->link_status_change(rhandle, state);
3048 hclge_push_link_status(hdev);
3051 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3054 static void hclge_update_port_capability(struct hclge_dev *hdev,
3055 struct hclge_mac *mac)
3057 if (hnae3_dev_fec_supported(hdev))
3058 /* update fec ability by speed */
3059 hclge_convert_setting_fec(mac);
3061 /* firmware can not identify back plane type, the media type
3062 * read from configuration can help deal it
3064 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
3065 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
3066 mac->module_type = HNAE3_MODULE_TYPE_KR;
3067 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3068 mac->module_type = HNAE3_MODULE_TYPE_TP;
3070 if (mac->support_autoneg) {
3071 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
3072 linkmode_copy(mac->advertising, mac->supported);
3074 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
3076 linkmode_zero(mac->advertising);
3080 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3082 struct hclge_sfp_info_cmd *resp;
3083 struct hclge_desc desc;
3086 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3087 resp = (struct hclge_sfp_info_cmd *)desc.data;
3088 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3089 if (ret == -EOPNOTSUPP) {
3090 dev_warn(&hdev->pdev->dev,
3091 "IMP do not support get SFP speed %d\n", ret);
3094 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3098 *speed = le32_to_cpu(resp->speed);
3103 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3105 struct hclge_sfp_info_cmd *resp;
3106 struct hclge_desc desc;
3109 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3110 resp = (struct hclge_sfp_info_cmd *)desc.data;
3112 resp->query_type = QUERY_ACTIVE_SPEED;
3114 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3115 if (ret == -EOPNOTSUPP) {
3116 dev_warn(&hdev->pdev->dev,
3117 "IMP does not support get SFP info %d\n", ret);
3120 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3124 /* In some case, mac speed get from IMP may be 0, it shouldn't be
3125 * set to mac->speed.
3127 if (!le32_to_cpu(resp->speed))
3130 mac->speed = le32_to_cpu(resp->speed);
3131 /* if resp->speed_ability is 0, it means it's an old version
3132 * firmware, do not update these params
3134 if (resp->speed_ability) {
3135 mac->module_type = le32_to_cpu(resp->module_type);
3136 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3137 mac->autoneg = resp->autoneg;
3138 mac->support_autoneg = resp->autoneg_ability;
3139 mac->speed_type = QUERY_ACTIVE_SPEED;
3140 if (!resp->active_fec)
3143 mac->fec_mode = BIT(resp->active_fec);
3145 mac->speed_type = QUERY_SFP_SPEED;
3151 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3152 struct ethtool_link_ksettings *cmd)
3154 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3155 struct hclge_vport *vport = hclge_get_vport(handle);
3156 struct hclge_phy_link_ksetting_0_cmd *req0;
3157 struct hclge_phy_link_ksetting_1_cmd *req1;
3158 u32 supported, advertising, lp_advertising;
3159 struct hclge_dev *hdev = vport->back;
3162 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3164 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3165 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3168 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3170 dev_err(&hdev->pdev->dev,
3171 "failed to get phy link ksetting, ret = %d.\n", ret);
3175 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3176 cmd->base.autoneg = req0->autoneg;
3177 cmd->base.speed = le32_to_cpu(req0->speed);
3178 cmd->base.duplex = req0->duplex;
3179 cmd->base.port = req0->port;
3180 cmd->base.transceiver = req0->transceiver;
3181 cmd->base.phy_address = req0->phy_address;
3182 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3183 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3184 supported = le32_to_cpu(req0->supported);
3185 advertising = le32_to_cpu(req0->advertising);
3186 lp_advertising = le32_to_cpu(req0->lp_advertising);
3187 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3189 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3191 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3194 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3195 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3196 cmd->base.master_slave_state = req1->master_slave_state;
3202 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3203 const struct ethtool_link_ksettings *cmd)
3205 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3206 struct hclge_vport *vport = hclge_get_vport(handle);
3207 struct hclge_phy_link_ksetting_0_cmd *req0;
3208 struct hclge_phy_link_ksetting_1_cmd *req1;
3209 struct hclge_dev *hdev = vport->back;
3213 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3214 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3215 (cmd->base.duplex != DUPLEX_HALF &&
3216 cmd->base.duplex != DUPLEX_FULL)))
3219 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3221 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3222 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3225 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3226 req0->autoneg = cmd->base.autoneg;
3227 req0->speed = cpu_to_le32(cmd->base.speed);
3228 req0->duplex = cmd->base.duplex;
3229 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3230 cmd->link_modes.advertising);
3231 req0->advertising = cpu_to_le32(advertising);
3232 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3234 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3235 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3237 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3239 dev_err(&hdev->pdev->dev,
3240 "failed to set phy link ksettings, ret = %d.\n", ret);
3244 hdev->hw.mac.autoneg = cmd->base.autoneg;
3245 hdev->hw.mac.speed = cmd->base.speed;
3246 hdev->hw.mac.duplex = cmd->base.duplex;
3247 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3252 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3254 struct ethtool_link_ksettings cmd;
3257 if (!hnae3_dev_phy_imp_supported(hdev))
3260 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3264 hdev->hw.mac.autoneg = cmd.base.autoneg;
3265 hdev->hw.mac.speed = cmd.base.speed;
3266 hdev->hw.mac.duplex = cmd.base.duplex;
3271 static int hclge_tp_port_init(struct hclge_dev *hdev)
3273 struct ethtool_link_ksettings cmd;
3275 if (!hnae3_dev_phy_imp_supported(hdev))
3278 cmd.base.autoneg = hdev->hw.mac.autoneg;
3279 cmd.base.speed = hdev->hw.mac.speed;
3280 cmd.base.duplex = hdev->hw.mac.duplex;
3281 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3283 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3286 static int hclge_update_port_info(struct hclge_dev *hdev)
3288 struct hclge_mac *mac = &hdev->hw.mac;
3289 int speed = HCLGE_MAC_SPEED_UNKNOWN;
3292 /* get the port info from SFP cmd if not copper port */
3293 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3294 return hclge_update_tp_port_info(hdev);
3296 /* if IMP does not support get SFP/qSFP info, return directly */
3297 if (!hdev->support_sfp_query)
3300 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3301 ret = hclge_get_sfp_info(hdev, mac);
3303 ret = hclge_get_sfp_speed(hdev, &speed);
3305 if (ret == -EOPNOTSUPP) {
3306 hdev->support_sfp_query = false;
3312 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3313 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3314 hclge_update_port_capability(hdev, mac);
3317 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3320 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3321 return 0; /* do nothing if no SFP */
3323 /* must config full duplex for SFP */
3324 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3328 static int hclge_get_status(struct hnae3_handle *handle)
3330 struct hclge_vport *vport = hclge_get_vport(handle);
3331 struct hclge_dev *hdev = vport->back;
3333 hclge_update_link_status(hdev);
3335 return hdev->hw.mac.link;
3338 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3340 if (!pci_num_vf(hdev->pdev)) {
3341 dev_err(&hdev->pdev->dev,
3342 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3346 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3347 dev_err(&hdev->pdev->dev,
3348 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3349 vf, pci_num_vf(hdev->pdev));
3353 /* VF start from 1 in vport */
3354 vf += HCLGE_VF_VPORT_START_NUM;
3355 return &hdev->vport[vf];
3358 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3359 struct ifla_vf_info *ivf)
3361 struct hclge_vport *vport = hclge_get_vport(handle);
3362 struct hclge_dev *hdev = vport->back;
3364 vport = hclge_get_vf_vport(hdev, vf);
3369 ivf->linkstate = vport->vf_info.link_state;
3370 ivf->spoofchk = vport->vf_info.spoofchk;
3371 ivf->trusted = vport->vf_info.trusted;
3372 ivf->min_tx_rate = 0;
3373 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3374 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3375 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3376 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3377 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3382 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3385 struct hclge_vport *vport = hclge_get_vport(handle);
3386 struct hclge_dev *hdev = vport->back;
3390 vport = hclge_get_vf_vport(hdev, vf);
3394 link_state_old = vport->vf_info.link_state;
3395 vport->vf_info.link_state = link_state;
3397 ret = hclge_push_vf_link_status(vport);
3399 vport->vf_info.link_state = link_state_old;
3400 dev_err(&hdev->pdev->dev,
3401 "failed to push vf%d link status, ret = %d\n", vf, ret);
3407 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3409 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3411 /* fetch the events from their corresponding regs */
3412 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3413 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3414 hw_err_src_reg = hclge_read_dev(&hdev->hw,
3415 HCLGE_RAS_PF_OTHER_INT_STS_REG);
3417 /* Assumption: If by any chance reset and mailbox events are reported
3418 * together then we will only process reset event in this go and will
3419 * defer the processing of the mailbox events. Since, we would have not
3420 * cleared RX CMDQ event this time we would receive again another
3421 * interrupt from H/W just for the mailbox.
3423 * check for vector0 reset event sources
3425 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3426 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3427 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3428 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3429 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3430 hdev->rst_stats.imp_rst_cnt++;
3431 return HCLGE_VECTOR0_EVENT_RST;
3434 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3435 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3436 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3437 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3438 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3439 hdev->rst_stats.global_rst_cnt++;
3440 return HCLGE_VECTOR0_EVENT_RST;
3443 /* check for vector0 msix event and hardware error event source */
3444 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3445 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3446 return HCLGE_VECTOR0_EVENT_ERR;
3448 /* check for vector0 ptp event source */
3449 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3450 *clearval = msix_src_reg;
3451 return HCLGE_VECTOR0_EVENT_PTP;
3454 /* check for vector0 mailbox(=CMDQ RX) event source */
3455 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3456 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3457 *clearval = cmdq_src_reg;
3458 return HCLGE_VECTOR0_EVENT_MBX;
3461 /* print other vector0 event source */
3462 dev_info(&hdev->pdev->dev,
3463 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3464 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3466 return HCLGE_VECTOR0_EVENT_OTHER;
3469 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3472 switch (event_type) {
3473 case HCLGE_VECTOR0_EVENT_PTP:
3474 case HCLGE_VECTOR0_EVENT_RST:
3475 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3477 case HCLGE_VECTOR0_EVENT_MBX:
3478 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3485 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3487 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3488 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3489 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3490 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3491 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3494 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3496 writel(enable ? 1 : 0, vector->addr);
3499 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3501 struct hclge_dev *hdev = data;
3502 unsigned long flags;
3506 hclge_enable_vector(&hdev->misc_vector, false);
3507 event_cause = hclge_check_event_cause(hdev, &clearval);
3509 /* vector 0 interrupt is shared with reset and mailbox source events. */
3510 switch (event_cause) {
3511 case HCLGE_VECTOR0_EVENT_ERR:
3512 hclge_errhand_task_schedule(hdev);
3514 case HCLGE_VECTOR0_EVENT_RST:
3515 hclge_reset_task_schedule(hdev);
3517 case HCLGE_VECTOR0_EVENT_PTP:
3518 spin_lock_irqsave(&hdev->ptp->lock, flags);
3519 hclge_ptp_clean_tx_hwts(hdev);
3520 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3522 case HCLGE_VECTOR0_EVENT_MBX:
3523 /* If we are here then,
3524 * 1. Either we are not handling any mbx task and we are not
3527 * 2. We could be handling a mbx task but nothing more is
3529 * In both cases, we should schedule mbx task as there are more
3530 * mbx messages reported by this interrupt.
3532 hclge_mbx_task_schedule(hdev);
3535 dev_warn(&hdev->pdev->dev,
3536 "received unknown or unhandled event of vector0\n");
3540 hclge_clear_event_cause(hdev, event_cause, clearval);
3542 /* Enable interrupt if it is not caused by reset event or error event */
3543 if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3544 event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3545 event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3546 hclge_enable_vector(&hdev->misc_vector, true);
3551 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3553 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3554 dev_warn(&hdev->pdev->dev,
3555 "vector(vector_id %d) has been freed.\n", vector_id);
3559 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3560 hdev->num_msi_left += 1;
3561 hdev->num_msi_used -= 1;
3564 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3566 struct hclge_misc_vector *vector = &hdev->misc_vector;
3568 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3570 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3571 hdev->vector_status[0] = 0;
3573 hdev->num_msi_left -= 1;
3574 hdev->num_msi_used += 1;
3577 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3578 const cpumask_t *mask)
3580 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3583 cpumask_copy(&hdev->affinity_mask, mask);
3586 static void hclge_irq_affinity_release(struct kref *ref)
3590 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3592 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3593 &hdev->affinity_mask);
3595 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3596 hdev->affinity_notify.release = hclge_irq_affinity_release;
3597 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3598 &hdev->affinity_notify);
3601 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3603 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3604 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3607 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3611 hclge_get_misc_vector(hdev);
3613 /* this would be explicitly freed in the end */
3614 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3615 HCLGE_NAME, pci_name(hdev->pdev));
3616 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3617 0, hdev->misc_vector.name, hdev);
3619 hclge_free_vector(hdev, 0);
3620 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3621 hdev->misc_vector.vector_irq);
3627 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3629 free_irq(hdev->misc_vector.vector_irq, hdev);
3630 hclge_free_vector(hdev, 0);
3633 int hclge_notify_client(struct hclge_dev *hdev,
3634 enum hnae3_reset_notify_type type)
3636 struct hnae3_handle *handle = &hdev->vport[0].nic;
3637 struct hnae3_client *client = hdev->nic_client;
3640 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3643 if (!client->ops->reset_notify)
3646 ret = client->ops->reset_notify(handle, type);
3648 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3654 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3655 enum hnae3_reset_notify_type type)
3657 struct hnae3_handle *handle = &hdev->vport[0].roce;
3658 struct hnae3_client *client = hdev->roce_client;
3661 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3664 if (!client->ops->reset_notify)
3667 ret = client->ops->reset_notify(handle, type);
3669 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3675 static int hclge_reset_wait(struct hclge_dev *hdev)
3677 #define HCLGE_RESET_WATI_MS 100
3678 #define HCLGE_RESET_WAIT_CNT 350
3680 u32 val, reg, reg_bit;
3683 switch (hdev->reset_type) {
3684 case HNAE3_IMP_RESET:
3685 reg = HCLGE_GLOBAL_RESET_REG;
3686 reg_bit = HCLGE_IMP_RESET_BIT;
3688 case HNAE3_GLOBAL_RESET:
3689 reg = HCLGE_GLOBAL_RESET_REG;
3690 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3692 case HNAE3_FUNC_RESET:
3693 reg = HCLGE_FUN_RST_ING;
3694 reg_bit = HCLGE_FUN_RST_ING_B;
3697 dev_err(&hdev->pdev->dev,
3698 "Wait for unsupported reset type: %d\n",
3703 val = hclge_read_dev(&hdev->hw, reg);
3704 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3705 msleep(HCLGE_RESET_WATI_MS);
3706 val = hclge_read_dev(&hdev->hw, reg);
3710 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3711 dev_warn(&hdev->pdev->dev,
3712 "Wait for reset timeout: %d\n", hdev->reset_type);
3719 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3721 struct hclge_vf_rst_cmd *req;
3722 struct hclge_desc desc;
3724 req = (struct hclge_vf_rst_cmd *)desc.data;
3725 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3726 req->dest_vfid = func_id;
3731 return hclge_cmd_send(&hdev->hw, &desc, 1);
3734 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3738 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3739 struct hclge_vport *vport = &hdev->vport[i];
3742 /* Send cmd to set/clear VF's FUNC_RST_ING */
3743 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3745 dev_err(&hdev->pdev->dev,
3746 "set vf(%u) rst failed %d!\n",
3747 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3752 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3755 /* Inform VF to process the reset.
3756 * hclge_inform_reset_assert_to_vf may fail if VF
3757 * driver is not loaded.
3759 ret = hclge_inform_reset_assert_to_vf(vport);
3761 dev_warn(&hdev->pdev->dev,
3762 "inform reset to vf(%u) failed %d!\n",
3763 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3770 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3772 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3773 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3774 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3777 hclge_mbx_handler(hdev);
3779 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3782 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3784 struct hclge_pf_rst_sync_cmd *req;
3785 struct hclge_desc desc;
3789 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3790 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3793 /* vf need to down netdev by mbx during PF or FLR reset */
3794 hclge_mailbox_service_task(hdev);
3796 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3797 /* for compatible with old firmware, wait
3798 * 100 ms for VF to stop IO
3800 if (ret == -EOPNOTSUPP) {
3801 msleep(HCLGE_RESET_SYNC_TIME);
3804 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3807 } else if (req->all_vf_ready) {
3810 msleep(HCLGE_PF_RESET_SYNC_TIME);
3811 hclge_cmd_reuse_desc(&desc, true);
3812 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3814 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3817 void hclge_report_hw_error(struct hclge_dev *hdev,
3818 enum hnae3_hw_error_type type)
3820 struct hnae3_client *client = hdev->nic_client;
3822 if (!client || !client->ops->process_hw_error ||
3823 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3826 client->ops->process_hw_error(&hdev->vport[0].nic, type);
3829 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3833 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3834 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3835 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3836 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3837 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3840 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3841 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3842 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3843 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3847 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3849 struct hclge_desc desc;
3850 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3853 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3854 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3855 req->fun_reset_vfid = func_id;
3857 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3859 dev_err(&hdev->pdev->dev,
3860 "send function reset cmd fail, status =%d\n", ret);
3865 static void hclge_do_reset(struct hclge_dev *hdev)
3867 struct hnae3_handle *handle = &hdev->vport[0].nic;
3868 struct pci_dev *pdev = hdev->pdev;
3871 if (hclge_get_hw_reset_stat(handle)) {
3872 dev_info(&pdev->dev, "hardware reset not finish\n");
3873 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3874 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3875 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3879 switch (hdev->reset_type) {
3880 case HNAE3_IMP_RESET:
3881 dev_info(&pdev->dev, "IMP reset requested\n");
3882 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3883 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3884 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3886 case HNAE3_GLOBAL_RESET:
3887 dev_info(&pdev->dev, "global reset requested\n");
3888 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3889 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3890 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3892 case HNAE3_FUNC_RESET:
3893 dev_info(&pdev->dev, "PF reset requested\n");
3894 /* schedule again to check later */
3895 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3896 hclge_reset_task_schedule(hdev);
3899 dev_warn(&pdev->dev,
3900 "unsupported reset type: %d\n", hdev->reset_type);
3905 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3906 unsigned long *addr)
3908 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3909 struct hclge_dev *hdev = ae_dev->priv;
3911 /* return the highest priority reset level amongst all */
3912 if (test_bit(HNAE3_IMP_RESET, addr)) {
3913 rst_level = HNAE3_IMP_RESET;
3914 clear_bit(HNAE3_IMP_RESET, addr);
3915 clear_bit(HNAE3_GLOBAL_RESET, addr);
3916 clear_bit(HNAE3_FUNC_RESET, addr);
3917 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3918 rst_level = HNAE3_GLOBAL_RESET;
3919 clear_bit(HNAE3_GLOBAL_RESET, addr);
3920 clear_bit(HNAE3_FUNC_RESET, addr);
3921 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3922 rst_level = HNAE3_FUNC_RESET;
3923 clear_bit(HNAE3_FUNC_RESET, addr);
3924 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3925 rst_level = HNAE3_FLR_RESET;
3926 clear_bit(HNAE3_FLR_RESET, addr);
3929 if (hdev->reset_type != HNAE3_NONE_RESET &&
3930 rst_level < hdev->reset_type)
3931 return HNAE3_NONE_RESET;
3936 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3940 switch (hdev->reset_type) {
3941 case HNAE3_IMP_RESET:
3942 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3944 case HNAE3_GLOBAL_RESET:
3945 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3954 /* For revision 0x20, the reset interrupt source
3955 * can only be cleared after hardware reset done
3957 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3958 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3961 hclge_enable_vector(&hdev->misc_vector, true);
3964 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3968 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3970 reg_val |= HCLGE_NIC_SW_RST_RDY;
3972 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3974 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3977 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3981 ret = hclge_set_all_vf_rst(hdev, true);
3985 hclge_func_reset_sync_vf(hdev);
3990 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3995 switch (hdev->reset_type) {
3996 case HNAE3_FUNC_RESET:
3997 ret = hclge_func_reset_notify_vf(hdev);
4001 ret = hclge_func_reset_cmd(hdev, 0);
4003 dev_err(&hdev->pdev->dev,
4004 "asserting function reset fail %d!\n", ret);
4008 /* After performaning pf reset, it is not necessary to do the
4009 * mailbox handling or send any command to firmware, because
4010 * any mailbox handling or command to firmware is only valid
4011 * after hclge_cmd_init is called.
4013 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
4014 hdev->rst_stats.pf_rst_cnt++;
4016 case HNAE3_FLR_RESET:
4017 ret = hclge_func_reset_notify_vf(hdev);
4021 case HNAE3_IMP_RESET:
4022 hclge_handle_imp_error(hdev);
4023 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4024 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
4025 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
4031 /* inform hardware that preparatory work is done */
4032 msleep(HCLGE_RESET_SYNC_TIME);
4033 hclge_reset_handshake(hdev, true);
4034 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
4039 static void hclge_show_rst_info(struct hclge_dev *hdev)
4043 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
4047 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
4049 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
4054 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
4056 #define MAX_RESET_FAIL_CNT 5
4058 if (hdev->reset_pending) {
4059 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
4060 hdev->reset_pending);
4062 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
4063 HCLGE_RESET_INT_M) {
4064 dev_info(&hdev->pdev->dev,
4065 "reset failed because new reset interrupt\n");
4066 hclge_clear_reset_cause(hdev);
4068 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
4069 hdev->rst_stats.reset_fail_cnt++;
4070 set_bit(hdev->reset_type, &hdev->reset_pending);
4071 dev_info(&hdev->pdev->dev,
4072 "re-schedule reset task(%u)\n",
4073 hdev->rst_stats.reset_fail_cnt);
4077 hclge_clear_reset_cause(hdev);
4079 /* recover the handshake status when reset fail */
4080 hclge_reset_handshake(hdev, true);
4082 dev_err(&hdev->pdev->dev, "Reset fail!\n");
4084 hclge_show_rst_info(hdev);
4086 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4091 static void hclge_update_reset_level(struct hclge_dev *hdev)
4093 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4094 enum hnae3_reset_type reset_level;
4096 /* reset request will not be set during reset, so clear
4097 * pending reset request to avoid unnecessary reset
4098 * caused by the same reason.
4100 hclge_get_reset_level(ae_dev, &hdev->reset_request);
4102 /* if default_reset_request has a higher level reset request,
4103 * it should be handled as soon as possible. since some errors
4104 * need this kind of reset to fix.
4106 reset_level = hclge_get_reset_level(ae_dev,
4107 &hdev->default_reset_request);
4108 if (reset_level != HNAE3_NONE_RESET)
4109 set_bit(reset_level, &hdev->reset_request);
4112 static int hclge_set_rst_done(struct hclge_dev *hdev)
4114 struct hclge_pf_rst_done_cmd *req;
4115 struct hclge_desc desc;
4118 req = (struct hclge_pf_rst_done_cmd *)desc.data;
4119 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4120 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4122 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4123 /* To be compatible with the old firmware, which does not support
4124 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4127 if (ret == -EOPNOTSUPP) {
4128 dev_warn(&hdev->pdev->dev,
4129 "current firmware does not support command(0x%x)!\n",
4130 HCLGE_OPC_PF_RST_DONE);
4133 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4140 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4144 switch (hdev->reset_type) {
4145 case HNAE3_FUNC_RESET:
4146 case HNAE3_FLR_RESET:
4147 ret = hclge_set_all_vf_rst(hdev, false);
4149 case HNAE3_GLOBAL_RESET:
4150 case HNAE3_IMP_RESET:
4151 ret = hclge_set_rst_done(hdev);
4157 /* clear up the handshake status after re-initialize done */
4158 hclge_reset_handshake(hdev, false);
4163 static int hclge_reset_stack(struct hclge_dev *hdev)
4167 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4171 ret = hclge_reset_ae_dev(hdev->ae_dev);
4175 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4178 static int hclge_reset_prepare(struct hclge_dev *hdev)
4182 hdev->rst_stats.reset_cnt++;
4183 /* perform reset of the stack & ae device for a client */
4184 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4189 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4194 return hclge_reset_prepare_wait(hdev);
4197 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4201 hdev->rst_stats.hw_reset_done_cnt++;
4203 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4208 ret = hclge_reset_stack(hdev);
4213 hclge_clear_reset_cause(hdev);
4215 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4216 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4220 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4223 ret = hclge_reset_prepare_up(hdev);
4228 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4233 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4237 hdev->last_reset_time = jiffies;
4238 hdev->rst_stats.reset_fail_cnt = 0;
4239 hdev->rst_stats.reset_done_cnt++;
4240 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4242 hclge_update_reset_level(hdev);
4247 static void hclge_reset(struct hclge_dev *hdev)
4249 if (hclge_reset_prepare(hdev))
4252 if (hclge_reset_wait(hdev))
4255 if (hclge_reset_rebuild(hdev))
4261 if (hclge_reset_err_handle(hdev))
4262 hclge_reset_task_schedule(hdev);
4265 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4267 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4268 struct hclge_dev *hdev = ae_dev->priv;
4270 /* We might end up getting called broadly because of 2 below cases:
4271 * 1. Recoverable error was conveyed through APEI and only way to bring
4272 * normalcy is to reset.
4273 * 2. A new reset request from the stack due to timeout
4275 * check if this is a new reset request and we are not here just because
4276 * last reset attempt did not succeed and watchdog hit us again. We will
4277 * know this if last reset request did not occur very recently (watchdog
4278 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
4279 * In case of new request we reset the "reset level" to PF reset.
4280 * And if it is a repeat reset request of the most recent one then we
4281 * want to make sure we throttle the reset request. Therefore, we will
4282 * not allow it again before 3*HZ times.
4285 if (time_before(jiffies, (hdev->last_reset_time +
4286 HCLGE_RESET_INTERVAL))) {
4287 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4291 if (hdev->default_reset_request) {
4293 hclge_get_reset_level(ae_dev,
4294 &hdev->default_reset_request);
4295 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4296 hdev->reset_level = HNAE3_FUNC_RESET;
4299 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4302 /* request reset & schedule reset task */
4303 set_bit(hdev->reset_level, &hdev->reset_request);
4304 hclge_reset_task_schedule(hdev);
4306 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4307 hdev->reset_level++;
4310 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4311 enum hnae3_reset_type rst_type)
4313 struct hclge_dev *hdev = ae_dev->priv;
4315 set_bit(rst_type, &hdev->default_reset_request);
4318 static void hclge_reset_timer(struct timer_list *t)
4320 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4322 /* if default_reset_request has no value, it means that this reset
4323 * request has already be handled, so just return here
4325 if (!hdev->default_reset_request)
4328 dev_info(&hdev->pdev->dev,
4329 "triggering reset in reset timer\n");
4330 hclge_reset_event(hdev->pdev, NULL);
4333 static void hclge_reset_subtask(struct hclge_dev *hdev)
4335 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4337 /* check if there is any ongoing reset in the hardware. This status can
4338 * be checked from reset_pending. If there is then, we need to wait for
4339 * hardware to complete reset.
4340 * a. If we are able to figure out in reasonable time that hardware
4341 * has fully resetted then, we can proceed with driver, client
4343 * b. else, we can come back later to check this status so re-sched
4346 hdev->last_reset_time = jiffies;
4347 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4348 if (hdev->reset_type != HNAE3_NONE_RESET)
4351 /* check if we got any *new* reset requests to be honored */
4352 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4353 if (hdev->reset_type != HNAE3_NONE_RESET)
4354 hclge_do_reset(hdev);
4356 hdev->reset_type = HNAE3_NONE_RESET;
4359 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4361 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4362 enum hnae3_reset_type reset_type;
4364 if (ae_dev->hw_err_reset_req) {
4365 reset_type = hclge_get_reset_level(ae_dev,
4366 &ae_dev->hw_err_reset_req);
4367 hclge_set_def_reset_request(ae_dev, reset_type);
4370 if (hdev->default_reset_request && ae_dev->ops->reset_event)
4371 ae_dev->ops->reset_event(hdev->pdev, NULL);
4373 /* enable interrupt after error handling complete */
4374 hclge_enable_vector(&hdev->misc_vector, true);
4377 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4379 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4381 ae_dev->hw_err_reset_req = 0;
4383 if (hclge_find_error_source(hdev)) {
4384 hclge_handle_error_info_log(ae_dev);
4385 hclge_handle_mac_tnl(hdev);
4388 hclge_handle_err_reset_request(hdev);
4391 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4393 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4394 struct device *dev = &hdev->pdev->dev;
4397 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4398 if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4399 if (hclge_handle_hw_msix_error
4400 (hdev, &hdev->default_reset_request))
4401 dev_info(dev, "received msix interrupt 0x%x\n",
4405 hclge_handle_hw_ras_error(ae_dev);
4407 hclge_handle_err_reset_request(hdev);
4410 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4412 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4415 if (hnae3_dev_ras_imp_supported(hdev))
4416 hclge_handle_err_recovery(hdev);
4418 hclge_misc_err_recovery(hdev);
4421 static void hclge_reset_service_task(struct hclge_dev *hdev)
4423 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4426 down(&hdev->reset_sem);
4427 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4429 hclge_reset_subtask(hdev);
4431 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4432 up(&hdev->reset_sem);
4435 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4439 /* start from vport 1 for PF is always alive */
4440 for (i = 1; i < hdev->num_alloc_vport; i++) {
4441 struct hclge_vport *vport = &hdev->vport[i];
4443 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4444 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4446 /* If vf is not alive, set to default value */
4447 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4448 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4452 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4454 unsigned long delta = round_jiffies_relative(HZ);
4456 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4459 /* Always handle the link updating to make sure link state is
4460 * updated when it is triggered by mbx.
4462 hclge_update_link_status(hdev);
4463 hclge_sync_mac_table(hdev);
4464 hclge_sync_promisc_mode(hdev);
4465 hclge_sync_fd_table(hdev);
4467 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4468 delta = jiffies - hdev->last_serv_processed;
4470 if (delta < round_jiffies_relative(HZ)) {
4471 delta = round_jiffies_relative(HZ) - delta;
4476 hdev->serv_processed_cnt++;
4477 hclge_update_vport_alive(hdev);
4479 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4480 hdev->last_serv_processed = jiffies;
4484 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4485 hclge_update_stats_for_all(hdev);
4487 hclge_update_port_info(hdev);
4488 hclge_sync_vlan_filter(hdev);
4490 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4491 hclge_rfs_filter_expire(hdev);
4493 hdev->last_serv_processed = jiffies;
4496 hclge_task_schedule(hdev, delta);
4499 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4501 unsigned long flags;
4503 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4504 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4505 !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4508 /* to prevent concurrence with the irq handler */
4509 spin_lock_irqsave(&hdev->ptp->lock, flags);
4511 /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4512 * handler may handle it just before spin_lock_irqsave().
4514 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4515 hclge_ptp_clean_tx_hwts(hdev);
4517 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4520 static void hclge_service_task(struct work_struct *work)
4522 struct hclge_dev *hdev =
4523 container_of(work, struct hclge_dev, service_task.work);
4525 hclge_errhand_service_task(hdev);
4526 hclge_reset_service_task(hdev);
4527 hclge_ptp_service_task(hdev);
4528 hclge_mailbox_service_task(hdev);
4529 hclge_periodic_service_task(hdev);
4531 /* Handle error recovery, reset and mbx again in case periodical task
4532 * delays the handling by calling hclge_task_schedule() in
4533 * hclge_periodic_service_task().
4535 hclge_errhand_service_task(hdev);
4536 hclge_reset_service_task(hdev);
4537 hclge_mailbox_service_task(hdev);
4540 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4542 /* VF handle has no client */
4543 if (!handle->client)
4544 return container_of(handle, struct hclge_vport, nic);
4545 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4546 return container_of(handle, struct hclge_vport, roce);
4548 return container_of(handle, struct hclge_vport, nic);
4551 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4552 struct hnae3_vector_info *vector_info)
4554 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4556 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4558 /* need an extend offset to config vector >= 64 */
4559 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4560 vector_info->io_addr = hdev->hw.io_base +
4561 HCLGE_VECTOR_REG_BASE +
4562 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4564 vector_info->io_addr = hdev->hw.io_base +
4565 HCLGE_VECTOR_EXT_REG_BASE +
4566 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4567 HCLGE_VECTOR_REG_OFFSET_H +
4568 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4569 HCLGE_VECTOR_REG_OFFSET;
4571 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4572 hdev->vector_irq[idx] = vector_info->vector;
4575 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4576 struct hnae3_vector_info *vector_info)
4578 struct hclge_vport *vport = hclge_get_vport(handle);
4579 struct hnae3_vector_info *vector = vector_info;
4580 struct hclge_dev *hdev = vport->back;
4585 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4586 vector_num = min(hdev->num_msi_left, vector_num);
4588 for (j = 0; j < vector_num; j++) {
4589 while (++i < hdev->num_nic_msi) {
4590 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4591 hclge_get_vector_info(hdev, i, vector);
4599 hdev->num_msi_left -= alloc;
4600 hdev->num_msi_used += alloc;
4605 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4609 for (i = 0; i < hdev->num_msi; i++)
4610 if (vector == hdev->vector_irq[i])
4616 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4618 struct hclge_vport *vport = hclge_get_vport(handle);
4619 struct hclge_dev *hdev = vport->back;
4622 vector_id = hclge_get_vector_index(hdev, vector);
4623 if (vector_id < 0) {
4624 dev_err(&hdev->pdev->dev,
4625 "Get vector index fail. vector = %d\n", vector);
4629 hclge_free_vector(hdev, vector_id);
4634 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4636 return HCLGE_RSS_KEY_SIZE;
4639 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4640 const u8 hfunc, const u8 *key)
4642 struct hclge_rss_config_cmd *req;
4643 unsigned int key_offset = 0;
4644 struct hclge_desc desc;
4649 key_counts = HCLGE_RSS_KEY_SIZE;
4650 req = (struct hclge_rss_config_cmd *)desc.data;
4652 while (key_counts) {
4653 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4656 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4657 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4659 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4660 memcpy(req->hash_key,
4661 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4663 key_counts -= key_size;
4665 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4667 dev_err(&hdev->pdev->dev,
4668 "Configure RSS config fail, status = %d\n",
4676 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4678 struct hclge_rss_indirection_table_cmd *req;
4679 struct hclge_desc desc;
4680 int rss_cfg_tbl_num;
4688 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4689 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4690 HCLGE_RSS_CFG_TBL_SIZE;
4692 for (i = 0; i < rss_cfg_tbl_num; i++) {
4693 hclge_cmd_setup_basic_desc
4694 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4696 req->start_table_index =
4697 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4698 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4699 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4700 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4701 req->rss_qid_l[j] = qid & 0xff;
4703 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4704 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4705 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4706 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4708 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4710 dev_err(&hdev->pdev->dev,
4711 "Configure rss indir table fail,status = %d\n",
4719 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4720 u16 *tc_size, u16 *tc_offset)
4722 struct hclge_rss_tc_mode_cmd *req;
4723 struct hclge_desc desc;
4727 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4728 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4730 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4733 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4734 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4735 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4736 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4737 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4738 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4739 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4741 req->rss_tc_mode[i] = cpu_to_le16(mode);
4744 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4746 dev_err(&hdev->pdev->dev,
4747 "Configure rss tc mode fail, status = %d\n", ret);
4752 static void hclge_get_rss_type(struct hclge_vport *vport)
4754 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4755 vport->rss_tuple_sets.ipv4_udp_en ||
4756 vport->rss_tuple_sets.ipv4_sctp_en ||
4757 vport->rss_tuple_sets.ipv6_tcp_en ||
4758 vport->rss_tuple_sets.ipv6_udp_en ||
4759 vport->rss_tuple_sets.ipv6_sctp_en)
4760 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4761 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4762 vport->rss_tuple_sets.ipv6_fragment_en)
4763 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4765 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4768 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4770 struct hclge_rss_input_tuple_cmd *req;
4771 struct hclge_desc desc;
4774 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4776 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4778 /* Get the tuple cfg from pf */
4779 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4780 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4781 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4782 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4783 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4784 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4785 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4786 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4787 hclge_get_rss_type(&hdev->vport[0]);
4788 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4790 dev_err(&hdev->pdev->dev,
4791 "Configure rss input fail, status = %d\n", ret);
4795 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4798 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4799 struct hclge_vport *vport = hclge_get_vport(handle);
4802 /* Get hash algorithm */
4804 switch (vport->rss_algo) {
4805 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4806 *hfunc = ETH_RSS_HASH_TOP;
4808 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4809 *hfunc = ETH_RSS_HASH_XOR;
4812 *hfunc = ETH_RSS_HASH_UNKNOWN;
4817 /* Get the RSS Key required by the user */
4819 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4821 /* Get indirect table */
4823 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4824 indir[i] = vport->rss_indirection_tbl[i];
4829 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4833 case ETH_RSS_HASH_TOP:
4834 *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4836 case ETH_RSS_HASH_XOR:
4837 *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4839 case ETH_RSS_HASH_NO_CHANGE:
4840 *hash_algo = vport->rss_algo;
4847 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4848 const u8 *key, const u8 hfunc)
4850 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4851 struct hclge_vport *vport = hclge_get_vport(handle);
4852 struct hclge_dev *hdev = vport->back;
4856 ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4858 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4862 /* Set the RSS Hash Key if specififed by the user */
4864 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4868 /* Update the shadow RSS key with user specified qids */
4869 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4871 ret = hclge_set_rss_algo_key(hdev, hash_algo,
4872 vport->rss_hash_key);
4876 vport->rss_algo = hash_algo;
4878 /* Update the shadow RSS table with user specified qids */
4879 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4880 vport->rss_indirection_tbl[i] = indir[i];
4882 /* Update the hardware */
4883 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4886 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4888 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4890 if (nfc->data & RXH_L4_B_2_3)
4891 hash_sets |= HCLGE_D_PORT_BIT;
4893 hash_sets &= ~HCLGE_D_PORT_BIT;
4895 if (nfc->data & RXH_IP_SRC)
4896 hash_sets |= HCLGE_S_IP_BIT;
4898 hash_sets &= ~HCLGE_S_IP_BIT;
4900 if (nfc->data & RXH_IP_DST)
4901 hash_sets |= HCLGE_D_IP_BIT;
4903 hash_sets &= ~HCLGE_D_IP_BIT;
4905 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4906 hash_sets |= HCLGE_V_TAG_BIT;
4911 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4912 struct ethtool_rxnfc *nfc,
4913 struct hclge_rss_input_tuple_cmd *req)
4915 struct hclge_dev *hdev = vport->back;
4918 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4919 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4920 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4921 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4922 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4923 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4924 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4925 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4927 tuple_sets = hclge_get_rss_hash_bits(nfc);
4928 switch (nfc->flow_type) {
4930 req->ipv4_tcp_en = tuple_sets;
4933 req->ipv6_tcp_en = tuple_sets;
4936 req->ipv4_udp_en = tuple_sets;
4939 req->ipv6_udp_en = tuple_sets;
4942 req->ipv4_sctp_en = tuple_sets;
4945 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4946 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4949 req->ipv6_sctp_en = tuple_sets;
4952 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4955 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4964 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4965 struct ethtool_rxnfc *nfc)
4967 struct hclge_vport *vport = hclge_get_vport(handle);
4968 struct hclge_dev *hdev = vport->back;
4969 struct hclge_rss_input_tuple_cmd *req;
4970 struct hclge_desc desc;
4973 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4974 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4977 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4978 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4980 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4982 dev_err(&hdev->pdev->dev,
4983 "failed to init rss tuple cmd, ret = %d\n", ret);
4987 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4989 dev_err(&hdev->pdev->dev,
4990 "Set rss tuple fail, status = %d\n", ret);
4994 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4995 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4996 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4997 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4998 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4999 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
5000 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
5001 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
5002 hclge_get_rss_type(vport);
5006 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
5009 switch (flow_type) {
5011 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
5014 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
5017 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
5020 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
5023 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
5026 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
5030 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
5039 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
5043 if (tuple_sets & HCLGE_D_PORT_BIT)
5044 tuple_data |= RXH_L4_B_2_3;
5045 if (tuple_sets & HCLGE_S_PORT_BIT)
5046 tuple_data |= RXH_L4_B_0_1;
5047 if (tuple_sets & HCLGE_D_IP_BIT)
5048 tuple_data |= RXH_IP_DST;
5049 if (tuple_sets & HCLGE_S_IP_BIT)
5050 tuple_data |= RXH_IP_SRC;
5055 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
5056 struct ethtool_rxnfc *nfc)
5058 struct hclge_vport *vport = hclge_get_vport(handle);
5064 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
5065 if (ret || !tuple_sets)
5068 nfc->data = hclge_convert_rss_tuple(tuple_sets);
5073 static int hclge_get_tc_size(struct hnae3_handle *handle)
5075 struct hclge_vport *vport = hclge_get_vport(handle);
5076 struct hclge_dev *hdev = vport->back;
5078 return hdev->pf_rss_size_max;
5081 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
5083 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
5084 struct hclge_vport *vport = hdev->vport;
5085 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
5086 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
5087 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
5088 struct hnae3_tc_info *tc_info;
5093 tc_info = &vport->nic.kinfo.tc_info;
5094 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5095 rss_size = tc_info->tqp_count[i];
5098 if (!(hdev->hw_tc_map & BIT(i)))
5101 /* tc_size set to hardware is the log2 of roundup power of two
5102 * of rss_size, the acutal queue size is limited by indirection
5105 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5107 dev_err(&hdev->pdev->dev,
5108 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
5113 roundup_size = roundup_pow_of_two(rss_size);
5114 roundup_size = ilog2(roundup_size);
5117 tc_size[i] = roundup_size;
5118 tc_offset[i] = tc_info->tqp_offset[i];
5121 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5124 int hclge_rss_init_hw(struct hclge_dev *hdev)
5126 struct hclge_vport *vport = hdev->vport;
5127 u16 *rss_indir = vport[0].rss_indirection_tbl;
5128 u8 *key = vport[0].rss_hash_key;
5129 u8 hfunc = vport[0].rss_algo;
5132 ret = hclge_set_rss_indir_table(hdev, rss_indir);
5136 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5140 ret = hclge_set_rss_input_tuple(hdev);
5144 return hclge_init_rss_tc_mode(hdev);
5147 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5149 struct hclge_vport *vport = &hdev->vport[0];
5152 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5153 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5156 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5158 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5159 int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5160 struct hclge_vport *vport = &hdev->vport[0];
5163 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5164 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5166 vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5167 vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5168 vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5169 vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5170 vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5171 vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5172 vport->rss_tuple_sets.ipv6_sctp_en =
5173 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5174 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5175 HCLGE_RSS_INPUT_TUPLE_SCTP;
5176 vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5178 vport->rss_algo = rss_algo;
5180 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5181 sizeof(*rss_ind_tbl), GFP_KERNEL);
5185 vport->rss_indirection_tbl = rss_ind_tbl;
5186 memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5188 hclge_rss_indir_init_cfg(hdev);
5193 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5194 int vector_id, bool en,
5195 struct hnae3_ring_chain_node *ring_chain)
5197 struct hclge_dev *hdev = vport->back;
5198 struct hnae3_ring_chain_node *node;
5199 struct hclge_desc desc;
5200 struct hclge_ctrl_vector_chain_cmd *req =
5201 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5202 enum hclge_cmd_status status;
5203 enum hclge_opcode_type op;
5204 u16 tqp_type_and_id;
5207 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5208 hclge_cmd_setup_basic_desc(&desc, op, false);
5209 req->int_vector_id_l = hnae3_get_field(vector_id,
5210 HCLGE_VECTOR_ID_L_M,
5211 HCLGE_VECTOR_ID_L_S);
5212 req->int_vector_id_h = hnae3_get_field(vector_id,
5213 HCLGE_VECTOR_ID_H_M,
5214 HCLGE_VECTOR_ID_H_S);
5217 for (node = ring_chain; node; node = node->next) {
5218 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5219 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
5221 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5222 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5223 HCLGE_TQP_ID_S, node->tqp_index);
5224 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5226 hnae3_get_field(node->int_gl_idx,
5227 HNAE3_RING_GL_IDX_M,
5228 HNAE3_RING_GL_IDX_S));
5229 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5230 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5231 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5232 req->vfid = vport->vport_id;
5234 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5236 dev_err(&hdev->pdev->dev,
5237 "Map TQP fail, status is %d.\n",
5243 hclge_cmd_setup_basic_desc(&desc,
5246 req->int_vector_id_l =
5247 hnae3_get_field(vector_id,
5248 HCLGE_VECTOR_ID_L_M,
5249 HCLGE_VECTOR_ID_L_S);
5250 req->int_vector_id_h =
5251 hnae3_get_field(vector_id,
5252 HCLGE_VECTOR_ID_H_M,
5253 HCLGE_VECTOR_ID_H_S);
5258 req->int_cause_num = i;
5259 req->vfid = vport->vport_id;
5260 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5262 dev_err(&hdev->pdev->dev,
5263 "Map TQP fail, status is %d.\n", status);
5271 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5272 struct hnae3_ring_chain_node *ring_chain)
5274 struct hclge_vport *vport = hclge_get_vport(handle);
5275 struct hclge_dev *hdev = vport->back;
5278 vector_id = hclge_get_vector_index(hdev, vector);
5279 if (vector_id < 0) {
5280 dev_err(&hdev->pdev->dev,
5281 "failed to get vector index. vector=%d\n", vector);
5285 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5288 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5289 struct hnae3_ring_chain_node *ring_chain)
5291 struct hclge_vport *vport = hclge_get_vport(handle);
5292 struct hclge_dev *hdev = vport->back;
5295 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5298 vector_id = hclge_get_vector_index(hdev, vector);
5299 if (vector_id < 0) {
5300 dev_err(&handle->pdev->dev,
5301 "Get vector index fail. ret =%d\n", vector_id);
5305 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5307 dev_err(&handle->pdev->dev,
5308 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5314 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5315 bool en_uc, bool en_mc, bool en_bc)
5317 struct hclge_vport *vport = &hdev->vport[vf_id];
5318 struct hnae3_handle *handle = &vport->nic;
5319 struct hclge_promisc_cfg_cmd *req;
5320 struct hclge_desc desc;
5321 bool uc_tx_en = en_uc;
5325 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5327 req = (struct hclge_promisc_cfg_cmd *)desc.data;
5330 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5333 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5334 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5335 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5336 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5337 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5338 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5339 req->extend_promisc = promisc_cfg;
5341 /* to be compatible with DEVICE_VERSION_V1/2 */
5343 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5344 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5345 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5346 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5347 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5348 req->promisc = promisc_cfg;
5350 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5352 dev_err(&hdev->pdev->dev,
5353 "failed to set vport %u promisc mode, ret = %d.\n",
5359 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5360 bool en_mc_pmc, bool en_bc_pmc)
5362 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5363 en_uc_pmc, en_mc_pmc, en_bc_pmc);
5366 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5369 struct hclge_vport *vport = hclge_get_vport(handle);
5370 struct hclge_dev *hdev = vport->back;
5371 bool en_bc_pmc = true;
5373 /* For device whose version below V2, if broadcast promisc enabled,
5374 * vlan filter is always bypassed. So broadcast promisc should be
5375 * disabled until user enable promisc mode
5377 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5378 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5380 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5384 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5386 struct hclge_vport *vport = hclge_get_vport(handle);
5388 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5391 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5393 if (hlist_empty(&hdev->fd_rule_list))
5394 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5397 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5399 if (!test_bit(location, hdev->fd_bmap)) {
5400 set_bit(location, hdev->fd_bmap);
5401 hdev->hclge_fd_rule_num++;
5405 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5407 if (test_bit(location, hdev->fd_bmap)) {
5408 clear_bit(location, hdev->fd_bmap);
5409 hdev->hclge_fd_rule_num--;
5413 static void hclge_fd_free_node(struct hclge_dev *hdev,
5414 struct hclge_fd_rule *rule)
5416 hlist_del(&rule->rule_node);
5418 hclge_sync_fd_state(hdev);
5421 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5422 struct hclge_fd_rule *old_rule,
5423 struct hclge_fd_rule *new_rule,
5424 enum HCLGE_FD_NODE_STATE state)
5427 case HCLGE_FD_TO_ADD:
5428 case HCLGE_FD_ACTIVE:
5429 /* 1) if the new state is TO_ADD, just replace the old rule
5430 * with the same location, no matter its state, because the
5431 * new rule will be configured to the hardware.
5432 * 2) if the new state is ACTIVE, it means the new rule
5433 * has been configured to the hardware, so just replace
5434 * the old rule node with the same location.
5435 * 3) for it doesn't add a new node to the list, so it's
5436 * unnecessary to update the rule number and fd_bmap.
5438 new_rule->rule_node.next = old_rule->rule_node.next;
5439 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5440 memcpy(old_rule, new_rule, sizeof(*old_rule));
5443 case HCLGE_FD_DELETED:
5444 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5445 hclge_fd_free_node(hdev, old_rule);
5447 case HCLGE_FD_TO_DEL:
5448 /* if new request is TO_DEL, and old rule is existent
5449 * 1) the state of old rule is TO_DEL, we need do nothing,
5450 * because we delete rule by location, other rule content
5452 * 2) the state of old rule is ACTIVE, we need to change its
5453 * state to TO_DEL, so the rule will be deleted when periodic
5454 * task being scheduled.
5455 * 3) the state of old rule is TO_ADD, it means the rule hasn't
5456 * been added to hardware, so we just delete the rule node from
5457 * fd_rule_list directly.
5459 if (old_rule->state == HCLGE_FD_TO_ADD) {
5460 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5461 hclge_fd_free_node(hdev, old_rule);
5464 old_rule->state = HCLGE_FD_TO_DEL;
5469 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5471 struct hclge_fd_rule **parent)
5473 struct hclge_fd_rule *rule;
5474 struct hlist_node *node;
5476 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5477 if (rule->location == location)
5479 else if (rule->location > location)
5481 /* record the parent node, use to keep the nodes in fd_rule_list
5490 /* insert fd rule node in ascend order according to rule->location */
5491 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5492 struct hclge_fd_rule *rule,
5493 struct hclge_fd_rule *parent)
5495 INIT_HLIST_NODE(&rule->rule_node);
5498 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5500 hlist_add_head(&rule->rule_node, hlist);
5503 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5504 struct hclge_fd_user_def_cfg *cfg)
5506 struct hclge_fd_user_def_cfg_cmd *req;
5507 struct hclge_desc desc;
5511 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5513 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5515 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5516 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5517 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5518 req->ol2_cfg = cpu_to_le16(data);
5521 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5522 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5523 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5524 req->ol3_cfg = cpu_to_le16(data);
5527 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5528 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5529 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5530 req->ol4_cfg = cpu_to_le16(data);
5532 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5534 dev_err(&hdev->pdev->dev,
5535 "failed to set fd user def data, ret= %d\n", ret);
5539 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5543 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5547 spin_lock_bh(&hdev->fd_rule_lock);
5549 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5551 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5554 spin_unlock_bh(&hdev->fd_rule_lock);
5557 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5558 struct hclge_fd_rule *rule)
5560 struct hlist_head *hlist = &hdev->fd_rule_list;
5561 struct hclge_fd_rule *fd_rule, *parent = NULL;
5562 struct hclge_fd_user_def_info *info, *old_info;
5563 struct hclge_fd_user_def_cfg *cfg;
5565 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5566 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5569 /* for valid layer is start from 1, so need minus 1 to get the cfg */
5570 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5571 info = &rule->ep.user_def;
5573 if (!cfg->ref_cnt || cfg->offset == info->offset)
5576 if (cfg->ref_cnt > 1)
5579 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5581 old_info = &fd_rule->ep.user_def;
5582 if (info->layer == old_info->layer)
5587 dev_err(&hdev->pdev->dev,
5588 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5593 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5594 struct hclge_fd_rule *rule)
5596 struct hclge_fd_user_def_cfg *cfg;
5598 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5599 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5602 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5603 if (!cfg->ref_cnt) {
5604 cfg->offset = rule->ep.user_def.offset;
5605 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5610 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5611 struct hclge_fd_rule *rule)
5613 struct hclge_fd_user_def_cfg *cfg;
5615 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5616 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5619 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5624 if (!cfg->ref_cnt) {
5626 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5630 static void hclge_update_fd_list(struct hclge_dev *hdev,
5631 enum HCLGE_FD_NODE_STATE state, u16 location,
5632 struct hclge_fd_rule *new_rule)
5634 struct hlist_head *hlist = &hdev->fd_rule_list;
5635 struct hclge_fd_rule *fd_rule, *parent = NULL;
5637 fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5639 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5640 if (state == HCLGE_FD_ACTIVE)
5641 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5642 hclge_sync_fd_user_def_cfg(hdev, true);
5644 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5648 /* it's unlikely to fail here, because we have checked the rule
5651 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5652 dev_warn(&hdev->pdev->dev,
5653 "failed to delete fd rule %u, it's inexistent\n",
5658 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5659 hclge_sync_fd_user_def_cfg(hdev, true);
5661 hclge_fd_insert_rule_node(hlist, new_rule, parent);
5662 hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5664 if (state == HCLGE_FD_TO_ADD) {
5665 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5666 hclge_task_schedule(hdev, 0);
5670 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5672 struct hclge_get_fd_mode_cmd *req;
5673 struct hclge_desc desc;
5676 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5678 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5680 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5682 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5686 *fd_mode = req->mode;
5691 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5692 u32 *stage1_entry_num,
5693 u32 *stage2_entry_num,
5694 u16 *stage1_counter_num,
5695 u16 *stage2_counter_num)
5697 struct hclge_get_fd_allocation_cmd *req;
5698 struct hclge_desc desc;
5701 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5703 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5705 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5707 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5712 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5713 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5714 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5715 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5720 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5721 enum HCLGE_FD_STAGE stage_num)
5723 struct hclge_set_fd_key_config_cmd *req;
5724 struct hclge_fd_key_cfg *stage;
5725 struct hclge_desc desc;
5728 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5730 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5731 stage = &hdev->fd_cfg.key_cfg[stage_num];
5732 req->stage = stage_num;
5733 req->key_select = stage->key_sel;
5734 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5735 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5736 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5737 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5738 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5739 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5741 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5743 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5748 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5750 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5752 spin_lock_bh(&hdev->fd_rule_lock);
5753 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5754 spin_unlock_bh(&hdev->fd_rule_lock);
5756 hclge_fd_set_user_def_cmd(hdev, cfg);
5759 static int hclge_init_fd_config(struct hclge_dev *hdev)
5761 #define LOW_2_WORDS 0x03
5762 struct hclge_fd_key_cfg *key_cfg;
5765 if (!hnae3_dev_fd_supported(hdev))
5768 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5772 switch (hdev->fd_cfg.fd_mode) {
5773 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5774 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5776 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5777 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5780 dev_err(&hdev->pdev->dev,
5781 "Unsupported flow director mode %u\n",
5782 hdev->fd_cfg.fd_mode);
5786 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5787 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5788 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5789 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5790 key_cfg->outer_sipv6_word_en = 0;
5791 key_cfg->outer_dipv6_word_en = 0;
5793 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5794 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5795 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5796 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5798 /* If use max 400bit key, we can support tuples for ether type */
5799 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5800 key_cfg->tuple_active |=
5801 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5802 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5803 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5806 /* roce_type is used to filter roce frames
5807 * dst_vport is used to specify the rule
5809 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5811 ret = hclge_get_fd_allocation(hdev,
5812 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5813 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5814 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5815 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5819 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5822 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5823 int loc, u8 *key, bool is_add)
5825 struct hclge_fd_tcam_config_1_cmd *req1;
5826 struct hclge_fd_tcam_config_2_cmd *req2;
5827 struct hclge_fd_tcam_config_3_cmd *req3;
5828 struct hclge_desc desc[3];
5831 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5832 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5833 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5834 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5835 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5837 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5838 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5839 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5841 req1->stage = stage;
5842 req1->xy_sel = sel_x ? 1 : 0;
5843 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5844 req1->index = cpu_to_le32(loc);
5845 req1->entry_vld = sel_x ? is_add : 0;
5848 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5849 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5850 sizeof(req2->tcam_data));
5851 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5852 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5855 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5857 dev_err(&hdev->pdev->dev,
5858 "config tcam key fail, ret=%d\n",
5864 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5865 struct hclge_fd_ad_data *action)
5867 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5868 struct hclge_fd_ad_config_cmd *req;
5869 struct hclge_desc desc;
5873 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5875 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5876 req->index = cpu_to_le32(loc);
5879 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5880 action->write_rule_id_to_bd);
5881 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5883 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5884 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5885 action->override_tc);
5886 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5887 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5890 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5891 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5892 action->forward_to_direct_queue);
5893 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5895 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5896 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5897 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5898 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5899 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5900 action->counter_id);
5902 req->ad_data = cpu_to_le64(ad_data);
5903 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5905 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5910 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5911 struct hclge_fd_rule *rule)
5913 int offset, moffset, ip_offset;
5914 enum HCLGE_FD_KEY_OPT key_opt;
5915 u16 tmp_x_s, tmp_y_s;
5916 u32 tmp_x_l, tmp_y_l;
5920 if (rule->unused_tuple & BIT(tuple_bit))
5923 key_opt = tuple_key_info[tuple_bit].key_opt;
5924 offset = tuple_key_info[tuple_bit].offset;
5925 moffset = tuple_key_info[tuple_bit].moffset;
5929 calc_x(*key_x, p[offset], p[moffset]);
5930 calc_y(*key_y, p[offset], p[moffset]);
5934 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5935 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5936 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5937 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5941 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5942 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5943 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5944 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5948 for (i = 0; i < ETH_ALEN; i++) {
5949 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5951 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5957 ip_offset = IPV4_INDEX * sizeof(u32);
5958 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5959 *(u32 *)(&p[moffset + ip_offset]));
5960 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5961 *(u32 *)(&p[moffset + ip_offset]));
5962 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5963 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5971 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5972 u8 vf_id, u8 network_port_id)
5974 u32 port_number = 0;
5976 if (port_type == HOST_PORT) {
5977 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5979 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5981 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5983 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5984 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5985 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5991 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5992 __le32 *key_x, __le32 *key_y,
5993 struct hclge_fd_rule *rule)
5995 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5996 u8 cur_pos = 0, tuple_size, shift_bits;
5999 for (i = 0; i < MAX_META_DATA; i++) {
6000 tuple_size = meta_data_key_info[i].key_length;
6001 tuple_bit = key_cfg->meta_data_active & BIT(i);
6003 switch (tuple_bit) {
6004 case BIT(ROCE_TYPE):
6005 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
6006 cur_pos += tuple_size;
6008 case BIT(DST_VPORT):
6009 port_number = hclge_get_port_number(HOST_PORT, 0,
6011 hnae3_set_field(meta_data,
6012 GENMASK(cur_pos + tuple_size, cur_pos),
6013 cur_pos, port_number);
6014 cur_pos += tuple_size;
6021 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
6022 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
6023 shift_bits = sizeof(meta_data) * 8 - cur_pos;
6025 *key_x = cpu_to_le32(tmp_x << shift_bits);
6026 *key_y = cpu_to_le32(tmp_y << shift_bits);
6029 /* A complete key is combined with meta data key and tuple key.
6030 * Meta data key is stored at the MSB region, and tuple key is stored at
6031 * the LSB region, unused bits will be filled 0.
6033 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
6034 struct hclge_fd_rule *rule)
6036 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
6037 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
6038 u8 *cur_key_x, *cur_key_y;
6039 u8 meta_data_region;
6044 memset(key_x, 0, sizeof(key_x));
6045 memset(key_y, 0, sizeof(key_y));
6049 for (i = 0; i < MAX_TUPLE; i++) {
6052 tuple_size = tuple_key_info[i].key_length / 8;
6053 if (!(key_cfg->tuple_active & BIT(i)))
6056 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
6059 cur_key_x += tuple_size;
6060 cur_key_y += tuple_size;
6064 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
6065 MAX_META_DATA_LENGTH / 8;
6067 hclge_fd_convert_meta_data(key_cfg,
6068 (__le32 *)(key_x + meta_data_region),
6069 (__le32 *)(key_y + meta_data_region),
6072 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
6075 dev_err(&hdev->pdev->dev,
6076 "fd key_y config fail, loc=%u, ret=%d\n",
6077 rule->queue_id, ret);
6081 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
6084 dev_err(&hdev->pdev->dev,
6085 "fd key_x config fail, loc=%u, ret=%d\n",
6086 rule->queue_id, ret);
6090 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
6091 struct hclge_fd_rule *rule)
6093 struct hclge_vport *vport = hdev->vport;
6094 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
6095 struct hclge_fd_ad_data ad_data;
6097 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
6098 ad_data.ad_id = rule->location;
6100 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6101 ad_data.drop_packet = true;
6102 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6103 ad_data.override_tc = true;
6105 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6107 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6109 ad_data.forward_to_direct_queue = true;
6110 ad_data.queue_id = rule->queue_id;
6113 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6114 ad_data.use_counter = true;
6115 ad_data.counter_id = rule->vf_id %
6116 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6118 ad_data.use_counter = false;
6119 ad_data.counter_id = 0;
6122 ad_data.use_next_stage = false;
6123 ad_data.next_input_key = 0;
6125 ad_data.write_rule_id_to_bd = true;
6126 ad_data.rule_id = rule->location;
6128 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6131 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6134 if (!spec || !unused_tuple)
6137 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6140 *unused_tuple |= BIT(INNER_SRC_IP);
6143 *unused_tuple |= BIT(INNER_DST_IP);
6146 *unused_tuple |= BIT(INNER_SRC_PORT);
6149 *unused_tuple |= BIT(INNER_DST_PORT);
6152 *unused_tuple |= BIT(INNER_IP_TOS);
6157 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6160 if (!spec || !unused_tuple)
6163 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6164 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6167 *unused_tuple |= BIT(INNER_SRC_IP);
6170 *unused_tuple |= BIT(INNER_DST_IP);
6173 *unused_tuple |= BIT(INNER_IP_TOS);
6176 *unused_tuple |= BIT(INNER_IP_PROTO);
6178 if (spec->l4_4_bytes)
6181 if (spec->ip_ver != ETH_RX_NFC_IP4)
6187 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6190 if (!spec || !unused_tuple)
6193 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6195 /* check whether src/dst ip address used */
6196 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6197 *unused_tuple |= BIT(INNER_SRC_IP);
6199 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6200 *unused_tuple |= BIT(INNER_DST_IP);
6203 *unused_tuple |= BIT(INNER_SRC_PORT);
6206 *unused_tuple |= BIT(INNER_DST_PORT);
6209 *unused_tuple |= BIT(INNER_IP_TOS);
6214 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6217 if (!spec || !unused_tuple)
6220 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6221 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6223 /* check whether src/dst ip address used */
6224 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6225 *unused_tuple |= BIT(INNER_SRC_IP);
6227 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6228 *unused_tuple |= BIT(INNER_DST_IP);
6230 if (!spec->l4_proto)
6231 *unused_tuple |= BIT(INNER_IP_PROTO);
6234 *unused_tuple |= BIT(INNER_IP_TOS);
6236 if (spec->l4_4_bytes)
6242 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6244 if (!spec || !unused_tuple)
6247 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6248 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6249 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6251 if (is_zero_ether_addr(spec->h_source))
6252 *unused_tuple |= BIT(INNER_SRC_MAC);
6254 if (is_zero_ether_addr(spec->h_dest))
6255 *unused_tuple |= BIT(INNER_DST_MAC);
6258 *unused_tuple |= BIT(INNER_ETH_TYPE);
6263 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6264 struct ethtool_rx_flow_spec *fs,
6267 if (fs->flow_type & FLOW_EXT) {
6268 if (fs->h_ext.vlan_etype) {
6269 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6273 if (!fs->h_ext.vlan_tci)
6274 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6276 if (fs->m_ext.vlan_tci &&
6277 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6278 dev_err(&hdev->pdev->dev,
6279 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6280 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6284 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6287 if (fs->flow_type & FLOW_MAC_EXT) {
6288 if (hdev->fd_cfg.fd_mode !=
6289 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6290 dev_err(&hdev->pdev->dev,
6291 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6295 if (is_zero_ether_addr(fs->h_ext.h_dest))
6296 *unused_tuple |= BIT(INNER_DST_MAC);
6298 *unused_tuple &= ~BIT(INNER_DST_MAC);
6304 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6305 struct hclge_fd_user_def_info *info)
6307 switch (flow_type) {
6309 info->layer = HCLGE_FD_USER_DEF_L2;
6310 *unused_tuple &= ~BIT(INNER_L2_RSV);
6313 case IPV6_USER_FLOW:
6314 info->layer = HCLGE_FD_USER_DEF_L3;
6315 *unused_tuple &= ~BIT(INNER_L3_RSV);
6321 info->layer = HCLGE_FD_USER_DEF_L4;
6322 *unused_tuple &= ~BIT(INNER_L4_RSV);
6331 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6333 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6336 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6337 struct ethtool_rx_flow_spec *fs,
6339 struct hclge_fd_user_def_info *info)
6341 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6342 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6343 u16 data, offset, data_mask, offset_mask;
6346 info->layer = HCLGE_FD_USER_DEF_NONE;
6347 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6349 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6352 /* user-def data from ethtool is 64 bit value, the bit0~15 is used
6353 * for data, and bit32~47 is used for offset.
6355 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6356 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6357 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6358 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6360 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6361 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6365 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6366 dev_err(&hdev->pdev->dev,
6367 "user-def offset[%u] should be no more than %u\n",
6368 offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6372 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6373 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6377 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6379 dev_err(&hdev->pdev->dev,
6380 "unsupported flow type for user-def bytes, ret = %d\n",
6386 info->data_mask = data_mask;
6387 info->offset = offset;
6392 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6393 struct ethtool_rx_flow_spec *fs,
6395 struct hclge_fd_user_def_info *info)
6400 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6401 dev_err(&hdev->pdev->dev,
6402 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6404 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6408 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6412 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6413 switch (flow_type) {
6417 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6421 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6427 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6430 case IPV6_USER_FLOW:
6431 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6435 if (hdev->fd_cfg.fd_mode !=
6436 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6437 dev_err(&hdev->pdev->dev,
6438 "ETHER_FLOW is not supported in current fd mode!\n");
6442 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6446 dev_err(&hdev->pdev->dev,
6447 "unsupported protocol type, protocol type = %#x\n",
6453 dev_err(&hdev->pdev->dev,
6454 "failed to check flow union tuple, ret = %d\n",
6459 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6462 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6463 struct ethtool_rx_flow_spec *fs,
6464 struct hclge_fd_rule *rule, u8 ip_proto)
6466 rule->tuples.src_ip[IPV4_INDEX] =
6467 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6468 rule->tuples_mask.src_ip[IPV4_INDEX] =
6469 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6471 rule->tuples.dst_ip[IPV4_INDEX] =
6472 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6473 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6474 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6476 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6477 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6479 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6480 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6482 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6483 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6485 rule->tuples.ether_proto = ETH_P_IP;
6486 rule->tuples_mask.ether_proto = 0xFFFF;
6488 rule->tuples.ip_proto = ip_proto;
6489 rule->tuples_mask.ip_proto = 0xFF;
6492 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6493 struct ethtool_rx_flow_spec *fs,
6494 struct hclge_fd_rule *rule)
6496 rule->tuples.src_ip[IPV4_INDEX] =
6497 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6498 rule->tuples_mask.src_ip[IPV4_INDEX] =
6499 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6501 rule->tuples.dst_ip[IPV4_INDEX] =
6502 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6503 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6504 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6506 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6507 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6509 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6510 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6512 rule->tuples.ether_proto = ETH_P_IP;
6513 rule->tuples_mask.ether_proto = 0xFFFF;
6516 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6517 struct ethtool_rx_flow_spec *fs,
6518 struct hclge_fd_rule *rule, u8 ip_proto)
6520 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6522 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6525 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6527 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6530 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6531 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6533 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6534 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6536 rule->tuples.ether_proto = ETH_P_IPV6;
6537 rule->tuples_mask.ether_proto = 0xFFFF;
6539 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6540 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6542 rule->tuples.ip_proto = ip_proto;
6543 rule->tuples_mask.ip_proto = 0xFF;
6546 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6547 struct ethtool_rx_flow_spec *fs,
6548 struct hclge_fd_rule *rule)
6550 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6552 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6555 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6557 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6560 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6561 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6563 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6564 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6566 rule->tuples.ether_proto = ETH_P_IPV6;
6567 rule->tuples_mask.ether_proto = 0xFFFF;
6570 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6571 struct ethtool_rx_flow_spec *fs,
6572 struct hclge_fd_rule *rule)
6574 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6575 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6577 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6578 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6580 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6581 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6584 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6585 struct hclge_fd_rule *rule)
6587 switch (info->layer) {
6588 case HCLGE_FD_USER_DEF_L2:
6589 rule->tuples.l2_user_def = info->data;
6590 rule->tuples_mask.l2_user_def = info->data_mask;
6592 case HCLGE_FD_USER_DEF_L3:
6593 rule->tuples.l3_user_def = info->data;
6594 rule->tuples_mask.l3_user_def = info->data_mask;
6596 case HCLGE_FD_USER_DEF_L4:
6597 rule->tuples.l4_user_def = (u32)info->data << 16;
6598 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6604 rule->ep.user_def = *info;
6607 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6608 struct ethtool_rx_flow_spec *fs,
6609 struct hclge_fd_rule *rule,
6610 struct hclge_fd_user_def_info *info)
6612 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6614 switch (flow_type) {
6616 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6619 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6622 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6625 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6628 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6631 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6634 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6636 case IPV6_USER_FLOW:
6637 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6640 hclge_fd_get_ether_tuple(hdev, fs, rule);
6646 if (fs->flow_type & FLOW_EXT) {
6647 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6648 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6649 hclge_fd_get_user_def_tuple(info, rule);
6652 if (fs->flow_type & FLOW_MAC_EXT) {
6653 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6654 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6660 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6661 struct hclge_fd_rule *rule)
6665 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6669 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6672 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6673 struct hclge_fd_rule *rule)
6677 spin_lock_bh(&hdev->fd_rule_lock);
6679 if (hdev->fd_active_type != rule->rule_type &&
6680 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6681 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6682 dev_err(&hdev->pdev->dev,
6683 "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6684 rule->rule_type, hdev->fd_active_type);
6685 spin_unlock_bh(&hdev->fd_rule_lock);
6689 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6693 ret = hclge_clear_arfs_rules(hdev);
6697 ret = hclge_fd_config_rule(hdev, rule);
6701 rule->state = HCLGE_FD_ACTIVE;
6702 hdev->fd_active_type = rule->rule_type;
6703 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6706 spin_unlock_bh(&hdev->fd_rule_lock);
6710 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6712 struct hclge_vport *vport = hclge_get_vport(handle);
6713 struct hclge_dev *hdev = vport->back;
6715 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6718 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6719 u16 *vport_id, u8 *action, u16 *queue_id)
6721 struct hclge_vport *vport = hdev->vport;
6723 if (ring_cookie == RX_CLS_FLOW_DISC) {
6724 *action = HCLGE_FD_ACTION_DROP_PACKET;
6726 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6727 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6730 /* To keep consistent with user's configuration, minus 1 when
6731 * printing 'vf', because vf id from ethtool is added 1 for vf.
6733 if (vf > hdev->num_req_vfs) {
6734 dev_err(&hdev->pdev->dev,
6735 "Error: vf id (%u) should be less than %u\n",
6736 vf - 1, hdev->num_req_vfs);
6740 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6741 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6744 dev_err(&hdev->pdev->dev,
6745 "Error: queue id (%u) > max tqp num (%u)\n",
6750 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6757 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6758 struct ethtool_rxnfc *cmd)
6760 struct hclge_vport *vport = hclge_get_vport(handle);
6761 struct hclge_dev *hdev = vport->back;
6762 struct hclge_fd_user_def_info info;
6763 u16 dst_vport_id = 0, q_index = 0;
6764 struct ethtool_rx_flow_spec *fs;
6765 struct hclge_fd_rule *rule;
6770 if (!hnae3_dev_fd_supported(hdev)) {
6771 dev_err(&hdev->pdev->dev,
6772 "flow table director is not supported\n");
6777 dev_err(&hdev->pdev->dev,
6778 "please enable flow director first\n");
6782 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6784 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6788 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6793 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6797 ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6803 rule->flow_type = fs->flow_type;
6804 rule->location = fs->location;
6805 rule->unused_tuple = unused;
6806 rule->vf_id = dst_vport_id;
6807 rule->queue_id = q_index;
6808 rule->action = action;
6809 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6811 ret = hclge_add_fd_entry_common(hdev, rule);
6818 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6819 struct ethtool_rxnfc *cmd)
6821 struct hclge_vport *vport = hclge_get_vport(handle);
6822 struct hclge_dev *hdev = vport->back;
6823 struct ethtool_rx_flow_spec *fs;
6826 if (!hnae3_dev_fd_supported(hdev))
6829 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6831 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6834 spin_lock_bh(&hdev->fd_rule_lock);
6835 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6836 !test_bit(fs->location, hdev->fd_bmap)) {
6837 dev_err(&hdev->pdev->dev,
6838 "Delete fail, rule %u is inexistent\n", fs->location);
6839 spin_unlock_bh(&hdev->fd_rule_lock);
6843 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6848 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6851 spin_unlock_bh(&hdev->fd_rule_lock);
6855 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6858 struct hclge_fd_rule *rule;
6859 struct hlist_node *node;
6862 if (!hnae3_dev_fd_supported(hdev))
6865 spin_lock_bh(&hdev->fd_rule_lock);
6867 for_each_set_bit(location, hdev->fd_bmap,
6868 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6869 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6873 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6875 hlist_del(&rule->rule_node);
6878 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6879 hdev->hclge_fd_rule_num = 0;
6880 bitmap_zero(hdev->fd_bmap,
6881 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6884 spin_unlock_bh(&hdev->fd_rule_lock);
6887 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6889 hclge_clear_fd_rules_in_list(hdev, true);
6890 hclge_fd_disable_user_def(hdev);
6893 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6895 struct hclge_vport *vport = hclge_get_vport(handle);
6896 struct hclge_dev *hdev = vport->back;
6897 struct hclge_fd_rule *rule;
6898 struct hlist_node *node;
6900 /* Return ok here, because reset error handling will check this
6901 * return value. If error is returned here, the reset process will
6904 if (!hnae3_dev_fd_supported(hdev))
6907 /* if fd is disabled, should not restore it when reset */
6911 spin_lock_bh(&hdev->fd_rule_lock);
6912 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6913 if (rule->state == HCLGE_FD_ACTIVE)
6914 rule->state = HCLGE_FD_TO_ADD;
6916 spin_unlock_bh(&hdev->fd_rule_lock);
6917 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6922 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6923 struct ethtool_rxnfc *cmd)
6925 struct hclge_vport *vport = hclge_get_vport(handle);
6926 struct hclge_dev *hdev = vport->back;
6928 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6931 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6932 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6937 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6938 struct ethtool_tcpip4_spec *spec,
6939 struct ethtool_tcpip4_spec *spec_mask)
6941 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6942 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6943 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6945 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6946 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6947 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6949 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6950 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6951 0 : cpu_to_be16(rule->tuples_mask.src_port);
6953 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6954 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6955 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6957 spec->tos = rule->tuples.ip_tos;
6958 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6959 0 : rule->tuples_mask.ip_tos;
6962 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6963 struct ethtool_usrip4_spec *spec,
6964 struct ethtool_usrip4_spec *spec_mask)
6966 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6967 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6968 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6970 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6971 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6972 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6974 spec->tos = rule->tuples.ip_tos;
6975 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6976 0 : rule->tuples_mask.ip_tos;
6978 spec->proto = rule->tuples.ip_proto;
6979 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6980 0 : rule->tuples_mask.ip_proto;
6982 spec->ip_ver = ETH_RX_NFC_IP4;
6985 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6986 struct ethtool_tcpip6_spec *spec,
6987 struct ethtool_tcpip6_spec *spec_mask)
6989 cpu_to_be32_array(spec->ip6src,
6990 rule->tuples.src_ip, IPV6_SIZE);
6991 cpu_to_be32_array(spec->ip6dst,
6992 rule->tuples.dst_ip, IPV6_SIZE);
6993 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6994 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6996 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6999 if (rule->unused_tuple & BIT(INNER_DST_IP))
7000 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
7002 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
7005 spec->tclass = rule->tuples.ip_tos;
7006 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
7007 0 : rule->tuples_mask.ip_tos;
7009 spec->psrc = cpu_to_be16(rule->tuples.src_port);
7010 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
7011 0 : cpu_to_be16(rule->tuples_mask.src_port);
7013 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
7014 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
7015 0 : cpu_to_be16(rule->tuples_mask.dst_port);
7018 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
7019 struct ethtool_usrip6_spec *spec,
7020 struct ethtool_usrip6_spec *spec_mask)
7022 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
7023 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
7024 if (rule->unused_tuple & BIT(INNER_SRC_IP))
7025 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
7027 cpu_to_be32_array(spec_mask->ip6src,
7028 rule->tuples_mask.src_ip, IPV6_SIZE);
7030 if (rule->unused_tuple & BIT(INNER_DST_IP))
7031 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
7033 cpu_to_be32_array(spec_mask->ip6dst,
7034 rule->tuples_mask.dst_ip, IPV6_SIZE);
7036 spec->tclass = rule->tuples.ip_tos;
7037 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
7038 0 : rule->tuples_mask.ip_tos;
7040 spec->l4_proto = rule->tuples.ip_proto;
7041 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
7042 0 : rule->tuples_mask.ip_proto;
7045 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
7046 struct ethhdr *spec,
7047 struct ethhdr *spec_mask)
7049 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
7050 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
7052 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
7053 eth_zero_addr(spec_mask->h_source);
7055 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
7057 if (rule->unused_tuple & BIT(INNER_DST_MAC))
7058 eth_zero_addr(spec_mask->h_dest);
7060 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
7062 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
7063 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
7064 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
7067 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
7068 struct hclge_fd_rule *rule)
7070 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
7071 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
7072 fs->h_ext.data[0] = 0;
7073 fs->h_ext.data[1] = 0;
7074 fs->m_ext.data[0] = 0;
7075 fs->m_ext.data[1] = 0;
7077 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
7078 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
7080 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
7081 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
7085 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
7086 struct hclge_fd_rule *rule)
7088 if (fs->flow_type & FLOW_EXT) {
7089 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
7090 fs->m_ext.vlan_tci =
7091 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
7092 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
7094 hclge_fd_get_user_def_info(fs, rule);
7097 if (fs->flow_type & FLOW_MAC_EXT) {
7098 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
7099 if (rule->unused_tuple & BIT(INNER_DST_MAC))
7100 eth_zero_addr(fs->m_u.ether_spec.h_dest);
7102 ether_addr_copy(fs->m_u.ether_spec.h_dest,
7103 rule->tuples_mask.dst_mac);
7107 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7108 struct ethtool_rxnfc *cmd)
7110 struct hclge_vport *vport = hclge_get_vport(handle);
7111 struct hclge_fd_rule *rule = NULL;
7112 struct hclge_dev *hdev = vport->back;
7113 struct ethtool_rx_flow_spec *fs;
7114 struct hlist_node *node2;
7116 if (!hnae3_dev_fd_supported(hdev))
7119 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7121 spin_lock_bh(&hdev->fd_rule_lock);
7123 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7124 if (rule->location >= fs->location)
7128 if (!rule || fs->location != rule->location) {
7129 spin_unlock_bh(&hdev->fd_rule_lock);
7134 fs->flow_type = rule->flow_type;
7135 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7139 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7140 &fs->m_u.tcp_ip4_spec);
7143 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7144 &fs->m_u.usr_ip4_spec);
7149 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7150 &fs->m_u.tcp_ip6_spec);
7152 case IPV6_USER_FLOW:
7153 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7154 &fs->m_u.usr_ip6_spec);
7156 /* The flow type of fd rule has been checked before adding in to rule
7157 * list. As other flow types have been handled, it must be ETHER_FLOW
7158 * for the default case
7161 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7162 &fs->m_u.ether_spec);
7166 hclge_fd_get_ext_info(fs, rule);
7168 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7169 fs->ring_cookie = RX_CLS_FLOW_DISC;
7173 fs->ring_cookie = rule->queue_id;
7174 vf_id = rule->vf_id;
7175 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7176 fs->ring_cookie |= vf_id;
7179 spin_unlock_bh(&hdev->fd_rule_lock);
7184 static int hclge_get_all_rules(struct hnae3_handle *handle,
7185 struct ethtool_rxnfc *cmd, u32 *rule_locs)
7187 struct hclge_vport *vport = hclge_get_vport(handle);
7188 struct hclge_dev *hdev = vport->back;
7189 struct hclge_fd_rule *rule;
7190 struct hlist_node *node2;
7193 if (!hnae3_dev_fd_supported(hdev))
7196 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7198 spin_lock_bh(&hdev->fd_rule_lock);
7199 hlist_for_each_entry_safe(rule, node2,
7200 &hdev->fd_rule_list, rule_node) {
7201 if (cnt == cmd->rule_cnt) {
7202 spin_unlock_bh(&hdev->fd_rule_lock);
7206 if (rule->state == HCLGE_FD_TO_DEL)
7209 rule_locs[cnt] = rule->location;
7213 spin_unlock_bh(&hdev->fd_rule_lock);
7215 cmd->rule_cnt = cnt;
7220 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7221 struct hclge_fd_rule_tuples *tuples)
7223 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7224 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7226 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7227 tuples->ip_proto = fkeys->basic.ip_proto;
7228 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7230 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7231 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7232 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7236 for (i = 0; i < IPV6_SIZE; i++) {
7237 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7238 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7243 /* traverse all rules, check whether an existed rule has the same tuples */
7244 static struct hclge_fd_rule *
7245 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7246 const struct hclge_fd_rule_tuples *tuples)
7248 struct hclge_fd_rule *rule = NULL;
7249 struct hlist_node *node;
7251 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7252 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7259 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7260 struct hclge_fd_rule *rule)
7262 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7263 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7264 BIT(INNER_SRC_PORT);
7267 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7268 rule->state = HCLGE_FD_TO_ADD;
7269 if (tuples->ether_proto == ETH_P_IP) {
7270 if (tuples->ip_proto == IPPROTO_TCP)
7271 rule->flow_type = TCP_V4_FLOW;
7273 rule->flow_type = UDP_V4_FLOW;
7275 if (tuples->ip_proto == IPPROTO_TCP)
7276 rule->flow_type = TCP_V6_FLOW;
7278 rule->flow_type = UDP_V6_FLOW;
7280 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7281 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7284 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7285 u16 flow_id, struct flow_keys *fkeys)
7287 struct hclge_vport *vport = hclge_get_vport(handle);
7288 struct hclge_fd_rule_tuples new_tuples = {};
7289 struct hclge_dev *hdev = vport->back;
7290 struct hclge_fd_rule *rule;
7293 if (!hnae3_dev_fd_supported(hdev))
7296 /* when there is already fd rule existed add by user,
7297 * arfs should not work
7299 spin_lock_bh(&hdev->fd_rule_lock);
7300 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7301 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7302 spin_unlock_bh(&hdev->fd_rule_lock);
7306 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7308 /* check is there flow director filter existed for this flow,
7309 * if not, create a new filter for it;
7310 * if filter exist with different queue id, modify the filter;
7311 * if filter exist with same queue id, do nothing
7313 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7315 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7316 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7317 spin_unlock_bh(&hdev->fd_rule_lock);
7321 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7323 spin_unlock_bh(&hdev->fd_rule_lock);
7327 rule->location = bit_id;
7328 rule->arfs.flow_id = flow_id;
7329 rule->queue_id = queue_id;
7330 hclge_fd_build_arfs_rule(&new_tuples, rule);
7331 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7332 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7333 } else if (rule->queue_id != queue_id) {
7334 rule->queue_id = queue_id;
7335 rule->state = HCLGE_FD_TO_ADD;
7336 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7337 hclge_task_schedule(hdev, 0);
7339 spin_unlock_bh(&hdev->fd_rule_lock);
7340 return rule->location;
7343 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7345 #ifdef CONFIG_RFS_ACCEL
7346 struct hnae3_handle *handle = &hdev->vport[0].nic;
7347 struct hclge_fd_rule *rule;
7348 struct hlist_node *node;
7350 spin_lock_bh(&hdev->fd_rule_lock);
7351 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7352 spin_unlock_bh(&hdev->fd_rule_lock);
7355 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7356 if (rule->state != HCLGE_FD_ACTIVE)
7358 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7359 rule->arfs.flow_id, rule->location)) {
7360 rule->state = HCLGE_FD_TO_DEL;
7361 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7364 spin_unlock_bh(&hdev->fd_rule_lock);
7368 /* make sure being called after lock up with fd_rule_lock */
7369 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7371 #ifdef CONFIG_RFS_ACCEL
7372 struct hclge_fd_rule *rule;
7373 struct hlist_node *node;
7376 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7379 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7380 switch (rule->state) {
7381 case HCLGE_FD_TO_DEL:
7382 case HCLGE_FD_ACTIVE:
7383 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7384 rule->location, NULL, false);
7388 case HCLGE_FD_TO_ADD:
7389 hclge_fd_dec_rule_cnt(hdev, rule->location);
7390 hlist_del(&rule->rule_node);
7397 hclge_sync_fd_state(hdev);
7403 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7404 struct hclge_fd_rule *rule)
7406 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7407 struct flow_match_basic match;
7408 u16 ethtype_key, ethtype_mask;
7410 flow_rule_match_basic(flow, &match);
7411 ethtype_key = ntohs(match.key->n_proto);
7412 ethtype_mask = ntohs(match.mask->n_proto);
7414 if (ethtype_key == ETH_P_ALL) {
7418 rule->tuples.ether_proto = ethtype_key;
7419 rule->tuples_mask.ether_proto = ethtype_mask;
7420 rule->tuples.ip_proto = match.key->ip_proto;
7421 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7423 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7424 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7428 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7429 struct hclge_fd_rule *rule)
7431 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7432 struct flow_match_eth_addrs match;
7434 flow_rule_match_eth_addrs(flow, &match);
7435 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7436 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7437 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7438 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7440 rule->unused_tuple |= BIT(INNER_DST_MAC);
7441 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7445 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7446 struct hclge_fd_rule *rule)
7448 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7449 struct flow_match_vlan match;
7451 flow_rule_match_vlan(flow, &match);
7452 rule->tuples.vlan_tag1 = match.key->vlan_id |
7453 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7454 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7455 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7457 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7461 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7462 struct hclge_fd_rule *rule)
7466 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7467 struct flow_match_control match;
7469 flow_rule_match_control(flow, &match);
7470 addr_type = match.key->addr_type;
7473 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7474 struct flow_match_ipv4_addrs match;
7476 flow_rule_match_ipv4_addrs(flow, &match);
7477 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7478 rule->tuples_mask.src_ip[IPV4_INDEX] =
7479 be32_to_cpu(match.mask->src);
7480 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7481 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7482 be32_to_cpu(match.mask->dst);
7483 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7484 struct flow_match_ipv6_addrs match;
7486 flow_rule_match_ipv6_addrs(flow, &match);
7487 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7489 be32_to_cpu_array(rule->tuples_mask.src_ip,
7490 match.mask->src.s6_addr32, IPV6_SIZE);
7491 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7493 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7494 match.mask->dst.s6_addr32, IPV6_SIZE);
7496 rule->unused_tuple |= BIT(INNER_SRC_IP);
7497 rule->unused_tuple |= BIT(INNER_DST_IP);
7501 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7502 struct hclge_fd_rule *rule)
7504 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7505 struct flow_match_ports match;
7507 flow_rule_match_ports(flow, &match);
7509 rule->tuples.src_port = be16_to_cpu(match.key->src);
7510 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7511 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7512 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7514 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7515 rule->unused_tuple |= BIT(INNER_DST_PORT);
7519 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7520 struct flow_cls_offload *cls_flower,
7521 struct hclge_fd_rule *rule)
7523 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7524 struct flow_dissector *dissector = flow->match.dissector;
7526 if (dissector->used_keys &
7527 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7528 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7529 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7530 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7531 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7532 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7533 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7534 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7535 dissector->used_keys);
7539 hclge_get_cls_key_basic(flow, rule);
7540 hclge_get_cls_key_mac(flow, rule);
7541 hclge_get_cls_key_vlan(flow, rule);
7542 hclge_get_cls_key_ip(flow, rule);
7543 hclge_get_cls_key_port(flow, rule);
7548 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7549 struct flow_cls_offload *cls_flower, int tc)
7551 u32 prio = cls_flower->common.prio;
7553 if (tc < 0 || tc > hdev->tc_max) {
7554 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7559 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7560 dev_err(&hdev->pdev->dev,
7561 "prio %u should be in range[1, %u]\n",
7562 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7566 if (test_bit(prio - 1, hdev->fd_bmap)) {
7567 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7573 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7574 struct flow_cls_offload *cls_flower,
7577 struct hclge_vport *vport = hclge_get_vport(handle);
7578 struct hclge_dev *hdev = vport->back;
7579 struct hclge_fd_rule *rule;
7582 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7584 dev_err(&hdev->pdev->dev,
7585 "failed to check cls flower params, ret = %d\n", ret);
7589 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7593 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7599 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7600 rule->cls_flower.tc = tc;
7601 rule->location = cls_flower->common.prio - 1;
7603 rule->cls_flower.cookie = cls_flower->cookie;
7604 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7606 ret = hclge_add_fd_entry_common(hdev, rule);
7613 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7614 unsigned long cookie)
7616 struct hclge_fd_rule *rule;
7617 struct hlist_node *node;
7619 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7620 if (rule->cls_flower.cookie == cookie)
7627 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7628 struct flow_cls_offload *cls_flower)
7630 struct hclge_vport *vport = hclge_get_vport(handle);
7631 struct hclge_dev *hdev = vport->back;
7632 struct hclge_fd_rule *rule;
7635 spin_lock_bh(&hdev->fd_rule_lock);
7637 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7639 spin_unlock_bh(&hdev->fd_rule_lock);
7643 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7646 spin_unlock_bh(&hdev->fd_rule_lock);
7650 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7651 spin_unlock_bh(&hdev->fd_rule_lock);
7656 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7658 struct hclge_fd_rule *rule;
7659 struct hlist_node *node;
7662 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7665 spin_lock_bh(&hdev->fd_rule_lock);
7667 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7668 switch (rule->state) {
7669 case HCLGE_FD_TO_ADD:
7670 ret = hclge_fd_config_rule(hdev, rule);
7673 rule->state = HCLGE_FD_ACTIVE;
7675 case HCLGE_FD_TO_DEL:
7676 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7677 rule->location, NULL, false);
7680 hclge_fd_dec_rule_cnt(hdev, rule->location);
7681 hclge_fd_free_node(hdev, rule);
7690 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7692 spin_unlock_bh(&hdev->fd_rule_lock);
7695 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7697 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7698 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7700 hclge_clear_fd_rules_in_list(hdev, clear_list);
7703 hclge_sync_fd_user_def_cfg(hdev, false);
7705 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7708 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7710 struct hclge_vport *vport = hclge_get_vport(handle);
7711 struct hclge_dev *hdev = vport->back;
7713 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7714 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7717 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7719 struct hclge_vport *vport = hclge_get_vport(handle);
7720 struct hclge_dev *hdev = vport->back;
7722 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7725 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7727 struct hclge_vport *vport = hclge_get_vport(handle);
7728 struct hclge_dev *hdev = vport->back;
7730 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7733 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7735 struct hclge_vport *vport = hclge_get_vport(handle);
7736 struct hclge_dev *hdev = vport->back;
7738 return hdev->rst_stats.hw_reset_done_cnt;
7741 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7743 struct hclge_vport *vport = hclge_get_vport(handle);
7744 struct hclge_dev *hdev = vport->back;
7746 hdev->fd_en = enable;
7749 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7751 hclge_restore_fd_entries(handle);
7753 hclge_task_schedule(hdev, 0);
7756 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7758 struct hclge_desc desc;
7759 struct hclge_config_mac_mode_cmd *req =
7760 (struct hclge_config_mac_mode_cmd *)desc.data;
7764 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7767 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7768 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7769 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7770 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7771 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7772 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7773 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7774 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7775 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7776 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7779 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7781 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7783 dev_err(&hdev->pdev->dev,
7784 "mac enable fail, ret =%d.\n", ret);
7787 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7788 u8 switch_param, u8 param_mask)
7790 struct hclge_mac_vlan_switch_cmd *req;
7791 struct hclge_desc desc;
7795 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7796 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7798 /* read current config parameter */
7799 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7801 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7802 req->func_id = cpu_to_le32(func_id);
7804 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7806 dev_err(&hdev->pdev->dev,
7807 "read mac vlan switch parameter fail, ret = %d\n", ret);
7811 /* modify and write new config parameter */
7812 hclge_cmd_reuse_desc(&desc, false);
7813 req->switch_param = (req->switch_param & param_mask) | switch_param;
7814 req->param_mask = param_mask;
7816 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7818 dev_err(&hdev->pdev->dev,
7819 "set mac vlan switch parameter fail, ret = %d\n", ret);
7823 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7826 #define HCLGE_PHY_LINK_STATUS_NUM 200
7828 struct phy_device *phydev = hdev->hw.mac.phydev;
7833 ret = phy_read_status(phydev);
7835 dev_err(&hdev->pdev->dev,
7836 "phy update link status fail, ret = %d\n", ret);
7840 if (phydev->link == link_ret)
7843 msleep(HCLGE_LINK_STATUS_MS);
7844 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7847 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7849 #define HCLGE_MAC_LINK_STATUS_NUM 100
7856 ret = hclge_get_mac_link_status(hdev, &link_status);
7859 if (link_status == link_ret)
7862 msleep(HCLGE_LINK_STATUS_MS);
7863 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7867 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7872 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7875 hclge_phy_link_status_wait(hdev, link_ret);
7877 return hclge_mac_link_status_wait(hdev, link_ret);
7880 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7882 struct hclge_config_mac_mode_cmd *req;
7883 struct hclge_desc desc;
7887 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7888 /* 1 Read out the MAC mode config at first */
7889 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7890 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7892 dev_err(&hdev->pdev->dev,
7893 "mac loopback get fail, ret =%d.\n", ret);
7897 /* 2 Then setup the loopback flag */
7898 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7899 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7901 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7903 /* 3 Config mac work mode with loopback flag
7904 * and its original configure parameters
7906 hclge_cmd_reuse_desc(&desc, false);
7907 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7909 dev_err(&hdev->pdev->dev,
7910 "mac loopback set fail, ret =%d.\n", ret);
7914 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7915 enum hnae3_loop loop_mode)
7917 #define HCLGE_COMMON_LB_RETRY_MS 10
7918 #define HCLGE_COMMON_LB_RETRY_NUM 100
7920 struct hclge_common_lb_cmd *req;
7921 struct hclge_desc desc;
7925 req = (struct hclge_common_lb_cmd *)desc.data;
7926 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7928 switch (loop_mode) {
7929 case HNAE3_LOOP_SERIAL_SERDES:
7930 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7932 case HNAE3_LOOP_PARALLEL_SERDES:
7933 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7935 case HNAE3_LOOP_PHY:
7936 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7939 dev_err(&hdev->pdev->dev,
7940 "unsupported common loopback mode %d\n", loop_mode);
7945 req->enable = loop_mode_b;
7946 req->mask = loop_mode_b;
7948 req->mask = loop_mode_b;
7951 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7953 dev_err(&hdev->pdev->dev,
7954 "common loopback set fail, ret = %d\n", ret);
7959 msleep(HCLGE_COMMON_LB_RETRY_MS);
7960 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7962 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7964 dev_err(&hdev->pdev->dev,
7965 "common loopback get, ret = %d\n", ret);
7968 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7969 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7971 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7972 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7974 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7975 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7981 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7982 enum hnae3_loop loop_mode)
7986 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7990 hclge_cfg_mac_mode(hdev, en);
7992 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7994 dev_err(&hdev->pdev->dev,
7995 "serdes loopback config mac mode timeout\n");
8000 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
8001 struct phy_device *phydev)
8005 if (!phydev->suspended) {
8006 ret = phy_suspend(phydev);
8011 ret = phy_resume(phydev);
8015 return phy_loopback(phydev, true);
8018 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
8019 struct phy_device *phydev)
8023 ret = phy_loopback(phydev, false);
8027 return phy_suspend(phydev);
8030 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
8032 struct phy_device *phydev = hdev->hw.mac.phydev;
8036 if (hnae3_dev_phy_imp_supported(hdev))
8037 return hclge_set_common_loopback(hdev, en,
8043 ret = hclge_enable_phy_loopback(hdev, phydev);
8045 ret = hclge_disable_phy_loopback(hdev, phydev);
8047 dev_err(&hdev->pdev->dev,
8048 "set phy loopback fail, ret = %d\n", ret);
8052 hclge_cfg_mac_mode(hdev, en);
8054 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
8056 dev_err(&hdev->pdev->dev,
8057 "phy loopback config mac mode timeout\n");
8062 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
8063 u16 stream_id, bool enable)
8065 struct hclge_desc desc;
8066 struct hclge_cfg_com_tqp_queue_cmd *req =
8067 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
8069 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
8070 req->tqp_id = cpu_to_le16(tqp_id);
8071 req->stream_id = cpu_to_le16(stream_id);
8073 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
8075 return hclge_cmd_send(&hdev->hw, &desc, 1);
8078 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
8080 struct hclge_vport *vport = hclge_get_vport(handle);
8081 struct hclge_dev *hdev = vport->back;
8085 for (i = 0; i < handle->kinfo.num_tqps; i++) {
8086 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
8093 static int hclge_set_loopback(struct hnae3_handle *handle,
8094 enum hnae3_loop loop_mode, bool en)
8096 struct hclge_vport *vport = hclge_get_vport(handle);
8097 struct hclge_dev *hdev = vport->back;
8100 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
8101 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
8102 * the same, the packets are looped back in the SSU. If SSU loopback
8103 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8105 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8106 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8108 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8109 HCLGE_SWITCH_ALW_LPBK_MASK);
8114 switch (loop_mode) {
8115 case HNAE3_LOOP_APP:
8116 ret = hclge_set_app_loopback(hdev, en);
8118 case HNAE3_LOOP_SERIAL_SERDES:
8119 case HNAE3_LOOP_PARALLEL_SERDES:
8120 ret = hclge_set_common_loopback(hdev, en, loop_mode);
8122 case HNAE3_LOOP_PHY:
8123 ret = hclge_set_phy_loopback(hdev, en);
8127 dev_err(&hdev->pdev->dev,
8128 "loop_mode %d is not supported\n", loop_mode);
8135 ret = hclge_tqp_enable(handle, en);
8137 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8138 en ? "enable" : "disable", ret);
8143 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8147 ret = hclge_set_app_loopback(hdev, false);
8151 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8155 return hclge_cfg_common_loopback(hdev, false,
8156 HNAE3_LOOP_PARALLEL_SERDES);
8159 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8161 struct hclge_vport *vport = hclge_get_vport(handle);
8162 struct hnae3_knic_private_info *kinfo;
8163 struct hnae3_queue *queue;
8164 struct hclge_tqp *tqp;
8167 kinfo = &vport->nic.kinfo;
8168 for (i = 0; i < kinfo->num_tqps; i++) {
8169 queue = handle->kinfo.tqp[i];
8170 tqp = container_of(queue, struct hclge_tqp, q);
8171 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8175 static void hclge_flush_link_update(struct hclge_dev *hdev)
8177 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
8179 unsigned long last = hdev->serv_processed_cnt;
8182 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8183 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8184 last == hdev->serv_processed_cnt)
8188 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8190 struct hclge_vport *vport = hclge_get_vport(handle);
8191 struct hclge_dev *hdev = vport->back;
8194 hclge_task_schedule(hdev, 0);
8196 /* Set the DOWN flag here to disable link updating */
8197 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8199 /* flush memory to make sure DOWN is seen by service task */
8200 smp_mb__before_atomic();
8201 hclge_flush_link_update(hdev);
8205 static int hclge_ae_start(struct hnae3_handle *handle)
8207 struct hclge_vport *vport = hclge_get_vport(handle);
8208 struct hclge_dev *hdev = vport->back;
8211 hclge_cfg_mac_mode(hdev, true);
8212 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8213 hdev->hw.mac.link = 0;
8215 /* reset tqp stats */
8216 hclge_reset_tqp_stats(handle);
8218 hclge_mac_start_phy(hdev);
8223 static void hclge_ae_stop(struct hnae3_handle *handle)
8225 struct hclge_vport *vport = hclge_get_vport(handle);
8226 struct hclge_dev *hdev = vport->back;
8228 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8229 spin_lock_bh(&hdev->fd_rule_lock);
8230 hclge_clear_arfs_rules(hdev);
8231 spin_unlock_bh(&hdev->fd_rule_lock);
8233 /* If it is not PF reset or FLR, the firmware will disable the MAC,
8234 * so it only need to stop phy here.
8236 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8237 hdev->reset_type != HNAE3_FUNC_RESET &&
8238 hdev->reset_type != HNAE3_FLR_RESET) {
8239 hclge_mac_stop_phy(hdev);
8240 hclge_update_link_status(hdev);
8244 hclge_reset_tqp(handle);
8246 hclge_config_mac_tnl_int(hdev, false);
8249 hclge_cfg_mac_mode(hdev, false);
8251 hclge_mac_stop_phy(hdev);
8253 /* reset tqp stats */
8254 hclge_reset_tqp_stats(handle);
8255 hclge_update_link_status(hdev);
8258 int hclge_vport_start(struct hclge_vport *vport)
8260 struct hclge_dev *hdev = vport->back;
8262 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8263 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8264 vport->last_active_jiffies = jiffies;
8266 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8267 if (vport->vport_id) {
8268 hclge_restore_mac_table_common(vport);
8269 hclge_restore_vport_vlan_table(vport);
8271 hclge_restore_hw_table(hdev);
8275 clear_bit(vport->vport_id, hdev->vport_config_block);
8280 void hclge_vport_stop(struct hclge_vport *vport)
8282 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8285 static int hclge_client_start(struct hnae3_handle *handle)
8287 struct hclge_vport *vport = hclge_get_vport(handle);
8289 return hclge_vport_start(vport);
8292 static void hclge_client_stop(struct hnae3_handle *handle)
8294 struct hclge_vport *vport = hclge_get_vport(handle);
8296 hclge_vport_stop(vport);
8299 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8300 u16 cmdq_resp, u8 resp_code,
8301 enum hclge_mac_vlan_tbl_opcode op)
8303 struct hclge_dev *hdev = vport->back;
8306 dev_err(&hdev->pdev->dev,
8307 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8312 if (op == HCLGE_MAC_VLAN_ADD) {
8313 if (!resp_code || resp_code == 1)
8315 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8316 resp_code == HCLGE_ADD_MC_OVERFLOW)
8319 dev_err(&hdev->pdev->dev,
8320 "add mac addr failed for undefined, code=%u.\n",
8323 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8326 } else if (resp_code == 1) {
8327 dev_dbg(&hdev->pdev->dev,
8328 "remove mac addr failed for miss.\n");
8332 dev_err(&hdev->pdev->dev,
8333 "remove mac addr failed for undefined, code=%u.\n",
8336 } else if (op == HCLGE_MAC_VLAN_LKUP) {
8339 } else if (resp_code == 1) {
8340 dev_dbg(&hdev->pdev->dev,
8341 "lookup mac addr failed for miss.\n");
8345 dev_err(&hdev->pdev->dev,
8346 "lookup mac addr failed for undefined, code=%u.\n",
8351 dev_err(&hdev->pdev->dev,
8352 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8357 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8359 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8361 unsigned int word_num;
8362 unsigned int bit_num;
8364 if (vfid > 255 || vfid < 0)
8367 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8368 word_num = vfid / 32;
8369 bit_num = vfid % 32;
8371 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8373 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8375 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8376 bit_num = vfid % 32;
8378 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8380 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8386 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8388 #define HCLGE_DESC_NUMBER 3
8389 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8392 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8393 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8394 if (desc[i].data[j])
8400 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8401 const u8 *addr, bool is_mc)
8403 const unsigned char *mac_addr = addr;
8404 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8405 (mac_addr[0]) | (mac_addr[1] << 8);
8406 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8408 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8410 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8411 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8414 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8415 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8418 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8419 struct hclge_mac_vlan_tbl_entry_cmd *req)
8421 struct hclge_dev *hdev = vport->back;
8422 struct hclge_desc desc;
8427 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8429 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8431 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8433 dev_err(&hdev->pdev->dev,
8434 "del mac addr failed for cmd_send, ret =%d.\n",
8438 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8439 retval = le16_to_cpu(desc.retval);
8441 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8442 HCLGE_MAC_VLAN_REMOVE);
8445 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8446 struct hclge_mac_vlan_tbl_entry_cmd *req,
8447 struct hclge_desc *desc,
8450 struct hclge_dev *hdev = vport->back;
8455 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8457 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8458 memcpy(desc[0].data,
8460 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8461 hclge_cmd_setup_basic_desc(&desc[1],
8462 HCLGE_OPC_MAC_VLAN_ADD,
8464 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8465 hclge_cmd_setup_basic_desc(&desc[2],
8466 HCLGE_OPC_MAC_VLAN_ADD,
8468 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8470 memcpy(desc[0].data,
8472 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8473 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8476 dev_err(&hdev->pdev->dev,
8477 "lookup mac addr failed for cmd_send, ret =%d.\n",
8481 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8482 retval = le16_to_cpu(desc[0].retval);
8484 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8485 HCLGE_MAC_VLAN_LKUP);
8488 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8489 struct hclge_mac_vlan_tbl_entry_cmd *req,
8490 struct hclge_desc *mc_desc)
8492 struct hclge_dev *hdev = vport->back;
8499 struct hclge_desc desc;
8501 hclge_cmd_setup_basic_desc(&desc,
8502 HCLGE_OPC_MAC_VLAN_ADD,
8504 memcpy(desc.data, req,
8505 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8506 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8507 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8508 retval = le16_to_cpu(desc.retval);
8510 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8512 HCLGE_MAC_VLAN_ADD);
8514 hclge_cmd_reuse_desc(&mc_desc[0], false);
8515 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8516 hclge_cmd_reuse_desc(&mc_desc[1], false);
8517 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8518 hclge_cmd_reuse_desc(&mc_desc[2], false);
8519 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8520 memcpy(mc_desc[0].data, req,
8521 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8522 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8523 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8524 retval = le16_to_cpu(mc_desc[0].retval);
8526 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8528 HCLGE_MAC_VLAN_ADD);
8532 dev_err(&hdev->pdev->dev,
8533 "add mac addr failed for cmd_send, ret =%d.\n",
8541 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8542 u16 *allocated_size)
8544 struct hclge_umv_spc_alc_cmd *req;
8545 struct hclge_desc desc;
8548 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8549 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8551 req->space_size = cpu_to_le32(space_size);
8553 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8555 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8560 *allocated_size = le32_to_cpu(desc.data[1]);
8565 static int hclge_init_umv_space(struct hclge_dev *hdev)
8567 u16 allocated_size = 0;
8570 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8574 if (allocated_size < hdev->wanted_umv_size)
8575 dev_warn(&hdev->pdev->dev,
8576 "failed to alloc umv space, want %u, get %u\n",
8577 hdev->wanted_umv_size, allocated_size);
8579 hdev->max_umv_size = allocated_size;
8580 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8581 hdev->share_umv_size = hdev->priv_umv_size +
8582 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8584 if (hdev->ae_dev->dev_specs.mc_mac_size)
8585 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8590 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8592 struct hclge_vport *vport;
8595 for (i = 0; i < hdev->num_alloc_vport; i++) {
8596 vport = &hdev->vport[i];
8597 vport->used_umv_num = 0;
8600 mutex_lock(&hdev->vport_lock);
8601 hdev->share_umv_size = hdev->priv_umv_size +
8602 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8603 mutex_unlock(&hdev->vport_lock);
8605 hdev->used_mc_mac_num = 0;
8608 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8610 struct hclge_dev *hdev = vport->back;
8614 mutex_lock(&hdev->vport_lock);
8616 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8617 hdev->share_umv_size == 0);
8620 mutex_unlock(&hdev->vport_lock);
8625 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8627 struct hclge_dev *hdev = vport->back;
8630 if (vport->used_umv_num > hdev->priv_umv_size)
8631 hdev->share_umv_size++;
8633 if (vport->used_umv_num > 0)
8634 vport->used_umv_num--;
8636 if (vport->used_umv_num >= hdev->priv_umv_size &&
8637 hdev->share_umv_size > 0)
8638 hdev->share_umv_size--;
8639 vport->used_umv_num++;
8643 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8646 struct hclge_mac_node *mac_node, *tmp;
8648 list_for_each_entry_safe(mac_node, tmp, list, node)
8649 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8655 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8656 enum HCLGE_MAC_NODE_STATE state)
8659 /* from set_rx_mode or tmp_add_list */
8660 case HCLGE_MAC_TO_ADD:
8661 if (mac_node->state == HCLGE_MAC_TO_DEL)
8662 mac_node->state = HCLGE_MAC_ACTIVE;
8664 /* only from set_rx_mode */
8665 case HCLGE_MAC_TO_DEL:
8666 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8667 list_del(&mac_node->node);
8670 mac_node->state = HCLGE_MAC_TO_DEL;
8673 /* only from tmp_add_list, the mac_node->state won't be
8676 case HCLGE_MAC_ACTIVE:
8677 if (mac_node->state == HCLGE_MAC_TO_ADD)
8678 mac_node->state = HCLGE_MAC_ACTIVE;
8684 int hclge_update_mac_list(struct hclge_vport *vport,
8685 enum HCLGE_MAC_NODE_STATE state,
8686 enum HCLGE_MAC_ADDR_TYPE mac_type,
8687 const unsigned char *addr)
8689 struct hclge_dev *hdev = vport->back;
8690 struct hclge_mac_node *mac_node;
8691 struct list_head *list;
8693 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8694 &vport->uc_mac_list : &vport->mc_mac_list;
8696 spin_lock_bh(&vport->mac_list_lock);
8698 /* if the mac addr is already in the mac list, no need to add a new
8699 * one into it, just check the mac addr state, convert it to a new
8700 * state, or just remove it, or do nothing.
8702 mac_node = hclge_find_mac_node(list, addr);
8704 hclge_update_mac_node(mac_node, state);
8705 spin_unlock_bh(&vport->mac_list_lock);
8706 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8710 /* if this address is never added, unnecessary to delete */
8711 if (state == HCLGE_MAC_TO_DEL) {
8712 spin_unlock_bh(&vport->mac_list_lock);
8713 dev_err(&hdev->pdev->dev,
8714 "failed to delete address %pM from mac list\n",
8719 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8721 spin_unlock_bh(&vport->mac_list_lock);
8725 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8727 mac_node->state = state;
8728 ether_addr_copy(mac_node->mac_addr, addr);
8729 list_add_tail(&mac_node->node, list);
8731 spin_unlock_bh(&vport->mac_list_lock);
8736 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8737 const unsigned char *addr)
8739 struct hclge_vport *vport = hclge_get_vport(handle);
8741 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8745 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8746 const unsigned char *addr)
8748 struct hclge_dev *hdev = vport->back;
8749 struct hclge_mac_vlan_tbl_entry_cmd req;
8750 struct hclge_desc desc;
8751 u16 egress_port = 0;
8754 /* mac addr check */
8755 if (is_zero_ether_addr(addr) ||
8756 is_broadcast_ether_addr(addr) ||
8757 is_multicast_ether_addr(addr)) {
8758 dev_err(&hdev->pdev->dev,
8759 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8760 addr, is_zero_ether_addr(addr),
8761 is_broadcast_ether_addr(addr),
8762 is_multicast_ether_addr(addr));
8766 memset(&req, 0, sizeof(req));
8768 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8769 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8771 req.egress_port = cpu_to_le16(egress_port);
8773 hclge_prepare_mac_addr(&req, addr, false);
8775 /* Lookup the mac address in the mac_vlan table, and add
8776 * it if the entry is inexistent. Repeated unicast entry
8777 * is not allowed in the mac vlan table.
8779 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8780 if (ret == -ENOENT) {
8781 mutex_lock(&hdev->vport_lock);
8782 if (!hclge_is_umv_space_full(vport, false)) {
8783 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8785 hclge_update_umv_space(vport, false);
8786 mutex_unlock(&hdev->vport_lock);
8789 mutex_unlock(&hdev->vport_lock);
8791 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8792 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8793 hdev->priv_umv_size);
8798 /* check if we just hit the duplicate */
8805 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8806 const unsigned char *addr)
8808 struct hclge_vport *vport = hclge_get_vport(handle);
8810 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8814 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8815 const unsigned char *addr)
8817 struct hclge_dev *hdev = vport->back;
8818 struct hclge_mac_vlan_tbl_entry_cmd req;
8821 /* mac addr check */
8822 if (is_zero_ether_addr(addr) ||
8823 is_broadcast_ether_addr(addr) ||
8824 is_multicast_ether_addr(addr)) {
8825 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8830 memset(&req, 0, sizeof(req));
8831 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8832 hclge_prepare_mac_addr(&req, addr, false);
8833 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8835 mutex_lock(&hdev->vport_lock);
8836 hclge_update_umv_space(vport, true);
8837 mutex_unlock(&hdev->vport_lock);
8838 } else if (ret == -ENOENT) {
8845 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8846 const unsigned char *addr)
8848 struct hclge_vport *vport = hclge_get_vport(handle);
8850 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8854 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8855 const unsigned char *addr)
8857 struct hclge_dev *hdev = vport->back;
8858 struct hclge_mac_vlan_tbl_entry_cmd req;
8859 struct hclge_desc desc[3];
8860 bool is_new_addr = false;
8863 /* mac addr check */
8864 if (!is_multicast_ether_addr(addr)) {
8865 dev_err(&hdev->pdev->dev,
8866 "Add mc mac err! invalid mac:%pM.\n",
8870 memset(&req, 0, sizeof(req));
8871 hclge_prepare_mac_addr(&req, addr, true);
8872 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8874 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8875 hdev->used_mc_mac_num >=
8876 hdev->ae_dev->dev_specs.mc_mac_size)
8881 /* This mac addr do not exist, add new entry for it */
8882 memset(desc[0].data, 0, sizeof(desc[0].data));
8883 memset(desc[1].data, 0, sizeof(desc[0].data));
8884 memset(desc[2].data, 0, sizeof(desc[0].data));
8886 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8889 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8890 if (status == -ENOSPC)
8892 else if (!status && is_new_addr)
8893 hdev->used_mc_mac_num++;
8898 /* if already overflow, not to print each time */
8899 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8900 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8904 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8905 const unsigned char *addr)
8907 struct hclge_vport *vport = hclge_get_vport(handle);
8909 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8913 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8914 const unsigned char *addr)
8916 struct hclge_dev *hdev = vport->back;
8917 struct hclge_mac_vlan_tbl_entry_cmd req;
8918 enum hclge_cmd_status status;
8919 struct hclge_desc desc[3];
8921 /* mac addr check */
8922 if (!is_multicast_ether_addr(addr)) {
8923 dev_dbg(&hdev->pdev->dev,
8924 "Remove mc mac err! invalid mac:%pM.\n",
8929 memset(&req, 0, sizeof(req));
8930 hclge_prepare_mac_addr(&req, addr, true);
8931 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8933 /* This mac addr exist, remove this handle's VFID for it */
8934 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8938 if (hclge_is_all_function_id_zero(desc)) {
8939 /* All the vfid is zero, so need to delete this entry */
8940 status = hclge_remove_mac_vlan_tbl(vport, &req);
8942 hdev->used_mc_mac_num--;
8944 /* Not all the vfid is zero, update the vfid */
8945 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8947 } else if (status == -ENOENT) {
8954 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8955 struct list_head *list,
8956 int (*sync)(struct hclge_vport *,
8957 const unsigned char *))
8959 struct hclge_mac_node *mac_node, *tmp;
8962 list_for_each_entry_safe(mac_node, tmp, list, node) {
8963 ret = sync(vport, mac_node->mac_addr);
8965 mac_node->state = HCLGE_MAC_ACTIVE;
8967 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8970 /* If one unicast mac address is existing in hardware,
8971 * we need to try whether other unicast mac addresses
8972 * are new addresses that can be added.
8980 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8981 struct list_head *list,
8982 int (*unsync)(struct hclge_vport *,
8983 const unsigned char *))
8985 struct hclge_mac_node *mac_node, *tmp;
8988 list_for_each_entry_safe(mac_node, tmp, list, node) {
8989 ret = unsync(vport, mac_node->mac_addr);
8990 if (!ret || ret == -ENOENT) {
8991 list_del(&mac_node->node);
8994 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
9001 static bool hclge_sync_from_add_list(struct list_head *add_list,
9002 struct list_head *mac_list)
9004 struct hclge_mac_node *mac_node, *tmp, *new_node;
9005 bool all_added = true;
9007 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
9008 if (mac_node->state == HCLGE_MAC_TO_ADD)
9011 /* if the mac address from tmp_add_list is not in the
9012 * uc/mc_mac_list, it means have received a TO_DEL request
9013 * during the time window of adding the mac address into mac
9014 * table. if mac_node state is ACTIVE, then change it to TO_DEL,
9015 * then it will be removed at next time. else it must be TO_ADD,
9016 * this address hasn't been added into mac table,
9017 * so just remove the mac node.
9019 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
9021 hclge_update_mac_node(new_node, mac_node->state);
9022 list_del(&mac_node->node);
9024 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
9025 mac_node->state = HCLGE_MAC_TO_DEL;
9026 list_move_tail(&mac_node->node, mac_list);
9028 list_del(&mac_node->node);
9036 static void hclge_sync_from_del_list(struct list_head *del_list,
9037 struct list_head *mac_list)
9039 struct hclge_mac_node *mac_node, *tmp, *new_node;
9041 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
9042 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
9044 /* If the mac addr exists in the mac list, it means
9045 * received a new TO_ADD request during the time window
9046 * of configuring the mac address. For the mac node
9047 * state is TO_ADD, and the address is already in the
9048 * in the hardware(due to delete fail), so we just need
9049 * to change the mac node state to ACTIVE.
9051 new_node->state = HCLGE_MAC_ACTIVE;
9052 list_del(&mac_node->node);
9055 list_move_tail(&mac_node->node, mac_list);
9060 static void hclge_update_overflow_flags(struct hclge_vport *vport,
9061 enum HCLGE_MAC_ADDR_TYPE mac_type,
9064 if (mac_type == HCLGE_MAC_ADDR_UC) {
9066 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
9068 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
9071 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
9073 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
9077 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
9078 enum HCLGE_MAC_ADDR_TYPE mac_type)
9080 struct hclge_mac_node *mac_node, *tmp, *new_node;
9081 struct list_head tmp_add_list, tmp_del_list;
9082 struct list_head *list;
9085 INIT_LIST_HEAD(&tmp_add_list);
9086 INIT_LIST_HEAD(&tmp_del_list);
9088 /* move the mac addr to the tmp_add_list and tmp_del_list, then
9089 * we can add/delete these mac addr outside the spin lock
9091 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9092 &vport->uc_mac_list : &vport->mc_mac_list;
9094 spin_lock_bh(&vport->mac_list_lock);
9096 list_for_each_entry_safe(mac_node, tmp, list, node) {
9097 switch (mac_node->state) {
9098 case HCLGE_MAC_TO_DEL:
9099 list_move_tail(&mac_node->node, &tmp_del_list);
9101 case HCLGE_MAC_TO_ADD:
9102 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9105 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
9106 new_node->state = mac_node->state;
9107 list_add_tail(&new_node->node, &tmp_add_list);
9115 spin_unlock_bh(&vport->mac_list_lock);
9117 /* delete first, in order to get max mac table space for adding */
9118 if (mac_type == HCLGE_MAC_ADDR_UC) {
9119 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9120 hclge_rm_uc_addr_common);
9121 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9122 hclge_add_uc_addr_common);
9124 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9125 hclge_rm_mc_addr_common);
9126 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9127 hclge_add_mc_addr_common);
9130 /* if some mac addresses were added/deleted fail, move back to the
9131 * mac_list, and retry at next time.
9133 spin_lock_bh(&vport->mac_list_lock);
9135 hclge_sync_from_del_list(&tmp_del_list, list);
9136 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9138 spin_unlock_bh(&vport->mac_list_lock);
9140 hclge_update_overflow_flags(vport, mac_type, all_added);
9143 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9145 struct hclge_dev *hdev = vport->back;
9147 if (test_bit(vport->vport_id, hdev->vport_config_block))
9150 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9156 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9160 for (i = 0; i < hdev->num_alloc_vport; i++) {
9161 struct hclge_vport *vport = &hdev->vport[i];
9163 if (!hclge_need_sync_mac_table(vport))
9166 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9167 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9171 static void hclge_build_del_list(struct list_head *list,
9173 struct list_head *tmp_del_list)
9175 struct hclge_mac_node *mac_cfg, *tmp;
9177 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9178 switch (mac_cfg->state) {
9179 case HCLGE_MAC_TO_DEL:
9180 case HCLGE_MAC_ACTIVE:
9181 list_move_tail(&mac_cfg->node, tmp_del_list);
9183 case HCLGE_MAC_TO_ADD:
9185 list_del(&mac_cfg->node);
9193 static void hclge_unsync_del_list(struct hclge_vport *vport,
9194 int (*unsync)(struct hclge_vport *vport,
9195 const unsigned char *addr),
9197 struct list_head *tmp_del_list)
9199 struct hclge_mac_node *mac_cfg, *tmp;
9202 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9203 ret = unsync(vport, mac_cfg->mac_addr);
9204 if (!ret || ret == -ENOENT) {
9205 /* clear all mac addr from hardware, but remain these
9206 * mac addr in the mac list, and restore them after
9207 * vf reset finished.
9210 mac_cfg->state == HCLGE_MAC_ACTIVE) {
9211 mac_cfg->state = HCLGE_MAC_TO_ADD;
9213 list_del(&mac_cfg->node);
9216 } else if (is_del_list) {
9217 mac_cfg->state = HCLGE_MAC_TO_DEL;
9222 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9223 enum HCLGE_MAC_ADDR_TYPE mac_type)
9225 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9226 struct hclge_dev *hdev = vport->back;
9227 struct list_head tmp_del_list, *list;
9229 if (mac_type == HCLGE_MAC_ADDR_UC) {
9230 list = &vport->uc_mac_list;
9231 unsync = hclge_rm_uc_addr_common;
9233 list = &vport->mc_mac_list;
9234 unsync = hclge_rm_mc_addr_common;
9237 INIT_LIST_HEAD(&tmp_del_list);
9240 set_bit(vport->vport_id, hdev->vport_config_block);
9242 spin_lock_bh(&vport->mac_list_lock);
9244 hclge_build_del_list(list, is_del_list, &tmp_del_list);
9246 spin_unlock_bh(&vport->mac_list_lock);
9248 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9250 spin_lock_bh(&vport->mac_list_lock);
9252 hclge_sync_from_del_list(&tmp_del_list, list);
9254 spin_unlock_bh(&vport->mac_list_lock);
9257 /* remove all mac address when uninitailize */
9258 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9259 enum HCLGE_MAC_ADDR_TYPE mac_type)
9261 struct hclge_mac_node *mac_node, *tmp;
9262 struct hclge_dev *hdev = vport->back;
9263 struct list_head tmp_del_list, *list;
9265 INIT_LIST_HEAD(&tmp_del_list);
9267 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9268 &vport->uc_mac_list : &vport->mc_mac_list;
9270 spin_lock_bh(&vport->mac_list_lock);
9272 list_for_each_entry_safe(mac_node, tmp, list, node) {
9273 switch (mac_node->state) {
9274 case HCLGE_MAC_TO_DEL:
9275 case HCLGE_MAC_ACTIVE:
9276 list_move_tail(&mac_node->node, &tmp_del_list);
9278 case HCLGE_MAC_TO_ADD:
9279 list_del(&mac_node->node);
9285 spin_unlock_bh(&vport->mac_list_lock);
9287 if (mac_type == HCLGE_MAC_ADDR_UC)
9288 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9289 hclge_rm_uc_addr_common);
9291 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9292 hclge_rm_mc_addr_common);
9294 if (!list_empty(&tmp_del_list))
9295 dev_warn(&hdev->pdev->dev,
9296 "uninit %s mac list for vport %u not completely.\n",
9297 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9300 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9301 list_del(&mac_node->node);
9306 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9308 struct hclge_vport *vport;
9311 for (i = 0; i < hdev->num_alloc_vport; i++) {
9312 vport = &hdev->vport[i];
9313 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9314 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9318 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9319 u16 cmdq_resp, u8 resp_code)
9321 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
9322 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
9323 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
9324 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
9329 dev_err(&hdev->pdev->dev,
9330 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9335 switch (resp_code) {
9336 case HCLGE_ETHERTYPE_SUCCESS_ADD:
9337 case HCLGE_ETHERTYPE_ALREADY_ADD:
9340 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9341 dev_err(&hdev->pdev->dev,
9342 "add mac ethertype failed for manager table overflow.\n");
9343 return_status = -EIO;
9345 case HCLGE_ETHERTYPE_KEY_CONFLICT:
9346 dev_err(&hdev->pdev->dev,
9347 "add mac ethertype failed for key conflict.\n");
9348 return_status = -EIO;
9351 dev_err(&hdev->pdev->dev,
9352 "add mac ethertype failed for undefined, code=%u.\n",
9354 return_status = -EIO;
9357 return return_status;
9360 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9363 struct hclge_mac_vlan_tbl_entry_cmd req;
9364 struct hclge_dev *hdev = vport->back;
9365 struct hclge_desc desc;
9366 u16 egress_port = 0;
9369 if (is_zero_ether_addr(mac_addr))
9372 memset(&req, 0, sizeof(req));
9373 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9374 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9375 req.egress_port = cpu_to_le16(egress_port);
9376 hclge_prepare_mac_addr(&req, mac_addr, false);
9378 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9381 vf_idx += HCLGE_VF_VPORT_START_NUM;
9382 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9384 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9390 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9393 struct hclge_vport *vport = hclge_get_vport(handle);
9394 struct hclge_dev *hdev = vport->back;
9396 vport = hclge_get_vf_vport(hdev, vf);
9400 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9401 dev_info(&hdev->pdev->dev,
9402 "Specified MAC(=%pM) is same as before, no change committed!\n",
9407 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9408 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9413 ether_addr_copy(vport->vf_info.mac, mac_addr);
9415 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9416 dev_info(&hdev->pdev->dev,
9417 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9419 return hclge_inform_reset_assert_to_vf(vport);
9422 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9427 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9428 const struct hclge_mac_mgr_tbl_entry_cmd *req)
9430 struct hclge_desc desc;
9435 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9436 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9438 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9440 dev_err(&hdev->pdev->dev,
9441 "add mac ethertype failed for cmd_send, ret =%d.\n",
9446 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9447 retval = le16_to_cpu(desc.retval);
9449 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9452 static int init_mgr_tbl(struct hclge_dev *hdev)
9457 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9458 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9460 dev_err(&hdev->pdev->dev,
9461 "add mac ethertype failed, ret =%d.\n",
9470 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9472 struct hclge_vport *vport = hclge_get_vport(handle);
9473 struct hclge_dev *hdev = vport->back;
9475 ether_addr_copy(p, hdev->hw.mac.mac_addr);
9478 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9479 const u8 *old_addr, const u8 *new_addr)
9481 struct list_head *list = &vport->uc_mac_list;
9482 struct hclge_mac_node *old_node, *new_node;
9484 new_node = hclge_find_mac_node(list, new_addr);
9486 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9490 new_node->state = HCLGE_MAC_TO_ADD;
9491 ether_addr_copy(new_node->mac_addr, new_addr);
9492 list_add(&new_node->node, list);
9494 if (new_node->state == HCLGE_MAC_TO_DEL)
9495 new_node->state = HCLGE_MAC_ACTIVE;
9497 /* make sure the new addr is in the list head, avoid dev
9498 * addr may be not re-added into mac table for the umv space
9499 * limitation after global/imp reset which will clear mac
9500 * table by hardware.
9502 list_move(&new_node->node, list);
9505 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9506 old_node = hclge_find_mac_node(list, old_addr);
9508 if (old_node->state == HCLGE_MAC_TO_ADD) {
9509 list_del(&old_node->node);
9512 old_node->state = HCLGE_MAC_TO_DEL;
9517 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9522 static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
9525 const unsigned char *new_addr = (const unsigned char *)p;
9526 struct hclge_vport *vport = hclge_get_vport(handle);
9527 struct hclge_dev *hdev = vport->back;
9528 unsigned char *old_addr = NULL;
9531 /* mac addr check */
9532 if (is_zero_ether_addr(new_addr) ||
9533 is_broadcast_ether_addr(new_addr) ||
9534 is_multicast_ether_addr(new_addr)) {
9535 dev_err(&hdev->pdev->dev,
9536 "change uc mac err! invalid mac: %pM.\n",
9541 ret = hclge_pause_addr_cfg(hdev, new_addr);
9543 dev_err(&hdev->pdev->dev,
9544 "failed to configure mac pause address, ret = %d\n",
9550 old_addr = hdev->hw.mac.mac_addr;
9552 spin_lock_bh(&vport->mac_list_lock);
9553 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9555 dev_err(&hdev->pdev->dev,
9556 "failed to change the mac addr:%pM, ret = %d\n",
9558 spin_unlock_bh(&vport->mac_list_lock);
9561 hclge_pause_addr_cfg(hdev, old_addr);
9565 /* we must update dev addr with spin lock protect, preventing dev addr
9566 * being removed by set_rx_mode path.
9568 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9569 spin_unlock_bh(&vport->mac_list_lock);
9571 hclge_task_schedule(hdev, 0);
9576 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9578 struct mii_ioctl_data *data = if_mii(ifr);
9580 if (!hnae3_dev_phy_imp_supported(hdev))
9585 data->phy_id = hdev->hw.mac.phy_addr;
9586 /* this command reads phy id and register at the same time */
9589 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9593 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9599 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9602 struct hclge_vport *vport = hclge_get_vport(handle);
9603 struct hclge_dev *hdev = vport->back;
9607 return hclge_ptp_get_cfg(hdev, ifr);
9609 return hclge_ptp_set_cfg(hdev, ifr);
9611 if (!hdev->hw.mac.phydev)
9612 return hclge_mii_ioctl(hdev, ifr, cmd);
9615 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9618 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9621 struct hclge_port_vlan_filter_bypass_cmd *req;
9622 struct hclge_desc desc;
9625 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9626 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9628 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9631 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9633 dev_err(&hdev->pdev->dev,
9634 "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9640 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9641 u8 fe_type, bool filter_en, u8 vf_id)
9643 struct hclge_vlan_filter_ctrl_cmd *req;
9644 struct hclge_desc desc;
9647 /* read current vlan filter parameter */
9648 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9649 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9650 req->vlan_type = vlan_type;
9653 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9655 dev_err(&hdev->pdev->dev,
9656 "failed to get vlan filter config, ret = %d.\n", ret);
9660 /* modify and write new config parameter */
9661 hclge_cmd_reuse_desc(&desc, false);
9662 req->vlan_fe = filter_en ?
9663 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9665 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9667 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9673 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9675 struct hclge_dev *hdev = vport->back;
9676 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9679 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9680 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9681 HCLGE_FILTER_FE_EGRESS_V1_B,
9682 enable, vport->vport_id);
9684 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9685 HCLGE_FILTER_FE_EGRESS, enable,
9690 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9691 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9693 } else if (!vport->vport_id) {
9694 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9697 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9698 HCLGE_FILTER_FE_INGRESS,
9705 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9707 struct hnae3_handle *handle = &vport->nic;
9708 struct hclge_vport_vlan_cfg *vlan, *tmp;
9709 struct hclge_dev *hdev = vport->back;
9711 if (vport->vport_id) {
9712 if (vport->port_base_vlan_cfg.state !=
9713 HNAE3_PORT_BASE_VLAN_DISABLE)
9716 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9718 } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9722 if (!vport->req_vlan_fltr_en)
9725 /* compatible with former device, always enable vlan filter */
9726 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9729 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9730 if (vlan->vlan_id != 0)
9736 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9738 struct hclge_dev *hdev = vport->back;
9742 mutex_lock(&hdev->vport_lock);
9744 vport->req_vlan_fltr_en = request_en;
9746 need_en = hclge_need_enable_vport_vlan_filter(vport);
9747 if (need_en == vport->cur_vlan_fltr_en) {
9748 mutex_unlock(&hdev->vport_lock);
9752 ret = hclge_set_vport_vlan_filter(vport, need_en);
9754 mutex_unlock(&hdev->vport_lock);
9758 vport->cur_vlan_fltr_en = need_en;
9760 mutex_unlock(&hdev->vport_lock);
9765 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9767 struct hclge_vport *vport = hclge_get_vport(handle);
9769 return hclge_enable_vport_vlan_filter(vport, enable);
9772 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9773 bool is_kill, u16 vlan,
9774 struct hclge_desc *desc)
9776 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9777 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9782 hclge_cmd_setup_basic_desc(&desc[0],
9783 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9784 hclge_cmd_setup_basic_desc(&desc[1],
9785 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9787 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9789 vf_byte_off = vfid / 8;
9790 vf_byte_val = 1 << (vfid % 8);
9792 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9793 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9795 req0->vlan_id = cpu_to_le16(vlan);
9796 req0->vlan_cfg = is_kill;
9798 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9799 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9801 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9803 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9805 dev_err(&hdev->pdev->dev,
9806 "Send vf vlan command fail, ret =%d.\n",
9814 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9815 bool is_kill, struct hclge_desc *desc)
9817 struct hclge_vlan_filter_vf_cfg_cmd *req;
9819 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9822 #define HCLGE_VF_VLAN_NO_ENTRY 2
9823 if (!req->resp_code || req->resp_code == 1)
9826 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9827 set_bit(vfid, hdev->vf_vlan_full);
9828 dev_warn(&hdev->pdev->dev,
9829 "vf vlan table is full, vf vlan filter is disabled\n");
9833 dev_err(&hdev->pdev->dev,
9834 "Add vf vlan filter fail, ret =%u.\n",
9837 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9838 if (!req->resp_code)
9841 /* vf vlan filter is disabled when vf vlan table is full,
9842 * then new vlan id will not be added into vf vlan table.
9843 * Just return 0 without warning, avoid massive verbose
9844 * print logs when unload.
9846 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9849 dev_err(&hdev->pdev->dev,
9850 "Kill vf vlan filter fail, ret =%u.\n",
9857 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9858 bool is_kill, u16 vlan)
9860 struct hclge_vport *vport = &hdev->vport[vfid];
9861 struct hclge_desc desc[2];
9864 /* if vf vlan table is full, firmware will close vf vlan filter, it
9865 * is unable and unnecessary to add new vlan id to vf vlan filter.
9866 * If spoof check is enable, and vf vlan is full, it shouldn't add
9867 * new vlan, because tx packets with these vlan id will be dropped.
9869 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9870 if (vport->vf_info.spoofchk && vlan) {
9871 dev_err(&hdev->pdev->dev,
9872 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9878 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9882 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9885 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9886 u16 vlan_id, bool is_kill)
9888 struct hclge_vlan_filter_pf_cfg_cmd *req;
9889 struct hclge_desc desc;
9890 u8 vlan_offset_byte_val;
9891 u8 vlan_offset_byte;
9895 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9897 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9898 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9899 HCLGE_VLAN_BYTE_SIZE;
9900 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9902 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9903 req->vlan_offset = vlan_offset_160;
9904 req->vlan_cfg = is_kill;
9905 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9907 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9909 dev_err(&hdev->pdev->dev,
9910 "port vlan command, send fail, ret =%d.\n", ret);
9914 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9915 u16 vport_id, u16 vlan_id,
9918 u16 vport_idx, vport_num = 0;
9921 if (is_kill && !vlan_id)
9924 if (vlan_id >= VLAN_N_VID)
9927 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9929 dev_err(&hdev->pdev->dev,
9930 "Set %u vport vlan filter config fail, ret =%d.\n",
9935 /* vlan 0 may be added twice when 8021q module is enabled */
9936 if (!is_kill && !vlan_id &&
9937 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9940 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9941 dev_err(&hdev->pdev->dev,
9942 "Add port vlan failed, vport %u is already in vlan %u\n",
9948 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9949 dev_err(&hdev->pdev->dev,
9950 "Delete port vlan failed, vport %u is not in vlan %u\n",
9955 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9958 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9959 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9965 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9967 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9968 struct hclge_vport_vtag_tx_cfg_cmd *req;
9969 struct hclge_dev *hdev = vport->back;
9970 struct hclge_desc desc;
9974 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9976 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9977 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9978 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9979 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9980 vcfg->accept_tag1 ? 1 : 0);
9981 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9982 vcfg->accept_untag1 ? 1 : 0);
9983 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9984 vcfg->accept_tag2 ? 1 : 0);
9985 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9986 vcfg->accept_untag2 ? 1 : 0);
9987 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9988 vcfg->insert_tag1_en ? 1 : 0);
9989 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9990 vcfg->insert_tag2_en ? 1 : 0);
9991 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9992 vcfg->tag_shift_mode_en ? 1 : 0);
9993 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9995 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9996 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9997 HCLGE_VF_NUM_PER_BYTE;
9998 req->vf_bitmap[bmap_index] =
9999 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
10001 status = hclge_cmd_send(&hdev->hw, &desc, 1);
10003 dev_err(&hdev->pdev->dev,
10004 "Send port txvlan cfg command fail, ret =%d\n",
10010 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
10012 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
10013 struct hclge_vport_vtag_rx_cfg_cmd *req;
10014 struct hclge_dev *hdev = vport->back;
10015 struct hclge_desc desc;
10019 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
10021 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
10022 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
10023 vcfg->strip_tag1_en ? 1 : 0);
10024 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
10025 vcfg->strip_tag2_en ? 1 : 0);
10026 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
10027 vcfg->vlan1_vlan_prionly ? 1 : 0);
10028 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
10029 vcfg->vlan2_vlan_prionly ? 1 : 0);
10030 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
10031 vcfg->strip_tag1_discard_en ? 1 : 0);
10032 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
10033 vcfg->strip_tag2_discard_en ? 1 : 0);
10035 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
10036 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
10037 HCLGE_VF_NUM_PER_BYTE;
10038 req->vf_bitmap[bmap_index] =
10039 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
10041 status = hclge_cmd_send(&hdev->hw, &desc, 1);
10043 dev_err(&hdev->pdev->dev,
10044 "Send port rxvlan cfg command fail, ret =%d\n",
10050 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
10051 u16 port_base_vlan_state,
10052 u16 vlan_tag, u8 qos)
10056 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10057 vport->txvlan_cfg.accept_tag1 = true;
10058 vport->txvlan_cfg.insert_tag1_en = false;
10059 vport->txvlan_cfg.default_tag1 = 0;
10061 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
10063 vport->txvlan_cfg.accept_tag1 =
10064 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
10065 vport->txvlan_cfg.insert_tag1_en = true;
10066 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
10070 vport->txvlan_cfg.accept_untag1 = true;
10072 /* accept_tag2 and accept_untag2 are not supported on
10073 * pdev revision(0x20), new revision support them,
10074 * this two fields can not be configured by user.
10076 vport->txvlan_cfg.accept_tag2 = true;
10077 vport->txvlan_cfg.accept_untag2 = true;
10078 vport->txvlan_cfg.insert_tag2_en = false;
10079 vport->txvlan_cfg.default_tag2 = 0;
10080 vport->txvlan_cfg.tag_shift_mode_en = true;
10082 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10083 vport->rxvlan_cfg.strip_tag1_en = false;
10084 vport->rxvlan_cfg.strip_tag2_en =
10085 vport->rxvlan_cfg.rx_vlan_offload_en;
10086 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10088 vport->rxvlan_cfg.strip_tag1_en =
10089 vport->rxvlan_cfg.rx_vlan_offload_en;
10090 vport->rxvlan_cfg.strip_tag2_en = true;
10091 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10094 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10095 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10096 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10098 ret = hclge_set_vlan_tx_offload_cfg(vport);
10102 return hclge_set_vlan_rx_offload_cfg(vport);
10105 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
10107 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
10108 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
10109 struct hclge_desc desc;
10112 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
10113 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
10114 rx_req->ot_fst_vlan_type =
10115 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
10116 rx_req->ot_sec_vlan_type =
10117 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
10118 rx_req->in_fst_vlan_type =
10119 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
10120 rx_req->in_sec_vlan_type =
10121 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
10123 status = hclge_cmd_send(&hdev->hw, &desc, 1);
10125 dev_err(&hdev->pdev->dev,
10126 "Send rxvlan protocol type command fail, ret =%d\n",
10131 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10133 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10134 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10135 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10137 status = hclge_cmd_send(&hdev->hw, &desc, 1);
10139 dev_err(&hdev->pdev->dev,
10140 "Send txvlan protocol type command fail, ret =%d\n",
10146 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10148 #define HCLGE_DEF_VLAN_TYPE 0x8100
10150 struct hnae3_handle *handle = &hdev->vport[0].nic;
10151 struct hclge_vport *vport;
10155 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10156 /* for revision 0x21, vf vlan filter is per function */
10157 for (i = 0; i < hdev->num_alloc_vport; i++) {
10158 vport = &hdev->vport[i];
10159 ret = hclge_set_vlan_filter_ctrl(hdev,
10160 HCLGE_FILTER_TYPE_VF,
10161 HCLGE_FILTER_FE_EGRESS,
10166 vport->cur_vlan_fltr_en = true;
10169 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10170 HCLGE_FILTER_FE_INGRESS, true,
10175 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10176 HCLGE_FILTER_FE_EGRESS_V1_B,
10182 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10183 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10184 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10185 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10186 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10187 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10189 ret = hclge_set_vlan_protocol_type(hdev);
10193 for (i = 0; i < hdev->num_alloc_vport; i++) {
10197 vport = &hdev->vport[i];
10198 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10199 qos = vport->port_base_vlan_cfg.vlan_info.qos;
10201 ret = hclge_vlan_offload_cfg(vport,
10202 vport->port_base_vlan_cfg.state,
10208 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10211 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10212 bool writen_to_tbl)
10214 struct hclge_vport_vlan_cfg *vlan, *tmp;
10216 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10217 if (vlan->vlan_id == vlan_id)
10220 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10224 vlan->hd_tbl_status = writen_to_tbl;
10225 vlan->vlan_id = vlan_id;
10227 list_add_tail(&vlan->node, &vport->vlan_list);
10230 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10232 struct hclge_vport_vlan_cfg *vlan, *tmp;
10233 struct hclge_dev *hdev = vport->back;
10236 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10237 if (!vlan->hd_tbl_status) {
10238 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10240 vlan->vlan_id, false);
10242 dev_err(&hdev->pdev->dev,
10243 "restore vport vlan list failed, ret=%d\n",
10248 vlan->hd_tbl_status = true;
10254 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10257 struct hclge_vport_vlan_cfg *vlan, *tmp;
10258 struct hclge_dev *hdev = vport->back;
10260 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10261 if (vlan->vlan_id == vlan_id) {
10262 if (is_write_tbl && vlan->hd_tbl_status)
10263 hclge_set_vlan_filter_hw(hdev,
10264 htons(ETH_P_8021Q),
10269 list_del(&vlan->node);
10276 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10278 struct hclge_vport_vlan_cfg *vlan, *tmp;
10279 struct hclge_dev *hdev = vport->back;
10281 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10282 if (vlan->hd_tbl_status)
10283 hclge_set_vlan_filter_hw(hdev,
10284 htons(ETH_P_8021Q),
10289 vlan->hd_tbl_status = false;
10291 list_del(&vlan->node);
10295 clear_bit(vport->vport_id, hdev->vf_vlan_full);
10298 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10300 struct hclge_vport_vlan_cfg *vlan, *tmp;
10301 struct hclge_vport *vport;
10304 for (i = 0; i < hdev->num_alloc_vport; i++) {
10305 vport = &hdev->vport[i];
10306 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10307 list_del(&vlan->node);
10313 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10315 struct hclge_vport_vlan_cfg *vlan, *tmp;
10316 struct hclge_dev *hdev = vport->back;
10322 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10323 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10324 state = vport->port_base_vlan_cfg.state;
10326 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10327 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10328 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10329 vport->vport_id, vlan_id,
10334 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10335 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10337 vlan->vlan_id, false);
10340 vlan->hd_tbl_status = true;
10344 /* For global reset and imp reset, hardware will clear the mac table,
10345 * so we change the mac address state from ACTIVE to TO_ADD, then they
10346 * can be restored in the service task after reset complete. Furtherly,
10347 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
10348 * be restored after reset, so just remove these mac nodes from mac_list.
10350 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10352 struct hclge_mac_node *mac_node, *tmp;
10354 list_for_each_entry_safe(mac_node, tmp, list, node) {
10355 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10356 mac_node->state = HCLGE_MAC_TO_ADD;
10357 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10358 list_del(&mac_node->node);
10364 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10366 spin_lock_bh(&vport->mac_list_lock);
10368 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10369 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10370 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10372 spin_unlock_bh(&vport->mac_list_lock);
10375 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10377 struct hclge_vport *vport = &hdev->vport[0];
10378 struct hnae3_handle *handle = &vport->nic;
10380 hclge_restore_mac_table_common(vport);
10381 hclge_restore_vport_vlan_table(vport);
10382 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10383 hclge_restore_fd_entries(handle);
10386 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10388 struct hclge_vport *vport = hclge_get_vport(handle);
10390 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10391 vport->rxvlan_cfg.strip_tag1_en = false;
10392 vport->rxvlan_cfg.strip_tag2_en = enable;
10393 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10395 vport->rxvlan_cfg.strip_tag1_en = enable;
10396 vport->rxvlan_cfg.strip_tag2_en = true;
10397 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10400 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10401 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10402 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10403 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10405 return hclge_set_vlan_rx_offload_cfg(vport);
10408 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10410 struct hclge_dev *hdev = vport->back;
10412 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10413 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10416 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10417 u16 port_base_vlan_state,
10418 struct hclge_vlan_info *new_info,
10419 struct hclge_vlan_info *old_info)
10421 struct hclge_dev *hdev = vport->back;
10424 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10425 hclge_rm_vport_all_vlan_table(vport, false);
10426 /* force clear VLAN 0 */
10427 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10430 return hclge_set_vlan_filter_hw(hdev,
10431 htons(new_info->vlan_proto),
10433 new_info->vlan_tag,
10437 /* force add VLAN 0 */
10438 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10442 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10443 vport->vport_id, old_info->vlan_tag,
10448 return hclge_add_vport_all_vlan_table(vport);
10451 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10452 const struct hclge_vlan_info *old_cfg)
10454 if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10457 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10463 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10464 struct hclge_vlan_info *vlan_info)
10466 struct hnae3_handle *nic = &vport->nic;
10467 struct hclge_vlan_info *old_vlan_info;
10468 struct hclge_dev *hdev = vport->back;
10471 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10473 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10478 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10481 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10482 /* add new VLAN tag */
10483 ret = hclge_set_vlan_filter_hw(hdev,
10484 htons(vlan_info->vlan_proto),
10486 vlan_info->vlan_tag,
10491 /* remove old VLAN tag */
10492 if (old_vlan_info->vlan_tag == 0)
10493 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10496 ret = hclge_set_vlan_filter_hw(hdev,
10497 htons(ETH_P_8021Q),
10499 old_vlan_info->vlan_tag,
10502 dev_err(&hdev->pdev->dev,
10503 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10504 vport->vport_id, old_vlan_info->vlan_tag, ret);
10511 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10517 vport->port_base_vlan_cfg.state = state;
10518 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10519 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10521 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10523 vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10524 hclge_set_vport_vlan_fltr_change(vport);
10529 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10530 enum hnae3_port_base_vlan_state state,
10533 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10535 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10537 return HNAE3_PORT_BASE_VLAN_ENABLE;
10541 return HNAE3_PORT_BASE_VLAN_DISABLE;
10543 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10544 vport->port_base_vlan_cfg.vlan_info.qos == qos)
10545 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10547 return HNAE3_PORT_BASE_VLAN_MODIFY;
10550 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10551 u16 vlan, u8 qos, __be16 proto)
10553 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10554 struct hclge_vport *vport = hclge_get_vport(handle);
10555 struct hclge_dev *hdev = vport->back;
10556 struct hclge_vlan_info vlan_info;
10560 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10561 return -EOPNOTSUPP;
10563 vport = hclge_get_vf_vport(hdev, vfid);
10567 /* qos is a 3 bits value, so can not be bigger than 7 */
10568 if (vlan > VLAN_N_VID - 1 || qos > 7)
10570 if (proto != htons(ETH_P_8021Q))
10571 return -EPROTONOSUPPORT;
10573 state = hclge_get_port_base_vlan_state(vport,
10574 vport->port_base_vlan_cfg.state,
10576 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10579 vlan_info.vlan_tag = vlan;
10580 vlan_info.qos = qos;
10581 vlan_info.vlan_proto = ntohs(proto);
10583 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10585 dev_err(&hdev->pdev->dev,
10586 "failed to update port base vlan for vf %d, ret = %d\n",
10591 /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10594 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10595 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10596 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10597 vport->vport_id, state,
10603 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10605 struct hclge_vlan_info *vlan_info;
10606 struct hclge_vport *vport;
10610 /* clear port base vlan for all vf */
10611 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10612 vport = &hdev->vport[vf];
10613 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10615 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10617 vlan_info->vlan_tag, true);
10619 dev_err(&hdev->pdev->dev,
10620 "failed to clear vf vlan for vf%d, ret = %d\n",
10621 vf - HCLGE_VF_VPORT_START_NUM, ret);
10625 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10626 u16 vlan_id, bool is_kill)
10628 struct hclge_vport *vport = hclge_get_vport(handle);
10629 struct hclge_dev *hdev = vport->back;
10630 bool writen_to_tbl = false;
10633 /* When device is resetting or reset failed, firmware is unable to
10634 * handle mailbox. Just record the vlan id, and remove it after
10637 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10638 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10639 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10643 /* when port base vlan enabled, we use port base vlan as the vlan
10644 * filter entry. In this case, we don't update vlan filter table
10645 * when user add new vlan or remove exist vlan, just update the vport
10646 * vlan list. The vlan id in vlan list will be writen in vlan filter
10647 * table until port base vlan disabled
10649 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10650 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10652 writen_to_tbl = true;
10657 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10659 hclge_add_vport_vlan_table(vport, vlan_id,
10661 } else if (is_kill) {
10662 /* when remove hw vlan filter failed, record the vlan id,
10663 * and try to remove it from hw later, to be consistence
10666 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10669 hclge_set_vport_vlan_fltr_change(vport);
10674 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10676 struct hclge_vport *vport;
10680 for (i = 0; i < hdev->num_alloc_vport; i++) {
10681 vport = &hdev->vport[i];
10682 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10686 ret = hclge_enable_vport_vlan_filter(vport,
10687 vport->req_vlan_fltr_en);
10689 dev_err(&hdev->pdev->dev,
10690 "failed to sync vlan filter state for vport%u, ret = %d\n",
10691 vport->vport_id, ret);
10692 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10699 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10701 #define HCLGE_MAX_SYNC_COUNT 60
10703 int i, ret, sync_cnt = 0;
10706 /* start from vport 1 for PF is always alive */
10707 for (i = 0; i < hdev->num_alloc_vport; i++) {
10708 struct hclge_vport *vport = &hdev->vport[i];
10710 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10712 while (vlan_id != VLAN_N_VID) {
10713 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10714 vport->vport_id, vlan_id,
10716 if (ret && ret != -EINVAL)
10719 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10720 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10721 hclge_set_vport_vlan_fltr_change(vport);
10724 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10727 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10732 hclge_sync_vlan_fltr_state(hdev);
10735 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10737 struct hclge_config_max_frm_size_cmd *req;
10738 struct hclge_desc desc;
10740 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10742 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10743 req->max_frm_size = cpu_to_le16(new_mps);
10744 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10746 return hclge_cmd_send(&hdev->hw, &desc, 1);
10749 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10751 struct hclge_vport *vport = hclge_get_vport(handle);
10753 return hclge_set_vport_mtu(vport, new_mtu);
10756 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10758 struct hclge_dev *hdev = vport->back;
10759 int i, max_frm_size, ret;
10761 /* HW supprt 2 layer vlan */
10762 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10763 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10764 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10767 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10768 mutex_lock(&hdev->vport_lock);
10769 /* VF's mps must fit within hdev->mps */
10770 if (vport->vport_id && max_frm_size > hdev->mps) {
10771 mutex_unlock(&hdev->vport_lock);
10773 } else if (vport->vport_id) {
10774 vport->mps = max_frm_size;
10775 mutex_unlock(&hdev->vport_lock);
10779 /* PF's mps must be greater then VF's mps */
10780 for (i = 1; i < hdev->num_alloc_vport; i++)
10781 if (max_frm_size < hdev->vport[i].mps) {
10782 mutex_unlock(&hdev->vport_lock);
10786 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10788 ret = hclge_set_mac_mtu(hdev, max_frm_size);
10790 dev_err(&hdev->pdev->dev,
10791 "Change mtu fail, ret =%d\n", ret);
10795 hdev->mps = max_frm_size;
10796 vport->mps = max_frm_size;
10798 ret = hclge_buffer_alloc(hdev);
10800 dev_err(&hdev->pdev->dev,
10801 "Allocate buffer fail, ret =%d\n", ret);
10804 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10805 mutex_unlock(&hdev->vport_lock);
10809 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10812 struct hclge_reset_tqp_queue_cmd *req;
10813 struct hclge_desc desc;
10816 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10818 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10819 req->tqp_id = cpu_to_le16(queue_id);
10821 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10823 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10825 dev_err(&hdev->pdev->dev,
10826 "Send tqp reset cmd error, status =%d\n", ret);
10833 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10836 struct hclge_reset_tqp_queue_cmd *req;
10837 struct hclge_desc desc;
10840 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10842 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10843 req->tqp_id = cpu_to_le16(queue_id);
10845 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10847 dev_err(&hdev->pdev->dev,
10848 "Get reset status error, status =%d\n", ret);
10852 *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10857 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10859 struct hnae3_queue *queue;
10860 struct hclge_tqp *tqp;
10862 queue = handle->kinfo.tqp[queue_id];
10863 tqp = container_of(queue, struct hclge_tqp, q);
10868 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10870 struct hclge_vport *vport = hclge_get_vport(handle);
10871 struct hclge_dev *hdev = vport->back;
10872 u16 reset_try_times = 0;
10878 for (i = 0; i < handle->kinfo.num_tqps; i++) {
10879 queue_gid = hclge_covert_handle_qid_global(handle, i);
10880 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10882 dev_err(&hdev->pdev->dev,
10883 "failed to send reset tqp cmd, ret = %d\n",
10888 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10889 ret = hclge_get_reset_status(hdev, queue_gid,
10897 /* Wait for tqp hw reset */
10898 usleep_range(1000, 1200);
10901 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10902 dev_err(&hdev->pdev->dev,
10903 "wait for tqp hw reset timeout\n");
10907 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10909 dev_err(&hdev->pdev->dev,
10910 "failed to deassert soft reset, ret = %d\n",
10914 reset_try_times = 0;
10919 static int hclge_reset_rcb(struct hnae3_handle *handle)
10921 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10922 #define HCLGE_RESET_RCB_SUCCESS 1U
10924 struct hclge_vport *vport = hclge_get_vport(handle);
10925 struct hclge_dev *hdev = vport->back;
10926 struct hclge_reset_cmd *req;
10927 struct hclge_desc desc;
10932 queue_gid = hclge_covert_handle_qid_global(handle, 0);
10934 req = (struct hclge_reset_cmd *)desc.data;
10935 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10936 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10937 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10938 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10940 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10942 dev_err(&hdev->pdev->dev,
10943 "failed to send rcb reset cmd, ret = %d\n", ret);
10947 return_status = req->fun_reset_rcb_return_status;
10948 if (return_status == HCLGE_RESET_RCB_SUCCESS)
10951 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10952 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10957 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10958 * again to reset all tqps
10960 return hclge_reset_tqp_cmd(handle);
10963 int hclge_reset_tqp(struct hnae3_handle *handle)
10965 struct hclge_vport *vport = hclge_get_vport(handle);
10966 struct hclge_dev *hdev = vport->back;
10969 /* only need to disable PF's tqp */
10970 if (!vport->vport_id) {
10971 ret = hclge_tqp_enable(handle, false);
10973 dev_err(&hdev->pdev->dev,
10974 "failed to disable tqp, ret = %d\n", ret);
10979 return hclge_reset_rcb(handle);
10982 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10984 struct hclge_vport *vport = hclge_get_vport(handle);
10985 struct hclge_dev *hdev = vport->back;
10987 return hdev->fw_version;
10990 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10992 struct phy_device *phydev = hdev->hw.mac.phydev;
10997 phy_set_asym_pause(phydev, rx_en, tx_en);
11000 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
11004 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
11007 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
11009 dev_err(&hdev->pdev->dev,
11010 "configure pauseparam error, ret = %d.\n", ret);
11015 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
11017 struct phy_device *phydev = hdev->hw.mac.phydev;
11018 u16 remote_advertising = 0;
11019 u16 local_advertising;
11020 u32 rx_pause, tx_pause;
11023 if (!phydev->link || !phydev->autoneg)
11026 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
11029 remote_advertising = LPA_PAUSE_CAP;
11031 if (phydev->asym_pause)
11032 remote_advertising |= LPA_PAUSE_ASYM;
11034 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
11035 remote_advertising);
11036 tx_pause = flowctl & FLOW_CTRL_TX;
11037 rx_pause = flowctl & FLOW_CTRL_RX;
11039 if (phydev->duplex == HCLGE_MAC_HALF) {
11044 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
11047 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
11048 u32 *rx_en, u32 *tx_en)
11050 struct hclge_vport *vport = hclge_get_vport(handle);
11051 struct hclge_dev *hdev = vport->back;
11052 u8 media_type = hdev->hw.mac.media_type;
11054 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
11055 hclge_get_autoneg(handle) : 0;
11057 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11063 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
11066 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
11069 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
11078 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
11079 u32 rx_en, u32 tx_en)
11081 if (rx_en && tx_en)
11082 hdev->fc_mode_last_time = HCLGE_FC_FULL;
11083 else if (rx_en && !tx_en)
11084 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
11085 else if (!rx_en && tx_en)
11086 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
11088 hdev->fc_mode_last_time = HCLGE_FC_NONE;
11090 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
11093 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
11094 u32 rx_en, u32 tx_en)
11096 struct hclge_vport *vport = hclge_get_vport(handle);
11097 struct hclge_dev *hdev = vport->back;
11098 struct phy_device *phydev = hdev->hw.mac.phydev;
11101 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
11102 fc_autoneg = hclge_get_autoneg(handle);
11103 if (auto_neg != fc_autoneg) {
11104 dev_info(&hdev->pdev->dev,
11105 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
11106 return -EOPNOTSUPP;
11110 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11111 dev_info(&hdev->pdev->dev,
11112 "Priority flow control enabled. Cannot set link flow control.\n");
11113 return -EOPNOTSUPP;
11116 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
11118 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
11120 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
11121 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
11124 return phy_start_aneg(phydev);
11126 return -EOPNOTSUPP;
11129 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
11130 u8 *auto_neg, u32 *speed, u8 *duplex)
11132 struct hclge_vport *vport = hclge_get_vport(handle);
11133 struct hclge_dev *hdev = vport->back;
11136 *speed = hdev->hw.mac.speed;
11138 *duplex = hdev->hw.mac.duplex;
11140 *auto_neg = hdev->hw.mac.autoneg;
11143 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11146 struct hclge_vport *vport = hclge_get_vport(handle);
11147 struct hclge_dev *hdev = vport->back;
11149 /* When nic is down, the service task is not running, doesn't update
11150 * the port information per second. Query the port information before
11151 * return the media type, ensure getting the correct media information.
11153 hclge_update_port_info(hdev);
11156 *media_type = hdev->hw.mac.media_type;
11159 *module_type = hdev->hw.mac.module_type;
11162 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11163 u8 *tp_mdix_ctrl, u8 *tp_mdix)
11165 struct hclge_vport *vport = hclge_get_vport(handle);
11166 struct hclge_dev *hdev = vport->back;
11167 struct phy_device *phydev = hdev->hw.mac.phydev;
11168 int mdix_ctrl, mdix, is_resolved;
11169 unsigned int retval;
11172 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11173 *tp_mdix = ETH_TP_MDI_INVALID;
11177 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11179 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11180 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11181 HCLGE_PHY_MDIX_CTRL_S);
11183 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11184 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11185 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11187 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11189 switch (mdix_ctrl) {
11191 *tp_mdix_ctrl = ETH_TP_MDI;
11194 *tp_mdix_ctrl = ETH_TP_MDI_X;
11197 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11200 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11205 *tp_mdix = ETH_TP_MDI_INVALID;
11207 *tp_mdix = ETH_TP_MDI_X;
11209 *tp_mdix = ETH_TP_MDI;
11212 static void hclge_info_show(struct hclge_dev *hdev)
11214 struct device *dev = &hdev->pdev->dev;
11216 dev_info(dev, "PF info begin:\n");
11218 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11219 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11220 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11221 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11222 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11223 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11224 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11225 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11226 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11227 dev_info(dev, "This is %s PF\n",
11228 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11229 dev_info(dev, "DCB %s\n",
11230 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11231 dev_info(dev, "MQPRIO %s\n",
11232 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11233 dev_info(dev, "Default tx spare buffer size: %u\n",
11234 hdev->tx_spare_buf_size);
11236 dev_info(dev, "PF info end.\n");
11239 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11240 struct hclge_vport *vport)
11242 struct hnae3_client *client = vport->nic.client;
11243 struct hclge_dev *hdev = ae_dev->priv;
11244 int rst_cnt = hdev->rst_stats.reset_cnt;
11247 ret = client->ops->init_instance(&vport->nic);
11251 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11252 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11253 rst_cnt != hdev->rst_stats.reset_cnt) {
11258 /* Enable nic hw error interrupts */
11259 ret = hclge_config_nic_hw_error(hdev, true);
11261 dev_err(&ae_dev->pdev->dev,
11262 "fail(%d) to enable hw error interrupts\n", ret);
11266 hnae3_set_client_init_flag(client, ae_dev, 1);
11268 if (netif_msg_drv(&hdev->vport->nic))
11269 hclge_info_show(hdev);
11274 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11275 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11276 msleep(HCLGE_WAIT_RESET_DONE);
11278 client->ops->uninit_instance(&vport->nic, 0);
11283 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11284 struct hclge_vport *vport)
11286 struct hclge_dev *hdev = ae_dev->priv;
11287 struct hnae3_client *client;
11291 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11295 client = hdev->roce_client;
11296 ret = hclge_init_roce_base_info(vport);
11300 rst_cnt = hdev->rst_stats.reset_cnt;
11301 ret = client->ops->init_instance(&vport->roce);
11305 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11306 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11307 rst_cnt != hdev->rst_stats.reset_cnt) {
11309 goto init_roce_err;
11312 /* Enable roce ras interrupts */
11313 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11315 dev_err(&ae_dev->pdev->dev,
11316 "fail(%d) to enable roce ras interrupts\n", ret);
11317 goto init_roce_err;
11320 hnae3_set_client_init_flag(client, ae_dev, 1);
11325 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11326 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11327 msleep(HCLGE_WAIT_RESET_DONE);
11329 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11334 static int hclge_init_client_instance(struct hnae3_client *client,
11335 struct hnae3_ae_dev *ae_dev)
11337 struct hclge_dev *hdev = ae_dev->priv;
11338 struct hclge_vport *vport = &hdev->vport[0];
11341 switch (client->type) {
11342 case HNAE3_CLIENT_KNIC:
11343 hdev->nic_client = client;
11344 vport->nic.client = client;
11345 ret = hclge_init_nic_client_instance(ae_dev, vport);
11349 ret = hclge_init_roce_client_instance(ae_dev, vport);
11354 case HNAE3_CLIENT_ROCE:
11355 if (hnae3_dev_roce_supported(hdev)) {
11356 hdev->roce_client = client;
11357 vport->roce.client = client;
11360 ret = hclge_init_roce_client_instance(ae_dev, vport);
11372 hdev->nic_client = NULL;
11373 vport->nic.client = NULL;
11376 hdev->roce_client = NULL;
11377 vport->roce.client = NULL;
11381 static void hclge_uninit_client_instance(struct hnae3_client *client,
11382 struct hnae3_ae_dev *ae_dev)
11384 struct hclge_dev *hdev = ae_dev->priv;
11385 struct hclge_vport *vport = &hdev->vport[0];
11387 if (hdev->roce_client) {
11388 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11389 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11390 msleep(HCLGE_WAIT_RESET_DONE);
11392 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11393 hdev->roce_client = NULL;
11394 vport->roce.client = NULL;
11396 if (client->type == HNAE3_CLIENT_ROCE)
11398 if (hdev->nic_client && client->ops->uninit_instance) {
11399 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11400 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11401 msleep(HCLGE_WAIT_RESET_DONE);
11403 client->ops->uninit_instance(&vport->nic, 0);
11404 hdev->nic_client = NULL;
11405 vport->nic.client = NULL;
11409 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11411 #define HCLGE_MEM_BAR 4
11413 struct pci_dev *pdev = hdev->pdev;
11414 struct hclge_hw *hw = &hdev->hw;
11416 /* for device does not have device memory, return directly */
11417 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11420 hw->mem_base = devm_ioremap_wc(&pdev->dev,
11421 pci_resource_start(pdev, HCLGE_MEM_BAR),
11422 pci_resource_len(pdev, HCLGE_MEM_BAR));
11423 if (!hw->mem_base) {
11424 dev_err(&pdev->dev, "failed to map device memory\n");
11431 static int hclge_pci_init(struct hclge_dev *hdev)
11433 struct pci_dev *pdev = hdev->pdev;
11434 struct hclge_hw *hw;
11437 ret = pci_enable_device(pdev);
11439 dev_err(&pdev->dev, "failed to enable PCI device\n");
11443 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11445 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11447 dev_err(&pdev->dev,
11448 "can't set consistent PCI DMA");
11449 goto err_disable_device;
11451 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11454 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11456 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11457 goto err_disable_device;
11460 pci_set_master(pdev);
11462 hw->io_base = pcim_iomap(pdev, 2, 0);
11463 if (!hw->io_base) {
11464 dev_err(&pdev->dev, "Can't map configuration register space\n");
11466 goto err_clr_master;
11469 ret = hclge_dev_mem_map(hdev);
11471 goto err_unmap_io_base;
11473 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11478 pcim_iounmap(pdev, hdev->hw.io_base);
11480 pci_clear_master(pdev);
11481 pci_release_regions(pdev);
11482 err_disable_device:
11483 pci_disable_device(pdev);
11488 static void hclge_pci_uninit(struct hclge_dev *hdev)
11490 struct pci_dev *pdev = hdev->pdev;
11492 if (hdev->hw.mem_base)
11493 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11495 pcim_iounmap(pdev, hdev->hw.io_base);
11496 pci_free_irq_vectors(pdev);
11497 pci_clear_master(pdev);
11498 pci_release_mem_regions(pdev);
11499 pci_disable_device(pdev);
11502 static void hclge_state_init(struct hclge_dev *hdev)
11504 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11505 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11506 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11507 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11508 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11509 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11510 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11513 static void hclge_state_uninit(struct hclge_dev *hdev)
11515 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11516 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11518 if (hdev->reset_timer.function)
11519 del_timer_sync(&hdev->reset_timer);
11520 if (hdev->service_task.work.func)
11521 cancel_delayed_work_sync(&hdev->service_task);
11524 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11525 enum hnae3_reset_type rst_type)
11527 #define HCLGE_RESET_RETRY_WAIT_MS 500
11528 #define HCLGE_RESET_RETRY_CNT 5
11530 struct hclge_dev *hdev = ae_dev->priv;
11535 down(&hdev->reset_sem);
11536 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11537 hdev->reset_type = rst_type;
11538 ret = hclge_reset_prepare(hdev);
11539 if (ret || hdev->reset_pending) {
11540 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11542 if (hdev->reset_pending ||
11543 retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11544 dev_err(&hdev->pdev->dev,
11545 "reset_pending:0x%lx, retry_cnt:%d\n",
11546 hdev->reset_pending, retry_cnt);
11547 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11548 up(&hdev->reset_sem);
11549 msleep(HCLGE_RESET_RETRY_WAIT_MS);
11554 /* disable misc vector before reset done */
11555 hclge_enable_vector(&hdev->misc_vector, false);
11556 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11558 if (hdev->reset_type == HNAE3_FLR_RESET)
11559 hdev->rst_stats.flr_rst_cnt++;
11562 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11564 struct hclge_dev *hdev = ae_dev->priv;
11567 hclge_enable_vector(&hdev->misc_vector, true);
11569 ret = hclge_reset_rebuild(hdev);
11571 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11573 hdev->reset_type = HNAE3_NONE_RESET;
11574 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11575 up(&hdev->reset_sem);
11578 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11582 for (i = 0; i < hdev->num_alloc_vport; i++) {
11583 struct hclge_vport *vport = &hdev->vport[i];
11586 /* Send cmd to clear vport's FUNC_RST_ING */
11587 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11589 dev_warn(&hdev->pdev->dev,
11590 "clear vport(%u) rst failed %d!\n",
11591 vport->vport_id, ret);
11595 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11597 struct hclge_desc desc;
11600 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11602 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11603 /* This new command is only supported by new firmware, it will
11604 * fail with older firmware. Error value -EOPNOSUPP can only be
11605 * returned by older firmware running this command, to keep code
11606 * backward compatible we will override this value and return
11609 if (ret && ret != -EOPNOTSUPP) {
11610 dev_err(&hdev->pdev->dev,
11611 "failed to clear hw resource, ret = %d\n", ret);
11617 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11619 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11620 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11623 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11625 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11626 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11629 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11631 struct pci_dev *pdev = ae_dev->pdev;
11632 struct hclge_dev *hdev;
11635 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11640 hdev->ae_dev = ae_dev;
11641 hdev->reset_type = HNAE3_NONE_RESET;
11642 hdev->reset_level = HNAE3_FUNC_RESET;
11643 ae_dev->priv = hdev;
11645 /* HW supprt 2 layer vlan */
11646 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11648 mutex_init(&hdev->vport_lock);
11649 spin_lock_init(&hdev->fd_rule_lock);
11650 sema_init(&hdev->reset_sem, 1);
11652 ret = hclge_pci_init(hdev);
11656 ret = hclge_devlink_init(hdev);
11658 goto err_pci_uninit;
11660 /* Firmware command queue initialize */
11661 ret = hclge_cmd_queue_init(hdev);
11663 goto err_devlink_uninit;
11665 /* Firmware command initialize */
11666 ret = hclge_cmd_init(hdev);
11668 goto err_cmd_uninit;
11670 ret = hclge_clear_hw_resource(hdev);
11672 goto err_cmd_uninit;
11674 ret = hclge_get_cap(hdev);
11676 goto err_cmd_uninit;
11678 ret = hclge_query_dev_specs(hdev);
11680 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11682 goto err_cmd_uninit;
11685 ret = hclge_configure(hdev);
11687 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11688 goto err_cmd_uninit;
11691 ret = hclge_init_msi(hdev);
11693 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11694 goto err_cmd_uninit;
11697 ret = hclge_misc_irq_init(hdev);
11699 goto err_msi_uninit;
11701 ret = hclge_alloc_tqps(hdev);
11703 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11704 goto err_msi_irq_uninit;
11707 ret = hclge_alloc_vport(hdev);
11709 goto err_msi_irq_uninit;
11711 ret = hclge_map_tqp(hdev);
11713 goto err_msi_irq_uninit;
11715 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11716 !hnae3_dev_phy_imp_supported(hdev)) {
11717 ret = hclge_mac_mdio_config(hdev);
11719 goto err_msi_irq_uninit;
11722 ret = hclge_init_umv_space(hdev);
11724 goto err_mdiobus_unreg;
11726 ret = hclge_mac_init(hdev);
11728 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11729 goto err_mdiobus_unreg;
11732 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11734 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11735 goto err_mdiobus_unreg;
11738 ret = hclge_config_gro(hdev);
11740 goto err_mdiobus_unreg;
11742 ret = hclge_init_vlan_config(hdev);
11744 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11745 goto err_mdiobus_unreg;
11748 ret = hclge_tm_schd_init(hdev);
11750 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11751 goto err_mdiobus_unreg;
11754 ret = hclge_rss_init_cfg(hdev);
11756 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11757 goto err_mdiobus_unreg;
11760 ret = hclge_rss_init_hw(hdev);
11762 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11763 goto err_mdiobus_unreg;
11766 ret = init_mgr_tbl(hdev);
11768 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11769 goto err_mdiobus_unreg;
11772 ret = hclge_init_fd_config(hdev);
11774 dev_err(&pdev->dev,
11775 "fd table init fail, ret=%d\n", ret);
11776 goto err_mdiobus_unreg;
11779 ret = hclge_ptp_init(hdev);
11781 goto err_mdiobus_unreg;
11783 INIT_KFIFO(hdev->mac_tnl_log);
11785 hclge_dcb_ops_set(hdev);
11787 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11788 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11790 /* Setup affinity after service timer setup because add_timer_on
11791 * is called in affinity notify.
11793 hclge_misc_affinity_setup(hdev);
11795 hclge_clear_all_event_cause(hdev);
11796 hclge_clear_resetting_state(hdev);
11798 /* Log and clear the hw errors those already occurred */
11799 if (hnae3_dev_ras_imp_supported(hdev))
11800 hclge_handle_occurred_error(hdev);
11802 hclge_handle_all_hns_hw_errors(ae_dev);
11804 /* request delayed reset for the error recovery because an immediate
11805 * global reset on a PF affecting pending initialization of other PFs
11807 if (ae_dev->hw_err_reset_req) {
11808 enum hnae3_reset_type reset_level;
11810 reset_level = hclge_get_reset_level(ae_dev,
11811 &ae_dev->hw_err_reset_req);
11812 hclge_set_def_reset_request(ae_dev, reset_level);
11813 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11816 hclge_init_rxd_adv_layout(hdev);
11818 /* Enable MISC vector(vector0) */
11819 hclge_enable_vector(&hdev->misc_vector, true);
11821 hclge_state_init(hdev);
11822 hdev->last_reset_time = jiffies;
11824 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11825 HCLGE_DRIVER_NAME);
11827 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11832 if (hdev->hw.mac.phydev)
11833 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11834 err_msi_irq_uninit:
11835 hclge_misc_irq_uninit(hdev);
11837 pci_free_irq_vectors(pdev);
11839 hclge_cmd_uninit(hdev);
11840 err_devlink_uninit:
11841 hclge_devlink_uninit(hdev);
11843 pcim_iounmap(pdev, hdev->hw.io_base);
11844 pci_clear_master(pdev);
11845 pci_release_regions(pdev);
11846 pci_disable_device(pdev);
11848 mutex_destroy(&hdev->vport_lock);
11852 static void hclge_stats_clear(struct hclge_dev *hdev)
11854 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11857 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11859 return hclge_config_switch_param(hdev, vf, enable,
11860 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11863 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11865 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11866 HCLGE_FILTER_FE_NIC_INGRESS_B,
11870 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11874 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11876 dev_err(&hdev->pdev->dev,
11877 "Set vf %d mac spoof check %s failed, ret=%d\n",
11878 vf, enable ? "on" : "off", ret);
11882 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11884 dev_err(&hdev->pdev->dev,
11885 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11886 vf, enable ? "on" : "off", ret);
11891 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11894 struct hclge_vport *vport = hclge_get_vport(handle);
11895 struct hclge_dev *hdev = vport->back;
11896 u32 new_spoofchk = enable ? 1 : 0;
11899 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11900 return -EOPNOTSUPP;
11902 vport = hclge_get_vf_vport(hdev, vf);
11906 if (vport->vf_info.spoofchk == new_spoofchk)
11909 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11910 dev_warn(&hdev->pdev->dev,
11911 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11913 else if (enable && hclge_is_umv_space_full(vport, true))
11914 dev_warn(&hdev->pdev->dev,
11915 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11918 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11922 vport->vf_info.spoofchk = new_spoofchk;
11926 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11928 struct hclge_vport *vport = hdev->vport;
11932 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11935 /* resume the vf spoof check state after reset */
11936 for (i = 0; i < hdev->num_alloc_vport; i++) {
11937 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11938 vport->vf_info.spoofchk);
11948 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11950 struct hclge_vport *vport = hclge_get_vport(handle);
11951 struct hclge_dev *hdev = vport->back;
11952 u32 new_trusted = enable ? 1 : 0;
11954 vport = hclge_get_vf_vport(hdev, vf);
11958 if (vport->vf_info.trusted == new_trusted)
11961 vport->vf_info.trusted = new_trusted;
11962 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11963 hclge_task_schedule(hdev, 0);
11968 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11973 /* reset vf rate to default value */
11974 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11975 struct hclge_vport *vport = &hdev->vport[vf];
11977 vport->vf_info.max_tx_rate = 0;
11978 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11980 dev_err(&hdev->pdev->dev,
11981 "vf%d failed to reset to default, ret=%d\n",
11982 vf - HCLGE_VF_VPORT_START_NUM, ret);
11986 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11987 int min_tx_rate, int max_tx_rate)
11989 if (min_tx_rate != 0 ||
11990 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11991 dev_err(&hdev->pdev->dev,
11992 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11993 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
12000 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
12001 int min_tx_rate, int max_tx_rate, bool force)
12003 struct hclge_vport *vport = hclge_get_vport(handle);
12004 struct hclge_dev *hdev = vport->back;
12007 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
12011 vport = hclge_get_vf_vport(hdev, vf);
12015 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
12018 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
12022 vport->vf_info.max_tx_rate = max_tx_rate;
12027 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
12029 struct hnae3_handle *handle = &hdev->vport->nic;
12030 struct hclge_vport *vport;
12034 /* resume the vf max_tx_rate after reset */
12035 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
12036 vport = hclge_get_vf_vport(hdev, vf);
12040 /* zero means max rate, after reset, firmware already set it to
12041 * max rate, so just continue.
12043 if (!vport->vf_info.max_tx_rate)
12046 ret = hclge_set_vf_rate(handle, vf, 0,
12047 vport->vf_info.max_tx_rate, true);
12049 dev_err(&hdev->pdev->dev,
12050 "vf%d failed to resume tx_rate:%u, ret=%d\n",
12051 vf, vport->vf_info.max_tx_rate, ret);
12059 static void hclge_reset_vport_state(struct hclge_dev *hdev)
12061 struct hclge_vport *vport = hdev->vport;
12064 for (i = 0; i < hdev->num_alloc_vport; i++) {
12065 hclge_vport_stop(vport);
12070 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
12072 struct hclge_dev *hdev = ae_dev->priv;
12073 struct pci_dev *pdev = ae_dev->pdev;
12076 set_bit(HCLGE_STATE_DOWN, &hdev->state);
12078 hclge_stats_clear(hdev);
12079 /* NOTE: pf reset needn't to clear or restore pf and vf table entry.
12080 * so here should not clean table in memory.
12082 if (hdev->reset_type == HNAE3_IMP_RESET ||
12083 hdev->reset_type == HNAE3_GLOBAL_RESET) {
12084 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
12085 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
12086 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
12087 hclge_reset_umv_space(hdev);
12090 ret = hclge_cmd_init(hdev);
12092 dev_err(&pdev->dev, "Cmd queue init failed\n");
12096 ret = hclge_map_tqp(hdev);
12098 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
12102 ret = hclge_mac_init(hdev);
12104 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
12108 ret = hclge_tp_port_init(hdev);
12110 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
12115 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
12117 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
12121 ret = hclge_config_gro(hdev);
12125 ret = hclge_init_vlan_config(hdev);
12127 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
12131 ret = hclge_tm_init_hw(hdev, true);
12133 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12137 ret = hclge_rss_init_hw(hdev);
12139 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12143 ret = init_mgr_tbl(hdev);
12145 dev_err(&pdev->dev,
12146 "failed to reinit manager table, ret = %d\n", ret);
12150 ret = hclge_init_fd_config(hdev);
12152 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12156 ret = hclge_ptp_init(hdev);
12160 /* Log and clear the hw errors those already occurred */
12161 if (hnae3_dev_ras_imp_supported(hdev))
12162 hclge_handle_occurred_error(hdev);
12164 hclge_handle_all_hns_hw_errors(ae_dev);
12166 /* Re-enable the hw error interrupts because
12167 * the interrupts get disabled on global reset.
12169 ret = hclge_config_nic_hw_error(hdev, true);
12171 dev_err(&pdev->dev,
12172 "fail(%d) to re-enable NIC hw error interrupts\n",
12177 if (hdev->roce_client) {
12178 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12180 dev_err(&pdev->dev,
12181 "fail(%d) to re-enable roce ras interrupts\n",
12187 hclge_reset_vport_state(hdev);
12188 ret = hclge_reset_vport_spoofchk(hdev);
12192 ret = hclge_resume_vf_rate(hdev);
12196 hclge_init_rxd_adv_layout(hdev);
12198 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12199 HCLGE_DRIVER_NAME);
12204 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12206 struct hclge_dev *hdev = ae_dev->priv;
12207 struct hclge_mac *mac = &hdev->hw.mac;
12209 hclge_reset_vf_rate(hdev);
12210 hclge_clear_vf_vlan(hdev);
12211 hclge_misc_affinity_teardown(hdev);
12212 hclge_state_uninit(hdev);
12213 hclge_ptp_uninit(hdev);
12214 hclge_uninit_rxd_adv_layout(hdev);
12215 hclge_uninit_mac_table(hdev);
12216 hclge_del_all_fd_entries(hdev);
12219 mdiobus_unregister(mac->mdio_bus);
12221 /* Disable MISC vector(vector0) */
12222 hclge_enable_vector(&hdev->misc_vector, false);
12223 synchronize_irq(hdev->misc_vector.vector_irq);
12225 /* Disable all hw interrupts */
12226 hclge_config_mac_tnl_int(hdev, false);
12227 hclge_config_nic_hw_error(hdev, false);
12228 hclge_config_rocee_ras_interrupt(hdev, false);
12230 hclge_cmd_uninit(hdev);
12231 hclge_misc_irq_uninit(hdev);
12232 hclge_devlink_uninit(hdev);
12233 hclge_pci_uninit(hdev);
12234 mutex_destroy(&hdev->vport_lock);
12235 hclge_uninit_vport_vlan_table(hdev);
12236 ae_dev->priv = NULL;
12239 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12241 struct hclge_vport *vport = hclge_get_vport(handle);
12242 struct hclge_dev *hdev = vport->back;
12244 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12247 static void hclge_get_channels(struct hnae3_handle *handle,
12248 struct ethtool_channels *ch)
12250 ch->max_combined = hclge_get_max_channels(handle);
12251 ch->other_count = 1;
12253 ch->combined_count = handle->kinfo.rss_size;
12256 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12257 u16 *alloc_tqps, u16 *max_rss_size)
12259 struct hclge_vport *vport = hclge_get_vport(handle);
12260 struct hclge_dev *hdev = vport->back;
12262 *alloc_tqps = vport->alloc_tqps;
12263 *max_rss_size = hdev->pf_rss_size_max;
12266 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12267 bool rxfh_configured)
12269 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12270 struct hclge_vport *vport = hclge_get_vport(handle);
12271 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12272 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12273 struct hclge_dev *hdev = vport->back;
12274 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12275 u16 cur_rss_size = kinfo->rss_size;
12276 u16 cur_tqps = kinfo->num_tqps;
12277 u16 tc_valid[HCLGE_MAX_TC_NUM];
12283 kinfo->req_rss_size = new_tqps_num;
12285 ret = hclge_tm_vport_map_update(hdev);
12287 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12291 roundup_size = roundup_pow_of_two(kinfo->rss_size);
12292 roundup_size = ilog2(roundup_size);
12293 /* Set the RSS TC mode according to the new RSS size */
12294 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12297 if (!(hdev->hw_tc_map & BIT(i)))
12301 tc_size[i] = roundup_size;
12302 tc_offset[i] = kinfo->rss_size * i;
12304 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12308 /* RSS indirection table has been configured by user */
12309 if (rxfh_configured)
12312 /* Reinitializes the rss indirect table according to the new RSS size */
12313 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12318 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12319 rss_indir[i] = i % kinfo->rss_size;
12321 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12323 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12330 dev_info(&hdev->pdev->dev,
12331 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12332 cur_rss_size, kinfo->rss_size,
12333 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12338 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12339 u32 *regs_num_64_bit)
12341 struct hclge_desc desc;
12345 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12346 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12348 dev_err(&hdev->pdev->dev,
12349 "Query register number cmd failed, ret = %d.\n", ret);
12353 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12354 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12356 total_num = *regs_num_32_bit + *regs_num_64_bit;
12363 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12366 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12367 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12369 struct hclge_desc *desc;
12370 u32 *reg_val = data;
12380 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12381 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12382 HCLGE_32_BIT_REG_RTN_DATANUM);
12383 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12387 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12388 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12390 dev_err(&hdev->pdev->dev,
12391 "Query 32 bit register cmd failed, ret = %d.\n", ret);
12396 for (i = 0; i < cmd_num; i++) {
12398 desc_data = (__le32 *)(&desc[i].data[0]);
12399 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12401 desc_data = (__le32 *)(&desc[i]);
12402 n = HCLGE_32_BIT_REG_RTN_DATANUM;
12404 for (k = 0; k < n; k++) {
12405 *reg_val++ = le32_to_cpu(*desc_data++);
12417 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12420 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12421 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12423 struct hclge_desc *desc;
12424 u64 *reg_val = data;
12434 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12435 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12436 HCLGE_64_BIT_REG_RTN_DATANUM);
12437 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12441 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12442 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12444 dev_err(&hdev->pdev->dev,
12445 "Query 64 bit register cmd failed, ret = %d.\n", ret);
12450 for (i = 0; i < cmd_num; i++) {
12452 desc_data = (__le64 *)(&desc[i].data[0]);
12453 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12455 desc_data = (__le64 *)(&desc[i]);
12456 n = HCLGE_64_BIT_REG_RTN_DATANUM;
12458 for (k = 0; k < n; k++) {
12459 *reg_val++ = le64_to_cpu(*desc_data++);
12471 #define MAX_SEPARATE_NUM 4
12472 #define SEPARATOR_VALUE 0xFDFCFBFA
12473 #define REG_NUM_PER_LINE 4
12474 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
12475 #define REG_SEPARATOR_LINE 1
12476 #define REG_NUM_REMAIN_MASK 3
12478 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12482 /* initialize command BD except the last one */
12483 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12484 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12486 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12489 /* initialize the last command BD */
12490 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12492 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12495 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12499 u32 entries_per_desc, desc_index, index, offset, i;
12500 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12503 ret = hclge_query_bd_num_cmd_send(hdev, desc);
12505 dev_err(&hdev->pdev->dev,
12506 "Get dfx bd num fail, status is %d.\n", ret);
12510 entries_per_desc = ARRAY_SIZE(desc[0].data);
12511 for (i = 0; i < type_num; i++) {
12512 offset = hclge_dfx_bd_offset_list[i];
12513 index = offset % entries_per_desc;
12514 desc_index = offset / entries_per_desc;
12515 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12521 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12522 struct hclge_desc *desc_src, int bd_num,
12523 enum hclge_opcode_type cmd)
12525 struct hclge_desc *desc = desc_src;
12528 hclge_cmd_setup_basic_desc(desc, cmd, true);
12529 for (i = 0; i < bd_num - 1; i++) {
12530 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12532 hclge_cmd_setup_basic_desc(desc, cmd, true);
12536 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12538 dev_err(&hdev->pdev->dev,
12539 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12545 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12548 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12549 struct hclge_desc *desc = desc_src;
12552 entries_per_desc = ARRAY_SIZE(desc->data);
12553 reg_num = entries_per_desc * bd_num;
12554 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12555 for (i = 0; i < reg_num; i++) {
12556 index = i % entries_per_desc;
12557 desc_index = i / entries_per_desc;
12558 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12560 for (i = 0; i < separator_num; i++)
12561 *reg++ = SEPARATOR_VALUE;
12563 return reg_num + separator_num;
12566 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12568 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12569 int data_len_per_desc, bd_num, i;
12574 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12578 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12580 dev_err(&hdev->pdev->dev,
12581 "Get dfx reg bd num fail, status is %d.\n", ret);
12585 data_len_per_desc = sizeof_field(struct hclge_desc, data);
12587 for (i = 0; i < dfx_reg_type_num; i++) {
12588 bd_num = bd_num_list[i];
12589 data_len = data_len_per_desc * bd_num;
12590 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12594 kfree(bd_num_list);
12598 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12600 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12601 int bd_num, bd_num_max, buf_len, i;
12602 struct hclge_desc *desc_src;
12607 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12611 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12613 dev_err(&hdev->pdev->dev,
12614 "Get dfx reg bd num fail, status is %d.\n", ret);
12618 bd_num_max = bd_num_list[0];
12619 for (i = 1; i < dfx_reg_type_num; i++)
12620 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12622 buf_len = sizeof(*desc_src) * bd_num_max;
12623 desc_src = kzalloc(buf_len, GFP_KERNEL);
12629 for (i = 0; i < dfx_reg_type_num; i++) {
12630 bd_num = bd_num_list[i];
12631 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12632 hclge_dfx_reg_opcode_list[i]);
12634 dev_err(&hdev->pdev->dev,
12635 "Get dfx reg fail, status is %d.\n", ret);
12639 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12644 kfree(bd_num_list);
12648 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12649 struct hnae3_knic_private_info *kinfo)
12651 #define HCLGE_RING_REG_OFFSET 0x200
12652 #define HCLGE_RING_INT_REG_OFFSET 0x4
12654 int i, j, reg_num, separator_num;
12658 /* fetching per-PF registers valus from PF PCIe register space */
12659 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12660 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12661 for (i = 0; i < reg_num; i++)
12662 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12663 for (i = 0; i < separator_num; i++)
12664 *reg++ = SEPARATOR_VALUE;
12665 data_num_sum = reg_num + separator_num;
12667 reg_num = ARRAY_SIZE(common_reg_addr_list);
12668 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12669 for (i = 0; i < reg_num; i++)
12670 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12671 for (i = 0; i < separator_num; i++)
12672 *reg++ = SEPARATOR_VALUE;
12673 data_num_sum += reg_num + separator_num;
12675 reg_num = ARRAY_SIZE(ring_reg_addr_list);
12676 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12677 for (j = 0; j < kinfo->num_tqps; j++) {
12678 for (i = 0; i < reg_num; i++)
12679 *reg++ = hclge_read_dev(&hdev->hw,
12680 ring_reg_addr_list[i] +
12681 HCLGE_RING_REG_OFFSET * j);
12682 for (i = 0; i < separator_num; i++)
12683 *reg++ = SEPARATOR_VALUE;
12685 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12687 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12688 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12689 for (j = 0; j < hdev->num_msi_used - 1; j++) {
12690 for (i = 0; i < reg_num; i++)
12691 *reg++ = hclge_read_dev(&hdev->hw,
12692 tqp_intr_reg_addr_list[i] +
12693 HCLGE_RING_INT_REG_OFFSET * j);
12694 for (i = 0; i < separator_num; i++)
12695 *reg++ = SEPARATOR_VALUE;
12697 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12699 return data_num_sum;
12702 static int hclge_get_regs_len(struct hnae3_handle *handle)
12704 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12705 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12706 struct hclge_vport *vport = hclge_get_vport(handle);
12707 struct hclge_dev *hdev = vport->back;
12708 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12709 int regs_lines_32_bit, regs_lines_64_bit;
12712 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
12714 dev_err(&hdev->pdev->dev,
12715 "Get register number failed, ret = %d.\n", ret);
12719 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12721 dev_err(&hdev->pdev->dev,
12722 "Get dfx reg len failed, ret = %d.\n", ret);
12726 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12727 REG_SEPARATOR_LINE;
12728 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12729 REG_SEPARATOR_LINE;
12730 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12731 REG_SEPARATOR_LINE;
12732 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12733 REG_SEPARATOR_LINE;
12734 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12735 REG_SEPARATOR_LINE;
12736 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12737 REG_SEPARATOR_LINE;
12739 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12740 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12741 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12744 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12747 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12748 struct hclge_vport *vport = hclge_get_vport(handle);
12749 struct hclge_dev *hdev = vport->back;
12750 u32 regs_num_32_bit, regs_num_64_bit;
12751 int i, reg_num, separator_num, ret;
12754 *version = hdev->fw_version;
12756 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
12758 dev_err(&hdev->pdev->dev,
12759 "Get register number failed, ret = %d.\n", ret);
12763 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12765 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12767 dev_err(&hdev->pdev->dev,
12768 "Get 32 bit register failed, ret = %d.\n", ret);
12771 reg_num = regs_num_32_bit;
12773 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12774 for (i = 0; i < separator_num; i++)
12775 *reg++ = SEPARATOR_VALUE;
12777 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12779 dev_err(&hdev->pdev->dev,
12780 "Get 64 bit register failed, ret = %d.\n", ret);
12783 reg_num = regs_num_64_bit * 2;
12785 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12786 for (i = 0; i < separator_num; i++)
12787 *reg++ = SEPARATOR_VALUE;
12789 ret = hclge_get_dfx_reg(hdev, reg);
12791 dev_err(&hdev->pdev->dev,
12792 "Get dfx register failed, ret = %d.\n", ret);
12795 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12797 struct hclge_set_led_state_cmd *req;
12798 struct hclge_desc desc;
12801 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12803 req = (struct hclge_set_led_state_cmd *)desc.data;
12804 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12805 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12807 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12809 dev_err(&hdev->pdev->dev,
12810 "Send set led state cmd error, ret =%d\n", ret);
12815 enum hclge_led_status {
12818 HCLGE_LED_NO_CHANGE = 0xFF,
12821 static int hclge_set_led_id(struct hnae3_handle *handle,
12822 enum ethtool_phys_id_state status)
12824 struct hclge_vport *vport = hclge_get_vport(handle);
12825 struct hclge_dev *hdev = vport->back;
12828 case ETHTOOL_ID_ACTIVE:
12829 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12830 case ETHTOOL_ID_INACTIVE:
12831 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12837 static void hclge_get_link_mode(struct hnae3_handle *handle,
12838 unsigned long *supported,
12839 unsigned long *advertising)
12841 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12842 struct hclge_vport *vport = hclge_get_vport(handle);
12843 struct hclge_dev *hdev = vport->back;
12844 unsigned int idx = 0;
12846 for (; idx < size; idx++) {
12847 supported[idx] = hdev->hw.mac.supported[idx];
12848 advertising[idx] = hdev->hw.mac.advertising[idx];
12852 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12854 struct hclge_vport *vport = hclge_get_vport(handle);
12855 struct hclge_dev *hdev = vport->back;
12856 bool gro_en_old = hdev->gro_en;
12859 hdev->gro_en = enable;
12860 ret = hclge_config_gro(hdev);
12862 hdev->gro_en = gro_en_old;
12867 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12869 struct hclge_vport *vport = &hdev->vport[0];
12870 struct hnae3_handle *handle = &vport->nic;
12875 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12876 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12877 vport->last_promisc_flags = vport->overflow_promisc_flags;
12880 if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12881 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12882 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12883 tmp_flags & HNAE3_MPE);
12885 clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12887 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12892 for (i = 1; i < hdev->num_alloc_vport; i++) {
12893 bool uc_en = false;
12894 bool mc_en = false;
12897 vport = &hdev->vport[i];
12899 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12903 if (vport->vf_info.trusted) {
12904 uc_en = vport->vf_info.request_uc_en > 0 ||
12905 vport->overflow_promisc_flags &
12906 HNAE3_OVERFLOW_UPE;
12907 mc_en = vport->vf_info.request_mc_en > 0 ||
12908 vport->overflow_promisc_flags &
12909 HNAE3_OVERFLOW_MPE;
12911 bc_en = vport->vf_info.request_bc_en > 0;
12913 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12916 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12920 hclge_set_vport_vlan_fltr_change(vport);
12924 static bool hclge_module_existed(struct hclge_dev *hdev)
12926 struct hclge_desc desc;
12930 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12931 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12933 dev_err(&hdev->pdev->dev,
12934 "failed to get SFP exist state, ret = %d\n", ret);
12938 existed = le32_to_cpu(desc.data[0]);
12940 return existed != 0;
12943 /* need 6 bds(total 140 bytes) in one reading
12944 * return the number of bytes actually read, 0 means read failed.
12946 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12949 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12950 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12956 /* setup all 6 bds to read module eeprom info. */
12957 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12958 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12961 /* bd0~bd4 need next flag */
12962 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12963 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12966 /* setup bd0, this bd contains offset and read length. */
12967 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12968 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12969 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12970 sfp_info_bd0->read_len = cpu_to_le16(read_len);
12972 ret = hclge_cmd_send(&hdev->hw, desc, i);
12974 dev_err(&hdev->pdev->dev,
12975 "failed to get SFP eeprom info, ret = %d\n", ret);
12979 /* copy sfp info from bd0 to out buffer. */
12980 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12981 memcpy(data, sfp_info_bd0->data, copy_len);
12982 read_len = copy_len;
12984 /* copy sfp info from bd1~bd5 to out buffer if needed. */
12985 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12986 if (read_len >= len)
12989 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12990 memcpy(data + read_len, desc[i].data, copy_len);
12991 read_len += copy_len;
12997 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
13000 struct hclge_vport *vport = hclge_get_vport(handle);
13001 struct hclge_dev *hdev = vport->back;
13005 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
13006 return -EOPNOTSUPP;
13008 if (!hclge_module_existed(hdev))
13011 while (read_len < len) {
13012 data_len = hclge_get_sfp_eeprom_info(hdev,
13019 read_len += data_len;
13025 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
13028 struct hclge_vport *vport = hclge_get_vport(handle);
13029 struct hclge_dev *hdev = vport->back;
13030 struct hclge_desc desc;
13033 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
13034 return -EOPNOTSUPP;
13036 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
13037 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
13039 dev_err(&hdev->pdev->dev,
13040 "failed to query link diagnosis info, ret = %d\n", ret);
13044 *status_code = le32_to_cpu(desc.data[0]);
13048 static const struct hnae3_ae_ops hclge_ops = {
13049 .init_ae_dev = hclge_init_ae_dev,
13050 .uninit_ae_dev = hclge_uninit_ae_dev,
13051 .reset_prepare = hclge_reset_prepare_general,
13052 .reset_done = hclge_reset_done,
13053 .init_client_instance = hclge_init_client_instance,
13054 .uninit_client_instance = hclge_uninit_client_instance,
13055 .map_ring_to_vector = hclge_map_ring_to_vector,
13056 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
13057 .get_vector = hclge_get_vector,
13058 .put_vector = hclge_put_vector,
13059 .set_promisc_mode = hclge_set_promisc_mode,
13060 .request_update_promisc_mode = hclge_request_update_promisc_mode,
13061 .set_loopback = hclge_set_loopback,
13062 .start = hclge_ae_start,
13063 .stop = hclge_ae_stop,
13064 .client_start = hclge_client_start,
13065 .client_stop = hclge_client_stop,
13066 .get_status = hclge_get_status,
13067 .get_ksettings_an_result = hclge_get_ksettings_an_result,
13068 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
13069 .get_media_type = hclge_get_media_type,
13070 .check_port_speed = hclge_check_port_speed,
13071 .get_fec = hclge_get_fec,
13072 .set_fec = hclge_set_fec,
13073 .get_rss_key_size = hclge_get_rss_key_size,
13074 .get_rss = hclge_get_rss,
13075 .set_rss = hclge_set_rss,
13076 .set_rss_tuple = hclge_set_rss_tuple,
13077 .get_rss_tuple = hclge_get_rss_tuple,
13078 .get_tc_size = hclge_get_tc_size,
13079 .get_mac_addr = hclge_get_mac_addr,
13080 .set_mac_addr = hclge_set_mac_addr,
13081 .do_ioctl = hclge_do_ioctl,
13082 .add_uc_addr = hclge_add_uc_addr,
13083 .rm_uc_addr = hclge_rm_uc_addr,
13084 .add_mc_addr = hclge_add_mc_addr,
13085 .rm_mc_addr = hclge_rm_mc_addr,
13086 .set_autoneg = hclge_set_autoneg,
13087 .get_autoneg = hclge_get_autoneg,
13088 .restart_autoneg = hclge_restart_autoneg,
13089 .halt_autoneg = hclge_halt_autoneg,
13090 .get_pauseparam = hclge_get_pauseparam,
13091 .set_pauseparam = hclge_set_pauseparam,
13092 .set_mtu = hclge_set_mtu,
13093 .reset_queue = hclge_reset_tqp,
13094 .get_stats = hclge_get_stats,
13095 .get_mac_stats = hclge_get_mac_stat,
13096 .update_stats = hclge_update_stats,
13097 .get_strings = hclge_get_strings,
13098 .get_sset_count = hclge_get_sset_count,
13099 .get_fw_version = hclge_get_fw_version,
13100 .get_mdix_mode = hclge_get_mdix_mode,
13101 .enable_vlan_filter = hclge_enable_vlan_filter,
13102 .set_vlan_filter = hclge_set_vlan_filter,
13103 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
13104 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
13105 .reset_event = hclge_reset_event,
13106 .get_reset_level = hclge_get_reset_level,
13107 .set_default_reset_request = hclge_set_def_reset_request,
13108 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
13109 .set_channels = hclge_set_channels,
13110 .get_channels = hclge_get_channels,
13111 .get_regs_len = hclge_get_regs_len,
13112 .get_regs = hclge_get_regs,
13113 .set_led_id = hclge_set_led_id,
13114 .get_link_mode = hclge_get_link_mode,
13115 .add_fd_entry = hclge_add_fd_entry,
13116 .del_fd_entry = hclge_del_fd_entry,
13117 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
13118 .get_fd_rule_info = hclge_get_fd_rule_info,
13119 .get_fd_all_rules = hclge_get_all_rules,
13120 .enable_fd = hclge_enable_fd,
13121 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
13122 .dbg_read_cmd = hclge_dbg_read_cmd,
13123 .handle_hw_ras_error = hclge_handle_hw_ras_error,
13124 .get_hw_reset_stat = hclge_get_hw_reset_stat,
13125 .ae_dev_resetting = hclge_ae_dev_resetting,
13126 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
13127 .set_gro_en = hclge_gro_en,
13128 .get_global_queue_id = hclge_covert_handle_qid_global,
13129 .set_timer_task = hclge_set_timer_task,
13130 .mac_connect_phy = hclge_mac_connect_phy,
13131 .mac_disconnect_phy = hclge_mac_disconnect_phy,
13132 .get_vf_config = hclge_get_vf_config,
13133 .set_vf_link_state = hclge_set_vf_link_state,
13134 .set_vf_spoofchk = hclge_set_vf_spoofchk,
13135 .set_vf_trust = hclge_set_vf_trust,
13136 .set_vf_rate = hclge_set_vf_rate,
13137 .set_vf_mac = hclge_set_vf_mac,
13138 .get_module_eeprom = hclge_get_module_eeprom,
13139 .get_cmdq_stat = hclge_get_cmdq_stat,
13140 .add_cls_flower = hclge_add_cls_flower,
13141 .del_cls_flower = hclge_del_cls_flower,
13142 .cls_flower_active = hclge_is_cls_flower_active,
13143 .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13144 .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13145 .set_tx_hwts_info = hclge_ptp_set_tx_info,
13146 .get_rx_hwts = hclge_ptp_get_rx_hwts,
13147 .get_ts_info = hclge_ptp_get_ts_info,
13148 .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13151 static struct hnae3_ae_algo ae_algo = {
13153 .pdev_id_table = ae_algo_pci_tbl,
13156 static int hclge_init(void)
13158 pr_info("%s is initializing\n", HCLGE_NAME);
13160 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
13162 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13166 hnae3_register_ae_algo(&ae_algo);
13171 static void hclge_exit(void)
13173 hnae3_unregister_ae_algo_prepare(&ae_algo);
13174 hnae3_unregister_ae_algo(&ae_algo);
13175 destroy_workqueue(hclge_wq);
13177 module_init(hclge_init);
13178 module_exit(hclge_exit);
13180 MODULE_LICENSE("GPL");
13181 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13182 MODULE_DESCRIPTION("HCLGE Driver");
13183 MODULE_VERSION(HCLGE_MOD_VERSION);