// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME			"hclge"
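/* Read a u64 statistic at a byte offset within a stats structure; the
 * offsets in g_mac_stats_string below are generated with offsetof() so
 * the string table stays in sync with struct hclge_mac_stats.
 */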
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90
#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500
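/* Reset retry and synchronization tunables; the *_TIME values are
 * millisecond delays (an assumption based on how such constants are
 * typically consumed by msleep() in the reset path).
 */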
/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

#define HCLGE_VF_VPORT_START_NUM	1
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
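/* The register address tables below are presumably consumed by the
 * register dump path (e.g. ethtool -d): command queue registers,
 * miscellaneous/common registers, per-ring registers, and per-vector
 * interrupt registers respectively.
 */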
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App      Loopback test",
	"Serdes   serial Loopback test",
	"Serdes   parallel Loopback test",
	"Phy      Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
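/* Default RSS hash key: the widely used 40-byte Toeplitz key (the same
 * default many NIC drivers ship), giving well-distributed hashing until
 * userspace overrides it via ethtool.
 */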
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
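/* The DFX BD offset list must stay in the same order as
 * hclge_dfx_reg_opcode_list[] below: entry N gives the offset of the
 * BD-number word for the block queried by opcode N.
 */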
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};
static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};
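/* Flow director key layout tables: each entry pairs a key field with its
 * width in bits (48 for MAC addresses, 16 for VLAN tags and ethertype,
 * and so on), for both the outer and the inner (tunneled) headers.
 */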
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};
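/* Query MAC statistics with the fixed 21-descriptor HCLGE_OPC_STATS_MAC
 * command used by older firmware; newer firmware reports its register
 * count and takes the hclge_mac_update_stats_complete() path instead.
 */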
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}
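/* Ask the firmware how many MAC statistics registers it exposes, so the
 * caller can size the descriptor array for HCLGE_OPC_STATS_MAC_ALL.
 */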
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

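	/* The first descriptor carries three statistics words beside the
	 * command head, every following descriptor carries four; hence
	 * one descriptor plus (reg_num - 3) / 4 more, rounded up.
	 */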
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
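/* Accumulate the per-queue RX and TX packet counters; each queue needs
 * its own query command, so this issues two commands per TQP.
 */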
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has one TX queue and one RX queue */
	return kinfo->num_tqps * (2);
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

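	/* Serialize updaters: bail out if another statistics update is
	 * already in flight.
	 */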
	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}

	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check if pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* the NIC's msix number is always equal to the RoCE's. */
		hdev->num_nic_msi = hdev->num_roce_msi;

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		hdev->num_nic_msi = hdev->num_msi;
	}

	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to supporting all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static u32 hclge_get_max_speed(u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

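	/* Combine the low 32 bits with the high 16 bits of the MAC
	 * address; the shift is split as (<< 31) << 1 presumably to stay
	 * well-defined even if the intermediate type were only 32 bits.
	 */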
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length must be in units of 4 bytes when sent to hw */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently non-contiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

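	/* The MIN mask/shift pair is reused for the max field below: both
	 * 16-bit fields place the MSS value at the same in-field bit
	 * positions (an assumption based on the shared field layout).
	 */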
	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure a one-to-one mapping between irqs and queues by default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)

{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++)	{
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)

	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
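/* Check whether the rx buffer left over after private allocation can hold
 * the shared buffer; if it can, compute the shared buffer size and the
 * per-TC high/low waterline thresholds as a side effect.
 */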
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear the TCs starting from the last one */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* clear the TCs starting from the last one */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
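/* Try to carve the whole rx buffer into equal private buffers for the
 * enabled TCs and leave no shared buffer; this only succeeds when the
 * per-TC share stays above the minimum required by dv_buf_size plus the
 * compensation headroom.
 */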
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
2176 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2177 struct hclge_pkt_buf_alloc *buf_alloc)
2179 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2180 struct hclge_rx_com_thrd *req;
2181 struct hclge_desc desc[2];
2182 struct hclge_tc_thrd *tc;
2186 for (i = 0; i < 2; i++) {
2187 hclge_cmd_setup_basic_desc(&desc[i],
2188 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2189 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2191 /* The first descriptor sets the NEXT bit to 1 */
2193 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2195 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2197 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2198 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2200 req->com_thrd[j].high =
2201 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2202 req->com_thrd[j].high |=
2203 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2204 req->com_thrd[j].low =
2205 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2206 req->com_thrd[j].low |=
2207 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2211 /* Send 2 descriptors at one time */
2212 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2214 dev_err(&hdev->pdev->dev,
2215 "common threshold config cmd failed %d\n", ret);
2219 static int hclge_common_wl_config(struct hclge_dev *hdev,
2220 struct hclge_pkt_buf_alloc *buf_alloc)
2222 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2223 struct hclge_rx_com_wl *req;
2224 struct hclge_desc desc;
2227 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2229 req = (struct hclge_rx_com_wl *)desc.data;
2230 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2231 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2233 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2234 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2236 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2238 dev_err(&hdev->pdev->dev,
2239 "common waterline config cmd failed %d\n", ret);
2244 int hclge_buffer_alloc(struct hclge_dev *hdev)
2246 struct hclge_pkt_buf_alloc *pkt_buf;
2249 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2253 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2255 dev_err(&hdev->pdev->dev,
2256 "could not calc tx buffer size for all TCs %d\n", ret);
2260 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2262 dev_err(&hdev->pdev->dev,
2263 "could not alloc tx buffers %d\n", ret);
2267 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2269 dev_err(&hdev->pdev->dev,
2270 "could not calc rx priv buffer size for all TCs %d\n",
2275 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2277 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2282 if (hnae3_dev_dcb_supported(hdev)) {
2283 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2285 dev_err(&hdev->pdev->dev,
2286 "could not configure rx private waterline %d\n",
2291 ret = hclge_common_thrd_config(hdev, pkt_buf);
2293 dev_err(&hdev->pdev->dev,
2294 "could not configure common threshold %d\n",
2300 ret = hclge_common_wl_config(hdev, pkt_buf);
2302 dev_err(&hdev->pdev->dev,
2303 "could not configure common waterline %d\n", ret);
2310 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2312 struct hnae3_handle *roce = &vport->roce;
2313 struct hnae3_handle *nic = &vport->nic;
2315 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2317 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2318 vport->back->num_msi_left == 0)
2321 roce->rinfo.base_vector = vport->back->roce_base_vector;
2323 roce->rinfo.netdev = nic->kinfo.netdev;
2324 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2326 roce->pdev = nic->pdev;
2327 roce->ae_algo = nic->ae_algo;
2328 roce->numa_node_mask = nic->numa_node_mask;
2333 static int hclge_init_msi(struct hclge_dev *hdev)
2335 struct pci_dev *pdev = hdev->pdev;
2339 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2341 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2344 "failed(%d) to allocate MSI/MSI-X vectors\n",
2348 if (vectors < hdev->num_msi)
2349 dev_warn(&hdev->pdev->dev,
2350 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2351 hdev->num_msi, vectors);
2353 hdev->num_msi = vectors;
2354 hdev->num_msi_left = vectors;
2356 hdev->base_msi_vector = pdev->irq;
2357 hdev->roce_base_vector = hdev->base_msi_vector +
2358 hdev->roce_base_msix_offset;
2360 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361 sizeof(u16), GFP_KERNEL);
2362 if (!hdev->vector_status) {
2363 pci_free_irq_vectors(pdev);
2367 for (i = 0; i < hdev->num_msi; i++)
2368 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2370 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2371 sizeof(int), GFP_KERNEL);
2372 if (!hdev->vector_irq) {
2373 pci_free_irq_vectors(pdev);
2380 static u8 hclge_check_speed_dup(u8 duplex, int speed)
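	/* only 10M and 100M support half duplex; any other speed is
	 * forced to full duplex regardless of the requested setting
	 */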
2382 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2383 duplex = HCLGE_MAC_FULL;
2388 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2391 struct hclge_config_mac_speed_dup_cmd *req;
2392 struct hclge_desc desc;
2395 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2397 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2400 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
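	/* hardware speed encoding, per the cases below: 1G=0, 10G=1,
	 * 25G=2, 40G=3, 50G=4, 100G=5, 10M=6, 100M=7
	 */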
2403 case HCLGE_MAC_SPEED_10M:
2404 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2405 HCLGE_CFG_SPEED_S, 6);
2407 case HCLGE_MAC_SPEED_100M:
2408 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2409 HCLGE_CFG_SPEED_S, 7);
2411 case HCLGE_MAC_SPEED_1G:
2412 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2413 HCLGE_CFG_SPEED_S, 0);
2415 case HCLGE_MAC_SPEED_10G:
2416 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2417 HCLGE_CFG_SPEED_S, 1);
2419 case HCLGE_MAC_SPEED_25G:
2420 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2421 HCLGE_CFG_SPEED_S, 2);
2423 case HCLGE_MAC_SPEED_40G:
2424 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2425 HCLGE_CFG_SPEED_S, 3);
2427 case HCLGE_MAC_SPEED_50G:
2428 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2429 HCLGE_CFG_SPEED_S, 4);
2431 case HCLGE_MAC_SPEED_100G:
2432 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2433 HCLGE_CFG_SPEED_S, 5);
2436 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2440 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2443 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2445 dev_err(&hdev->pdev->dev,
2446 "mac speed/duplex config cmd failed %d.\n", ret);
2453 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2457 duplex = hclge_check_speed_dup(duplex, speed);
2458 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2461 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2465 hdev->hw.mac.speed = speed;
2466 hdev->hw.mac.duplex = duplex;
2471 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2474 struct hclge_vport *vport = hclge_get_vport(handle);
2475 struct hclge_dev *hdev = vport->back;
2477 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2480 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2482 struct hclge_config_auto_neg_cmd *req;
2483 struct hclge_desc desc;
2487 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2489 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2491 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2492 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2494 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2496 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2502 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2504 struct hclge_vport *vport = hclge_get_vport(handle);
2505 struct hclge_dev *hdev = vport->back;
2507 if (!hdev->hw.mac.support_autoneg) {
2509 dev_err(&hdev->pdev->dev,
2510 "autoneg is not supported by current port\n");
2517 return hclge_set_autoneg_en(hdev, enable);
2520 static int hclge_get_autoneg(struct hnae3_handle *handle)
2522 struct hclge_vport *vport = hclge_get_vport(handle);
2523 struct hclge_dev *hdev = vport->back;
2524 struct phy_device *phydev = hdev->hw.mac.phydev;
2527 return phydev->autoneg;
2529 return hdev->hw.mac.autoneg;
2532 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2534 struct hclge_vport *vport = hclge_get_vport(handle);
2535 struct hclge_dev *hdev = vport->back;
2538 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2540 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2543 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2546 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2548 struct hclge_vport *vport = hclge_get_vport(handle);
2549 struct hclge_dev *hdev = vport->back;
2551 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2552 return hclge_set_autoneg_en(hdev, !halt);
2557 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2559 struct hclge_config_fec_cmd *req;
2560 struct hclge_desc desc;
2563 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2565 req = (struct hclge_config_fec_cmd *)desc.data;
2566 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2567 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2568 if (fec_mode & BIT(HNAE3_FEC_RS))
2569 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2570 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2571 if (fec_mode & BIT(HNAE3_FEC_BASER))
2572 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2573 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2575 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2577 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2582 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2584 struct hclge_vport *vport = hclge_get_vport(handle);
2585 struct hclge_dev *hdev = vport->back;
2586 struct hclge_mac *mac = &hdev->hw.mac;
2589 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2590 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2594 ret = hclge_set_fec_hw(hdev, fec_mode);
2598 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2602 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2605 struct hclge_vport *vport = hclge_get_vport(handle);
2606 struct hclge_dev *hdev = vport->back;
2607 struct hclge_mac *mac = &hdev->hw.mac;
2610 *fec_ability = mac->fec_ability;
2612 *fec_mode = mac->fec_mode;
2615 static int hclge_mac_init(struct hclge_dev *hdev)
2617 struct hclge_mac *mac = &hdev->hw.mac;
2620 hdev->support_sfp_query = true;
2621 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2622 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2623 hdev->hw.mac.duplex);
2625 dev_err(&hdev->pdev->dev,
2626 "Config mac speed dup fail ret=%d\n", ret);
2630 if (hdev->hw.mac.support_autoneg) {
2631 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2633 dev_err(&hdev->pdev->dev,
2634 "Config mac autoneg fail ret=%d\n", ret);
2641 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2642 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2644 dev_err(&hdev->pdev->dev,
2645 "Fec mode init fail, ret = %d\n", ret);
2650 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2652 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2656 ret = hclge_set_default_loopback(hdev);
2660 ret = hclge_buffer_alloc(hdev);
2662 dev_err(&hdev->pdev->dev,
2663 "allocate buffer fail, ret=%d\n", ret);
2668 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2670 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2671 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2672 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2673 hclge_wq, &hdev->service_task, 0);
2676 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2678 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2679 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2680 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2681 hclge_wq, &hdev->service_task, 0);
2684 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2686 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2687 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2688 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2689 hclge_wq, &hdev->service_task,
2693 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2695 struct hclge_link_status_cmd *req;
2696 struct hclge_desc desc;
2700 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2701 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2703 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2708 req = (struct hclge_link_status_cmd *)desc.data;
2709 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2711 return !!link_status;
2714 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2716 unsigned int mac_state;
2719 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2722 mac_state = hclge_get_mac_link_status(hdev);
2724 if (hdev->hw.mac.phydev) {
2725 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2726 link_stat = mac_state &
2727 hdev->hw.mac.phydev->link;
2732 link_stat = mac_state;
2738 static void hclge_update_link_status(struct hclge_dev *hdev)
2740 struct hnae3_client *rclient = hdev->roce_client;
2741 struct hnae3_client *client = hdev->nic_client;
2742 struct hnae3_handle *rhandle;
2743 struct hnae3_handle *handle;
2750 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2753 state = hclge_get_mac_phy_link(hdev);
2754 if (state != hdev->hw.mac.link) {
2755 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2756 handle = &hdev->vport[i].nic;
2757 client->ops->link_status_change(handle, state);
2758 hclge_config_mac_tnl_int(hdev, state);
2759 rhandle = &hdev->vport[i].roce;
2760 if (rclient && rclient->ops->link_status_change)
2761 rclient->ops->link_status_change(rhandle,
2764 hdev->hw.mac.link = state;
2767 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2770 static void hclge_update_port_capability(struct hclge_mac *mac)
2772 /* update fec ability by speed */
2773 hclge_convert_setting_fec(mac);
2775 /* firmware cannot identify the backplane type; the media type
2776 * read from the configuration can help deal with it
2778 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2779 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2780 mac->module_type = HNAE3_MODULE_TYPE_KR;
2781 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2782 mac->module_type = HNAE3_MODULE_TYPE_TP;
2784 if (mac->support_autoneg) {
2785 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2786 linkmode_copy(mac->advertising, mac->supported);
2788 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2790 linkmode_zero(mac->advertising);
2794 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2796 struct hclge_sfp_info_cmd *resp;
2797 struct hclge_desc desc;
2800 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2801 resp = (struct hclge_sfp_info_cmd *)desc.data;
2802 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2803 if (ret == -EOPNOTSUPP) {
2804 dev_warn(&hdev->pdev->dev,
2805 "IMP do not support get SFP speed %d\n", ret);
2808 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2812 *speed = le32_to_cpu(resp->speed);
2817 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2819 struct hclge_sfp_info_cmd *resp;
2820 struct hclge_desc desc;
2823 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2824 resp = (struct hclge_sfp_info_cmd *)desc.data;
2826 resp->query_type = QUERY_ACTIVE_SPEED;
2828 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2829 if (ret == -EOPNOTSUPP) {
2830 dev_warn(&hdev->pdev->dev,
2831 "IMP does not support get SFP info %d\n", ret);
2834 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2838 mac->speed = le32_to_cpu(resp->speed);
2839 /* if resp->speed_ability is 0, it means the firmware is an old
2840 * version, so do not update these params
2842 if (resp->speed_ability) {
2843 mac->module_type = le32_to_cpu(resp->module_type);
2844 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2845 mac->autoneg = resp->autoneg;
2846 mac->support_autoneg = resp->autoneg_ability;
2847 mac->speed_type = QUERY_ACTIVE_SPEED;
2848 if (!resp->active_fec)
2851 mac->fec_mode = BIT(resp->active_fec);
2853 mac->speed_type = QUERY_SFP_SPEED;
2859 static int hclge_update_port_info(struct hclge_dev *hdev)
2861 struct hclge_mac *mac = &hdev->hw.mac;
2862 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2865 /* get the port info from SFP cmd if not copper port */
2866 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2869 /* if IMP does not support getting SFP/qSFP info, return directly */
2870 if (!hdev->support_sfp_query)
2873 if (hdev->pdev->revision >= 0x21)
2874 ret = hclge_get_sfp_info(hdev, mac);
2876 ret = hclge_get_sfp_speed(hdev, &speed);
2878 if (ret == -EOPNOTSUPP) {
2879 hdev->support_sfp_query = false;
2885 if (hdev->pdev->revision >= 0x21) {
2886 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2887 hclge_update_port_capability(mac);
2890 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2893 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2894 return 0; /* do nothing if no SFP */
2896 /* must config full duplex for SFP */
2897 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2901 static int hclge_get_status(struct hnae3_handle *handle)
2903 struct hclge_vport *vport = hclge_get_vport(handle);
2904 struct hclge_dev *hdev = vport->back;
2906 hclge_update_link_status(hdev);
2908 return hdev->hw.mac.link;
2911 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2913 if (pci_num_vf(hdev->pdev) == 0) {
2914 dev_err(&hdev->pdev->dev,
2915 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2919 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2920 dev_err(&hdev->pdev->dev,
2921 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2922 vf, pci_num_vf(hdev->pdev));
2926 /* VFs start from 1 in vport */
2927 vf += HCLGE_VF_VPORT_START_NUM;
2928 return &hdev->vport[vf];
2931 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2932 struct ifla_vf_info *ivf)
2934 struct hclge_vport *vport = hclge_get_vport(handle);
2935 struct hclge_dev *hdev = vport->back;
2937 vport = hclge_get_vf_vport(hdev, vf);
2942 ivf->linkstate = vport->vf_info.link_state;
2943 ivf->spoofchk = vport->vf_info.spoofchk;
2944 ivf->trusted = vport->vf_info.trusted;
2945 ivf->min_tx_rate = 0;
2946 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2947 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2948 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2949 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2950 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2955 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2958 struct hclge_vport *vport = hclge_get_vport(handle);
2959 struct hclge_dev *hdev = vport->back;
2961 vport = hclge_get_vf_vport(hdev, vf);
2965 vport->vf_info.link_state = link_state;
2970 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2972 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2974 /* fetch the events from their corresponding regs */
2975 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2976 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2977 msix_src_reg = hclge_read_dev(&hdev->hw,
2978 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2980 /* Assumption: If by any chance reset and mailbox events are reported
2981 * together then we will only process the reset event in this go and
2982 * will defer the processing of the mailbox events. Since we would not
2983 * have cleared the RX CMDQ event this time, we would receive another
2984 * interrupt from H/W just for the mailbox.
2986 * check for vector0 reset event sources
2988 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2989 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2990 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2991 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2992 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2993 hdev->rst_stats.imp_rst_cnt++;
2994 return HCLGE_VECTOR0_EVENT_RST;
2997 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2998 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2999 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3000 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3001 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3002 hdev->rst_stats.global_rst_cnt++;
3003 return HCLGE_VECTOR0_EVENT_RST;
3006 /* check for vector0 msix event source */
3007 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3008 *clearval = msix_src_reg;
3009 return HCLGE_VECTOR0_EVENT_ERR;
3012 /* check for vector0 mailbox(=CMDQ RX) event source */
3013 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3014 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3015 *clearval = cmdq_src_reg;
3016 return HCLGE_VECTOR0_EVENT_MBX;
3019 /* print other vector0 event source */
3020 dev_info(&hdev->pdev->dev,
3021 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3022 cmdq_src_reg, msix_src_reg);
3023 *clearval = msix_src_reg;
3025 return HCLGE_VECTOR0_EVENT_OTHER;
3028 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3031 switch (event_type) {
3032 case HCLGE_VECTOR0_EVENT_RST:
3033 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3035 case HCLGE_VECTOR0_EVENT_MBX:
3036 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3043 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3045 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3046 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3047 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3048 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3049 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3052 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3054 writel(enable ? 1 : 0, vector->addr);
3057 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3059 struct hclge_dev *hdev = data;
3063 hclge_enable_vector(&hdev->misc_vector, false);
3064 event_cause = hclge_check_event_cause(hdev, &clearval);
3066 /* vector 0 interrupt is shared with reset and mailbox source events. */
3067 switch (event_cause) {
3068 case HCLGE_VECTOR0_EVENT_ERR:
3069 /* we do not know what type of reset is required now. This could
3070 * only be decided after we fetch the type of errors which
3071 * caused this event. Therefore, we will do below for now:
3072 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3073 * have deferred type of reset to be used.
3074 * 2. Schedule the reset service task.
3075 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3076 * will fetch the correct type of reset. This would be done
3077 * by first decoding the types of errors.
3079 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3081 case HCLGE_VECTOR0_EVENT_RST:
3082 hclge_reset_task_schedule(hdev);
3084 case HCLGE_VECTOR0_EVENT_MBX:
3085 /* If we are here then,
3086 * 1. Either we are not handling any mbx task and we are not
3089 * 2. We could be handling a mbx task but nothing more is
3091 * In both cases, we should schedule mbx task as there are more
3092 * mbx messages reported by this interrupt.
3094 hclge_mbx_task_schedule(hdev);
3097 dev_warn(&hdev->pdev->dev,
3098 "received unknown or unhandled event of vector0\n");
3102 hclge_clear_event_cause(hdev, event_cause, clearval);
3104 /* Enable interrupt if it is not caused by reset. And when
3105 * clearval equals 0, it means the interrupt status may be
3106 * cleared by hardware before driver reads status register.
3107 * For this case, vector0 interrupt also should be enabled.
3110 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3111 hclge_enable_vector(&hdev->misc_vector, true);
3117 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3119 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3120 dev_warn(&hdev->pdev->dev,
3121 "vector(vector_id %d) has been freed.\n", vector_id);
3125 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3126 hdev->num_msi_left += 1;
3127 hdev->num_msi_used -= 1;
3130 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3132 struct hclge_misc_vector *vector = &hdev->misc_vector;
3134 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3136 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3137 hdev->vector_status[0] = 0;
3139 hdev->num_msi_left -= 1;
3140 hdev->num_msi_used += 1;
3143 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3144 const cpumask_t *mask)
3146 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3149 cpumask_copy(&hdev->affinity_mask, mask);
3152 static void hclge_irq_affinity_release(struct kref *ref)
3156 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3158 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3159 &hdev->affinity_mask);
3161 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3162 hdev->affinity_notify.release = hclge_irq_affinity_release;
3163 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3164 &hdev->affinity_notify);
3167 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3169 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3170 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3173 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3177 hclge_get_misc_vector(hdev);
3179 /* this would be explicitly freed in the end */
3180 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3181 0, "hclge_misc", hdev);
3183 hclge_free_vector(hdev, 0);
3184 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3185 hdev->misc_vector.vector_irq);
3191 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3193 free_irq(hdev->misc_vector.vector_irq, hdev);
3194 hclge_free_vector(hdev, 0);
3197 int hclge_notify_client(struct hclge_dev *hdev,
3198 enum hnae3_reset_notify_type type)
3200 struct hnae3_client *client = hdev->nic_client;
3203 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3206 if (!client->ops->reset_notify)
3209 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3210 struct hnae3_handle *handle = &hdev->vport[i].nic;
3213 ret = client->ops->reset_notify(handle, type);
3215 dev_err(&hdev->pdev->dev,
3216 "notify nic client failed %d(%d)\n", type, ret);
3224 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3225 enum hnae3_reset_notify_type type)
3227 struct hnae3_client *client = hdev->roce_client;
3231 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3234 if (!client->ops->reset_notify)
3237 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3238 struct hnae3_handle *handle = &hdev->vport[i].roce;
3240 ret = client->ops->reset_notify(handle, type);
3242 dev_err(&hdev->pdev->dev,
3243 "notify roce client failed %d(%d)",
3252 static int hclge_reset_wait(struct hclge_dev *hdev)
3254 #define HCLGE_RESET_WAIT_MS 100
3255 #define HCLGE_RESET_WAIT_CNT 200
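/* worst case the polling loop below waits
 * HCLGE_RESET_WAIT_CNT * HCLGE_RESET_WAIT_MS = 200 * 100ms = 20s
 */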
3256 u32 val, reg, reg_bit;
3259 switch (hdev->reset_type) {
3260 case HNAE3_IMP_RESET:
3261 reg = HCLGE_GLOBAL_RESET_REG;
3262 reg_bit = HCLGE_IMP_RESET_BIT;
3264 case HNAE3_GLOBAL_RESET:
3265 reg = HCLGE_GLOBAL_RESET_REG;
3266 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3268 case HNAE3_FUNC_RESET:
3269 reg = HCLGE_FUN_RST_ING;
3270 reg_bit = HCLGE_FUN_RST_ING_B;
3272 case HNAE3_FLR_RESET:
3275 dev_err(&hdev->pdev->dev,
3276 "Wait for unsupported reset type: %d\n",
3281 if (hdev->reset_type == HNAE3_FLR_RESET) {
3282 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3283 cnt++ < HCLGE_RESET_WAIT_CNT)
3284 msleep(HCLGE_RESET_WAIT_MS);
3286 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3287 dev_err(&hdev->pdev->dev,
3288 "flr wait timeout: %u\n", cnt);
3295 val = hclge_read_dev(&hdev->hw, reg);
3296 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3297 msleep(HCLGE_RESET_WAIT_MS);
3298 val = hclge_read_dev(&hdev->hw, reg);
3302 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3303 dev_warn(&hdev->pdev->dev,
3304 "Wait for reset timeout: %d\n", hdev->reset_type);
3311 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3313 struct hclge_vf_rst_cmd *req;
3314 struct hclge_desc desc;
3316 req = (struct hclge_vf_rst_cmd *)desc.data;
3317 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3318 req->dest_vfid = func_id;
3323 return hclge_cmd_send(&hdev->hw, &desc, 1);
3326 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3330 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3331 struct hclge_vport *vport = &hdev->vport[i];
3334 /* Send cmd to set/clear VF's FUNC_RST_ING */
3335 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3337 dev_err(&hdev->pdev->dev,
3338 "set vf(%u) rst failed %d!\n",
3339 vport->vport_id, ret);
3343 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3346 /* Inform VF to process the reset.
3347 * hclge_inform_reset_assert_to_vf may fail if VF
3348 * driver is not loaded.
3350 ret = hclge_inform_reset_assert_to_vf(vport);
3352 dev_warn(&hdev->pdev->dev,
3353 "inform reset to vf(%u) failed %d!\n",
3354 vport->vport_id, ret);
3360 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3362 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3363 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3364 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3367 hclge_mbx_handler(hdev);
3369 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
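/* Poll firmware until every running VF reports reset-ready, servicing
 * the mailbox in between so VFs can answer; the loop below allows up to
 * HCLGE_PF_RESET_SYNC_CNT * HCLGE_PF_RESET_SYNC_TIME
 * (1500 * 20ms = 30s) before timing out.
 */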
3372 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3374 struct hclge_pf_rst_sync_cmd *req;
3375 struct hclge_desc desc;
3379 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3380 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3383 /* VF needs to bring down its netdev via mbx during PF or FLR reset */
3384 hclge_mailbox_service_task(hdev);
3386 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3387 /* for compatibility with old firmware, wait
3388 * 100 ms for VF to stop IO
3390 if (ret == -EOPNOTSUPP) {
3391 msleep(HCLGE_RESET_SYNC_TIME);
3394 dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3397 } else if (req->all_vf_ready) {
3400 msleep(HCLGE_PF_RESET_SYNC_TIME);
3401 hclge_cmd_reuse_desc(&desc, true);
3402 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3404 dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3408 void hclge_report_hw_error(struct hclge_dev *hdev,
3409 enum hnae3_hw_error_type type)
3411 struct hnae3_client *client = hdev->nic_client;
3414 if (!client || !client->ops->process_hw_error ||
3415 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3418 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3419 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3422 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3426 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3427 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3428 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3429 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3430 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3433 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3434 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3435 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3436 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3440 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3442 struct hclge_desc desc;
3443 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3446 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3447 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3448 req->fun_reset_vfid = func_id;
3450 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3452 dev_err(&hdev->pdev->dev,
3453 "send function reset cmd fail, status =%d\n", ret);
3458 static void hclge_do_reset(struct hclge_dev *hdev)
3460 struct hnae3_handle *handle = &hdev->vport[0].nic;
3461 struct pci_dev *pdev = hdev->pdev;
3464 if (hclge_get_hw_reset_stat(handle)) {
3465 dev_info(&pdev->dev, "Hardware reset not finish\n");
3466 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3467 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3468 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3472 switch (hdev->reset_type) {
3473 case HNAE3_GLOBAL_RESET:
3474 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3475 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3476 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3477 dev_info(&pdev->dev, "Global Reset requested\n");
3479 case HNAE3_FUNC_RESET:
3480 dev_info(&pdev->dev, "PF Reset requested\n");
3481 /* schedule again to check later */
3482 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3483 hclge_reset_task_schedule(hdev);
3485 case HNAE3_FLR_RESET:
3486 dev_info(&pdev->dev, "FLR requested\n");
3487 /* schedule again to check later */
3488 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3489 hclge_reset_task_schedule(hdev);
3492 dev_warn(&pdev->dev,
3493 "Unsupported reset type: %d\n", hdev->reset_type);
3498 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3499 unsigned long *addr)
3501 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3502 struct hclge_dev *hdev = ae_dev->priv;
3504 /* first, resolve any unknown reset type to the known type(s) */
3505 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3506 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3507 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3508 /* we will intentionally ignore any errors from this function
3509 * as we will end up in *some* reset request in any case
3511 if (hclge_handle_hw_msix_error(hdev, addr))
3512 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3515 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3516 /* We deferred the clearing of the error event which caused the
3517 * interrupt since it was not possible to do that in
3518 * interrupt context (and this is the reason we introduced the
3519 * new UNKNOWN reset type). Now that the errors have been
3520 * handled and cleared in hardware, we can safely enable
3521 * interrupts. This is an exception to the norm.
3523 hclge_enable_vector(&hdev->misc_vector, true);
3526 /* return the highest priority reset level amongst all */
3527 if (test_bit(HNAE3_IMP_RESET, addr)) {
3528 rst_level = HNAE3_IMP_RESET;
3529 clear_bit(HNAE3_IMP_RESET, addr);
3530 clear_bit(HNAE3_GLOBAL_RESET, addr);
3531 clear_bit(HNAE3_FUNC_RESET, addr);
3532 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3533 rst_level = HNAE3_GLOBAL_RESET;
3534 clear_bit(HNAE3_GLOBAL_RESET, addr);
3535 clear_bit(HNAE3_FUNC_RESET, addr);
3536 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3537 rst_level = HNAE3_FUNC_RESET;
3538 clear_bit(HNAE3_FUNC_RESET, addr);
3539 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3540 rst_level = HNAE3_FLR_RESET;
3541 clear_bit(HNAE3_FLR_RESET, addr);
3544 if (hdev->reset_type != HNAE3_NONE_RESET &&
3545 rst_level < hdev->reset_type)
3546 return HNAE3_NONE_RESET;
3551 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3555 switch (hdev->reset_type) {
3556 case HNAE3_IMP_RESET:
3557 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3559 case HNAE3_GLOBAL_RESET:
3560 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3569 /* For revision 0x20, the reset interrupt source
3570 * can only be cleared after the hardware reset is done
3572 if (hdev->pdev->revision == 0x20)
3573 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3576 hclge_enable_vector(&hdev->misc_vector, true);
3579 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3583 switch (hdev->reset_type) {
3584 case HNAE3_FUNC_RESET:
3586 case HNAE3_FLR_RESET:
3587 ret = hclge_set_all_vf_rst(hdev, true);
3596 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3600 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3602 reg_val |= HCLGE_NIC_SW_RST_RDY;
3604 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3606 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3609 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3614 switch (hdev->reset_type) {
3615 case HNAE3_FUNC_RESET:
3616 /* confirm whether all running VFs are ready
3617 * before requesting PF reset
3619 ret = hclge_func_reset_sync_vf(hdev);
3623 ret = hclge_func_reset_cmd(hdev, 0);
3625 dev_err(&hdev->pdev->dev,
3626 "asserting function reset fail %d!\n", ret);
3630 /* After performing PF reset, it is not necessary to do the
3631 * mailbox handling or send any command to firmware, because
3632 * any mailbox handling or command to firmware is only valid
3633 * after hclge_cmd_init is called.
3635 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3636 hdev->rst_stats.pf_rst_cnt++;
3638 case HNAE3_FLR_RESET:
3639 /* confirm whether all running VFs are ready
3640 * before requesting PF reset
3642 ret = hclge_func_reset_sync_vf(hdev);
3646 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3647 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3648 hdev->rst_stats.flr_rst_cnt++;
3650 case HNAE3_IMP_RESET:
3651 hclge_handle_imp_error(hdev);
3652 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3653 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3654 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3660 /* inform hardware that preparatory work is done */
3661 msleep(HCLGE_RESET_SYNC_TIME);
3662 hclge_reset_handshake(hdev, true);
3663 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3668 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3670 #define MAX_RESET_FAIL_CNT 5
3672 if (hdev->reset_pending) {
3673 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3674 hdev->reset_pending);
3676 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3677 HCLGE_RESET_INT_M) {
3678 dev_info(&hdev->pdev->dev,
3679 "reset failed because new reset interrupt\n");
3680 hclge_clear_reset_cause(hdev);
3682 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3683 hdev->rst_stats.reset_fail_cnt++;
3684 set_bit(hdev->reset_type, &hdev->reset_pending);
3685 dev_info(&hdev->pdev->dev,
3686 "re-schedule reset task(%u)\n",
3687 hdev->rst_stats.reset_fail_cnt);
3691 hclge_clear_reset_cause(hdev);
3693 /* recover the handshake status when reset fails */
3694 hclge_reset_handshake(hdev, true);
3696 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3698 hclge_dbg_dump_rst_info(hdev);
3700 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3705 static int hclge_set_rst_done(struct hclge_dev *hdev)
3707 struct hclge_pf_rst_done_cmd *req;
3708 struct hclge_desc desc;
3711 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3712 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3713 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3715 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3716 /* To be compatible with the old firmware, which does not support
3717 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3720 if (ret == -EOPNOTSUPP) {
3721 dev_warn(&hdev->pdev->dev,
3722 "current firmware does not support command(0x%x)!\n",
3723 HCLGE_OPC_PF_RST_DONE);
3726 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3733 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3737 switch (hdev->reset_type) {
3738 case HNAE3_FUNC_RESET:
3740 case HNAE3_FLR_RESET:
3741 ret = hclge_set_all_vf_rst(hdev, false);
3743 case HNAE3_GLOBAL_RESET:
3745 case HNAE3_IMP_RESET:
3746 ret = hclge_set_rst_done(hdev);
3752 /* clear up the handshake status after re-initialization is done */
3753 hclge_reset_handshake(hdev, false);
3758 static int hclge_reset_stack(struct hclge_dev *hdev)
3762 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3766 ret = hclge_reset_ae_dev(hdev->ae_dev);
3770 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3774 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3777 static void hclge_reset(struct hclge_dev *hdev)
3779 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3780 enum hnae3_reset_type reset_level;
3783 /* Initialize ae_dev reset status as well, in case enet layer wants to
3784 * know if device is undergoing reset
3786 ae_dev->reset_type = hdev->reset_type;
3787 hdev->rst_stats.reset_cnt++;
3788 /* perform reset of the stack & ae device for a client */
3789 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3793 ret = hclge_reset_prepare_down(hdev);
3798 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3800 goto err_reset_lock;
3804 ret = hclge_reset_prepare_wait(hdev);
3808 if (hclge_reset_wait(hdev))
3811 hdev->rst_stats.hw_reset_done_cnt++;
3813 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3819 ret = hclge_reset_stack(hdev);
3821 goto err_reset_lock;
3823 hclge_clear_reset_cause(hdev);
3825 ret = hclge_reset_prepare_up(hdev);
3827 goto err_reset_lock;
3831 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3832 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3836 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3841 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3843 goto err_reset_lock;
3847 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3851 hdev->last_reset_time = jiffies;
3852 hdev->rst_stats.reset_fail_cnt = 0;
3853 hdev->rst_stats.reset_done_cnt++;
3854 ae_dev->reset_type = HNAE3_NONE_RESET;
3855 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3857 /* if default_reset_request has a higher level reset request,
3858 * it should be handled as soon as possible, since some errors
3859 * need this kind of reset to be fixed.
3861 reset_level = hclge_get_reset_level(ae_dev,
3862 &hdev->default_reset_request);
3863 if (reset_level != HNAE3_NONE_RESET)
3864 set_bit(reset_level, &hdev->reset_request);
3871 if (hclge_reset_err_handle(hdev))
3872 hclge_reset_task_schedule(hdev);
3875 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3877 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3878 struct hclge_dev *hdev = ae_dev->priv;
3880 /* We might end up getting called broadly because of 2 below cases:
3881 * 1. Recoverable error was conveyed through APEI and only way to bring
3882 * normalcy is to reset.
3883 * 2. A new reset request from the stack due to timeout
3885 * For the first case, the error event might not have an ae handle available.
3886 * check if this is a new reset request and we are not here just because
3887 * last reset attempt did not succeed and watchdog hit us again. We will
3888 * know this if last reset request did not occur very recently (watchdog
3889 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ)
3890 * In case of a new request we reset the "reset level" to PF reset.
3891 * And if it is a repeat reset request of the most recent one then we
3892 * want to make sure we throttle the reset request. Therefore, we will
3893 * not allow it again before 3*HZ has elapsed.
3896 handle = &hdev->vport[0].nic;
3898 if (time_before(jiffies, (hdev->last_reset_time +
3899 HCLGE_RESET_INTERVAL))) {
3900 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3902 } else if (hdev->default_reset_request) {
3904 hclge_get_reset_level(ae_dev,
3905 &hdev->default_reset_request);
3906 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3907 hdev->reset_level = HNAE3_FUNC_RESET;
3910 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3913 /* request reset & schedule reset task */
3914 set_bit(hdev->reset_level, &hdev->reset_request);
3915 hclge_reset_task_schedule(hdev);
3917 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3918 hdev->reset_level++;
3921 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3922 enum hnae3_reset_type rst_type)
3924 struct hclge_dev *hdev = ae_dev->priv;
3926 set_bit(rst_type, &hdev->default_reset_request);
3929 static void hclge_reset_timer(struct timer_list *t)
3931 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3933 /* if default_reset_request has no value, it means that this reset
3934 * request has already been handled, so just return here
3936 if (!hdev->default_reset_request)
3939 dev_info(&hdev->pdev->dev,
3940 "triggering reset in reset timer\n");
3941 hclge_reset_event(hdev->pdev, NULL);
3944 static void hclge_reset_subtask(struct hclge_dev *hdev)
3946 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3948 /* check if there is any ongoing reset in the hardware. This status can
3949 * be checked from reset_pending. If there is, then we need to wait for
3950 * hardware to complete the reset.
3951 * a. If we are able to figure out in reasonable time that hardware
3952 * has fully reset, then we can proceed with driver, client
3954 * b. else, we can come back later to check this status so re-sched
3957 hdev->last_reset_time = jiffies;
3958 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3959 if (hdev->reset_type != HNAE3_NONE_RESET)
3962 /* check if we got any *new* reset requests to be honored */
3963 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3964 if (hdev->reset_type != HNAE3_NONE_RESET)
3965 hclge_do_reset(hdev);
3967 hdev->reset_type = HNAE3_NONE_RESET;
3970 static void hclge_reset_service_task(struct hclge_dev *hdev)
3972 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3975 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3978 hclge_reset_subtask(hdev);
3980 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3983 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3987 /* start from vport 1, since the PF is always alive */
3988 for (i = 1; i < hdev->num_alloc_vport; i++) {
3989 struct hclge_vport *vport = &hdev->vport[i];
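		/* a VF is treated as dead if it has not refreshed
		 * last_active_jiffies within the last 8 seconds
		 */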
3991 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3992 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3994 /* If the VF is not alive, reset its MPS to the default value */
3995 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3996 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4000 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4002 unsigned long delta = round_jiffies_relative(HZ);
4004 /* Always handle the link updating to make sure link state is
4005 * updated when it is triggered by mbx.
4007 hclge_update_link_status(hdev);
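	/* throttle the heavier work below to roughly once per second:
	 * if the last full pass ran less than HZ jiffies ago, reschedule
	 * for the remainder of the interval and return early
	 */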
4009 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4010 delta = jiffies - hdev->last_serv_processed;
4012 if (delta < round_jiffies_relative(HZ)) {
4013 delta = round_jiffies_relative(HZ) - delta;
4018 hdev->serv_processed_cnt++;
4019 hclge_update_vport_alive(hdev);
4021 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4022 hdev->last_serv_processed = jiffies;
4026 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4027 hclge_update_stats_for_all(hdev);
4029 hclge_update_port_info(hdev);
4030 hclge_sync_vlan_filter(hdev);
4032 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4033 hclge_rfs_filter_expire(hdev);
4035 hdev->last_serv_processed = jiffies;
4038 hclge_task_schedule(hdev, delta);
4041 static void hclge_service_task(struct work_struct *work)
4043 struct hclge_dev *hdev =
4044 container_of(work, struct hclge_dev, service_task.work);
4046 hclge_reset_service_task(hdev);
4047 hclge_mailbox_service_task(hdev);
4048 hclge_periodic_service_task(hdev);
4050 /* Handle reset and mbx again in case periodical task delays the
4051 * handling by calling hclge_task_schedule() in
4052 * hclge_periodic_service_task().
4054 hclge_reset_service_task(hdev);
4055 hclge_mailbox_service_task(hdev);
4058 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4060 /* VF handle has no client */
4061 if (!handle->client)
4062 return container_of(handle, struct hclge_vport, nic);
4063 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4064 return container_of(handle, struct hclge_vport, roce);
4066 return container_of(handle, struct hclge_vport, nic);
4069 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4070 struct hnae3_vector_info *vector_info)
4072 struct hclge_vport *vport = hclge_get_vport(handle);
4073 struct hnae3_vector_info *vector = vector_info;
4074 struct hclge_dev *hdev = vport->back;
4078 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4079 vector_num = min(hdev->num_msi_left, vector_num);
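	/* vector 0 is reserved for the misc interrupt, so the search
	 * below starts from vector 1
	 */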
4081 for (j = 0; j < vector_num; j++) {
4082 for (i = 1; i < hdev->num_msi; i++) {
4083 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4084 vector->vector = pci_irq_vector(hdev->pdev, i);
4085 vector->io_addr = hdev->hw.io_base +
4086 HCLGE_VECTOR_REG_BASE +
4087 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4089 HCLGE_VECTOR_VF_OFFSET;
4090 hdev->vector_status[i] = vport->vport_id;
4091 hdev->vector_irq[i] = vector->vector;
4100 hdev->num_msi_left -= alloc;
4101 hdev->num_msi_used += alloc;
4106 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4110 for (i = 0; i < hdev->num_msi; i++)
4111 if (vector == hdev->vector_irq[i])
4117 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4119 struct hclge_vport *vport = hclge_get_vport(handle);
4120 struct hclge_dev *hdev = vport->back;
4123 vector_id = hclge_get_vector_index(hdev, vector);
4124 if (vector_id < 0) {
4125 dev_err(&hdev->pdev->dev,
4126 "Get vector index fail. vector_id =%d\n", vector_id);
4130 hclge_free_vector(hdev, vector_id);
4135 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4137 return HCLGE_RSS_KEY_SIZE;
4140 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4142 return HCLGE_RSS_IND_TBL_SIZE;
4145 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4146 const u8 hfunc, const u8 *key)
4148 struct hclge_rss_config_cmd *req;
4149 unsigned int key_offset = 0;
4150 struct hclge_desc desc;
4155 key_counts = HCLGE_RSS_KEY_SIZE;
4156 req = (struct hclge_rss_config_cmd *)desc.data;
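	/* the hash key is longer than one descriptor can carry, so it is
	 * written in HCLGE_RSS_HASH_KEY_NUM-byte chunks, with the chunk
	 * index passed in the key_offset field of hash_config
	 */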
4158 while (key_counts) {
4159 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4162 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4163 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4165 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4166 memcpy(req->hash_key,
4167 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4169 key_counts -= key_size;
4171 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4173 dev_err(&hdev->pdev->dev,
4174 "Configure RSS config fail, status = %d\n",
4182 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4184 struct hclge_rss_indirection_table_cmd *req;
4185 struct hclge_desc desc;
4189 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
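	/* the indirection table is programmed HCLGE_RSS_CFG_TBL_SIZE
	 * entries per descriptor, HCLGE_RSS_CFG_TBL_NUM descriptors total
	 */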
4191 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4192 hclge_cmd_setup_basic_desc
4193 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4195 req->start_table_index =
4196 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4197 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4199 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4200 req->rss_result[j] =
4201 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4203 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4205 dev_err(&hdev->pdev->dev,
4206 "Configure rss indir table fail,status = %d\n",
4214 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4215 u16 *tc_size, u16 *tc_offset)
4217 struct hclge_rss_tc_mode_cmd *req;
4218 struct hclge_desc desc;
4222 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4223 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
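	/* each TC's 16-bit mode word packs a valid bit, the log2 queue
	 * size and the queue offset of that TC
	 */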
4225 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4228 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4229 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4230 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4231 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4232 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4234 req->rss_tc_mode[i] = cpu_to_le16(mode);
4237 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4239 dev_err(&hdev->pdev->dev,
4240 "Configure rss tc mode fail, status = %d\n", ret);
4245 static void hclge_get_rss_type(struct hclge_vport *vport)
4247 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4248 vport->rss_tuple_sets.ipv4_udp_en ||
4249 vport->rss_tuple_sets.ipv4_sctp_en ||
4250 vport->rss_tuple_sets.ipv6_tcp_en ||
4251 vport->rss_tuple_sets.ipv6_udp_en ||
4252 vport->rss_tuple_sets.ipv6_sctp_en)
4253 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4254 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4255 vport->rss_tuple_sets.ipv6_fragment_en)
4256 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4258 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4261 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4263 struct hclge_rss_input_tuple_cmd *req;
4264 struct hclge_desc desc;
4267 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4269 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4271 /* Get the tuple cfg from pf */
4272 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4273 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4274 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4275 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4276 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4277 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4278 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4279 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4280 hclge_get_rss_type(&hdev->vport[0]);
4281 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4283 dev_err(&hdev->pdev->dev,
4284 "Configure rss input fail, status = %d\n", ret);
4288 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4291 struct hclge_vport *vport = hclge_get_vport(handle);
4294 /* Get hash algorithm */
4296 switch (vport->rss_algo) {
4297 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4298 *hfunc = ETH_RSS_HASH_TOP;
4300 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4301 *hfunc = ETH_RSS_HASH_XOR;
4304 *hfunc = ETH_RSS_HASH_UNKNOWN;
4309 /* Get the RSS Key required by the user */
4311 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4313 /* Get indirect table */
4315 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4316 indir[i] = vport->rss_indirection_tbl[i];
4321 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4322 const u8 *key, const u8 hfunc)
4324 struct hclge_vport *vport = hclge_get_vport(handle);
4325 struct hclge_dev *hdev = vport->back;
4329 /* Set the RSS Hash Key if specified by the user */
4332 case ETH_RSS_HASH_TOP:
4333 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4335 case ETH_RSS_HASH_XOR:
4336 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4338 case ETH_RSS_HASH_NO_CHANGE:
4339 hash_algo = vport->rss_algo;
4345 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4349 /* Update the shadow RSS key with the user specified key */
4350 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4351 vport->rss_algo = hash_algo;
4354 /* Update the shadow RSS table with user specified qids */
4355 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4356 vport->rss_indirection_tbl[i] = indir[i];
4358 /* Update the hardware */
4359 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
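/* translate the ethtool RXH_* tuple flags into the hardware's per-flow
 * hash-field bits; SCTP flows additionally hash the verification tag
 */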
4362 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4364 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4366 if (nfc->data & RXH_L4_B_2_3)
4367 hash_sets |= HCLGE_D_PORT_BIT;
4369 hash_sets &= ~HCLGE_D_PORT_BIT;
4371 if (nfc->data & RXH_IP_SRC)
4372 hash_sets |= HCLGE_S_IP_BIT;
4374 hash_sets &= ~HCLGE_S_IP_BIT;
4376 if (nfc->data & RXH_IP_DST)
4377 hash_sets |= HCLGE_D_IP_BIT;
4379 hash_sets &= ~HCLGE_D_IP_BIT;
4381 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4382 hash_sets |= HCLGE_V_TAG_BIT;
4387 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4388 struct ethtool_rxnfc *nfc)
4390 struct hclge_vport *vport = hclge_get_vport(handle);
4391 struct hclge_dev *hdev = vport->back;
4392 struct hclge_rss_input_tuple_cmd *req;
4393 struct hclge_desc desc;
4397 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4398 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4401 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4402 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4404 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4405 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4406 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4407 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4408 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4409 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4410 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4411 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4413 tuple_sets = hclge_get_rss_hash_bits(nfc);
4414 switch (nfc->flow_type) {
4416 req->ipv4_tcp_en = tuple_sets;
4419 req->ipv6_tcp_en = tuple_sets;
4422 req->ipv4_udp_en = tuple_sets;
4425 req->ipv6_udp_en = tuple_sets;
4428 req->ipv4_sctp_en = tuple_sets;
4431 if ((nfc->data & RXH_L4_B_0_1) ||
4432 (nfc->data & RXH_L4_B_2_3))
4435 req->ipv6_sctp_en = tuple_sets;
4438 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4441 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4447 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4449 dev_err(&hdev->pdev->dev,
4450 "Set rss tuple fail, status = %d\n", ret);
4454 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4455 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4456 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4457 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4458 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4459 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4460 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4461 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4462 hclge_get_rss_type(vport);
4466 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4467 struct ethtool_rxnfc *nfc)
4469 struct hclge_vport *vport = hclge_get_vport(handle);
4474 switch (nfc->flow_type) {
4476 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4479 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4482 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4485 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4488 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4491 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4495 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4504 if (tuple_sets & HCLGE_D_PORT_BIT)
4505 nfc->data |= RXH_L4_B_2_3;
4506 if (tuple_sets & HCLGE_S_PORT_BIT)
4507 nfc->data |= RXH_L4_B_0_1;
4508 if (tuple_sets & HCLGE_D_IP_BIT)
4509 nfc->data |= RXH_IP_DST;
4510 if (tuple_sets & HCLGE_S_IP_BIT)
4511 nfc->data |= RXH_IP_SRC;
4516 static int hclge_get_tc_size(struct hnae3_handle *handle)
4518 struct hclge_vport *vport = hclge_get_vport(handle);
4519 struct hclge_dev *hdev = vport->back;
4521 return hdev->rss_size_max;
4524 int hclge_rss_init_hw(struct hclge_dev *hdev)
4526 struct hclge_vport *vport = hdev->vport;
4527 u8 *rss_indir = vport[0].rss_indirection_tbl;
4528 u16 rss_size = vport[0].alloc_rss_size;
4529 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4530 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4531 u8 *key = vport[0].rss_hash_key;
4532 u8 hfunc = vport[0].rss_algo;
4533 u16 tc_valid[HCLGE_MAX_TC_NUM];
4538 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4542 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4546 ret = hclge_set_rss_input_tuple(hdev);
/* Each TC has the same queue size, and the tc_size set to hardware is
 * the log2 of the roundup power of two of rss_size; the actual queue
 * size is limited by the indirection table.
 */
4554 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4555 dev_err(&hdev->pdev->dev,
4556 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4561 roundup_size = roundup_pow_of_two(rss_size);
4562 roundup_size = ilog2(roundup_size);
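/* Worked example (illustrative): with rss_size = 24,
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so tc_size becomes 5
 * and each TC is treated as spanning 32 entries, while the real queue
 * range is still bounded by the indirection table contents.
 */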
4564 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4567 if (!(hdev->hw_tc_map & BIT(i)))
4571 tc_size[i] = roundup_size;
4572 tc_offset[i] = rss_size * i;
4575 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4578 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4580 struct hclge_vport *vport = hdev->vport;
4583 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4584 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4585 vport[j].rss_indirection_tbl[i] =
4586 i % vport[j].alloc_rss_size;
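/* E.g. with alloc_rss_size = 16, the table becomes 0,1,...,15,0,1,...
 * repeated across all HCLGE_RSS_IND_TBL_SIZE entries, spreading flows
 * evenly over the allocated RSS queues.
 */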
4590 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4592 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4593 struct hclge_vport *vport = hdev->vport;
4595 if (hdev->pdev->revision >= 0x21)
4596 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4598 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4599 vport[i].rss_tuple_sets.ipv4_tcp_en =
4600 HCLGE_RSS_INPUT_TUPLE_OTHER;
4601 vport[i].rss_tuple_sets.ipv4_udp_en =
4602 HCLGE_RSS_INPUT_TUPLE_OTHER;
4603 vport[i].rss_tuple_sets.ipv4_sctp_en =
4604 HCLGE_RSS_INPUT_TUPLE_SCTP;
4605 vport[i].rss_tuple_sets.ipv4_fragment_en =
4606 HCLGE_RSS_INPUT_TUPLE_OTHER;
4607 vport[i].rss_tuple_sets.ipv6_tcp_en =
4608 HCLGE_RSS_INPUT_TUPLE_OTHER;
4609 vport[i].rss_tuple_sets.ipv6_udp_en =
4610 HCLGE_RSS_INPUT_TUPLE_OTHER;
4611 vport[i].rss_tuple_sets.ipv6_sctp_en =
4612 HCLGE_RSS_INPUT_TUPLE_SCTP;
4613 vport[i].rss_tuple_sets.ipv6_fragment_en =
4614 HCLGE_RSS_INPUT_TUPLE_OTHER;
4616 vport[i].rss_algo = rss_algo;
4618 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4619 HCLGE_RSS_KEY_SIZE);
4622 hclge_rss_indir_init_cfg(hdev);
4625 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4626 int vector_id, bool en,
4627 struct hnae3_ring_chain_node *ring_chain)
4629 struct hclge_dev *hdev = vport->back;
4630 struct hnae3_ring_chain_node *node;
4631 struct hclge_desc desc;
4632 struct hclge_ctrl_vector_chain_cmd *req =
4633 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4634 enum hclge_cmd_status status;
4635 enum hclge_opcode_type op;
4636 u16 tqp_type_and_id;
4639 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4640 hclge_cmd_setup_basic_desc(&desc, op, false);
4641 req->int_vector_id = vector_id;
4644 for (node = ring_chain; node; node = node->next) {
4645 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4646 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4648 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4649 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4650 HCLGE_TQP_ID_S, node->tqp_index);
4651 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4653 hnae3_get_field(node->int_gl_idx,
4654 HNAE3_RING_GL_IDX_M,
4655 HNAE3_RING_GL_IDX_S));
4656 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4657 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4658 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4659 req->vfid = vport->vport_id;
4661 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4663 dev_err(&hdev->pdev->dev,
4664 "Map TQP fail, status is %d.\n",
4670 hclge_cmd_setup_basic_desc(&desc,
4673 req->int_vector_id = vector_id;
4678 req->int_cause_num = i;
4679 req->vfid = vport->vport_id;
4680 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4682 dev_err(&hdev->pdev->dev,
4683 "Map TQP fail, status is %d.\n", status);
4691 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4692 struct hnae3_ring_chain_node *ring_chain)
4694 struct hclge_vport *vport = hclge_get_vport(handle);
4695 struct hclge_dev *hdev = vport->back;
4698 vector_id = hclge_get_vector_index(hdev, vector);
4699 if (vector_id < 0) {
4700 dev_err(&hdev->pdev->dev,
4701 "Get vector index fail. vector_id =%d\n", vector_id);
4705 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4708 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4709 struct hnae3_ring_chain_node *ring_chain)
4711 struct hclge_vport *vport = hclge_get_vport(handle);
4712 struct hclge_dev *hdev = vport->back;
4715 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4718 vector_id = hclge_get_vector_index(hdev, vector);
4719 if (vector_id < 0) {
4720 dev_err(&handle->pdev->dev,
4721 "Get vector index fail. ret =%d\n", vector_id);
4725 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4727 dev_err(&handle->pdev->dev,
4728 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4734 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4735 struct hclge_promisc_param *param)
4737 struct hclge_promisc_cfg_cmd *req;
4738 struct hclge_desc desc;
4741 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4743 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4744 req->vf_id = param->vf_id;
/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
 * pdev revision 0x20; newer revisions support them. Setting these two
 * fields does not cause an error when the driver sends the command to
 * firmware on revision 0x20.
 */
4751 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4752 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4754 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4756 dev_err(&hdev->pdev->dev,
4757 "Set promisc mode fail, status is %d.\n", ret);
4762 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4763 bool en_uc, bool en_mc, bool en_bc,
4769 memset(param, 0, sizeof(struct hclge_promisc_param));
4771 param->enable = HCLGE_PROMISC_EN_UC;
4773 param->enable |= HCLGE_PROMISC_EN_MC;
4775 param->enable |= HCLGE_PROMISC_EN_BC;
4776 param->vf_id = vport_id;
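/* Example (illustrative): en_uc = true, en_mc = false, en_bc = true
 * yields param->enable = HCLGE_PROMISC_EN_UC | HCLGE_PROMISC_EN_BC.
 */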
4779 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4780 bool en_mc_pmc, bool en_bc_pmc)
4782 struct hclge_dev *hdev = vport->back;
4783 struct hclge_promisc_param param;
hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
			 vport->vport_id);
return hclge_cmd_set_promisc_mode(hdev, &param);
4790 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4793 struct hclge_vport *vport = hclge_get_vport(handle);
4794 bool en_bc_pmc = true;
/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
 * is always bypassed. So broadcast promisc should stay disabled until
 * the user enables promisc mode.
 */
4800 if (handle->pdev->revision == 0x20)
4801 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4803 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4807 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4809 struct hclge_get_fd_mode_cmd *req;
4810 struct hclge_desc desc;
4813 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4815 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4817 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4819 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4823 *fd_mode = req->mode;
4828 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4829 u32 *stage1_entry_num,
4830 u32 *stage2_entry_num,
4831 u16 *stage1_counter_num,
4832 u16 *stage2_counter_num)
4834 struct hclge_get_fd_allocation_cmd *req;
4835 struct hclge_desc desc;
4838 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4840 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4842 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4844 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4849 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4850 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4851 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4852 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4857 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4859 struct hclge_set_fd_key_config_cmd *req;
4860 struct hclge_fd_key_cfg *stage;
4861 struct hclge_desc desc;
4864 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4866 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4867 stage = &hdev->fd_cfg.key_cfg[stage_num];
4868 req->stage = stage_num;
4869 req->key_select = stage->key_sel;
4870 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4871 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4872 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4873 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4874 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4875 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4877 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4879 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4884 static int hclge_init_fd_config(struct hclge_dev *hdev)
4886 #define LOW_2_WORDS 0x03
4887 struct hclge_fd_key_cfg *key_cfg;
4890 if (!hnae3_dev_fd_supported(hdev))
4893 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4897 switch (hdev->fd_cfg.fd_mode) {
4898 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4899 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4901 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4902 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4905 dev_err(&hdev->pdev->dev,
4906 "Unsupported flow director mode %u\n",
4907 hdev->fd_cfg.fd_mode);
4911 hdev->fd_cfg.proto_support =
4912 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4913 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4914 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4916 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4917 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4918 key_cfg->outer_sipv6_word_en = 0;
4919 key_cfg->outer_dipv6_word_en = 0;
4921 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4922 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4923 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4924 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* If using the max 400-bit key, we can also support tuples for ether type */
4927 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4928 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4929 key_cfg->tuple_active |=
4930 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4933 /* roce_type is used to filter roce frames
4934 * dst_vport is used to specify the rule
4936 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4938 ret = hclge_get_fd_allocation(hdev,
4939 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4940 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4941 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4942 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4946 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4949 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4950 int loc, u8 *key, bool is_add)
4952 struct hclge_fd_tcam_config_1_cmd *req1;
4953 struct hclge_fd_tcam_config_2_cmd *req2;
4954 struct hclge_fd_tcam_config_3_cmd *req3;
4955 struct hclge_desc desc[3];
4958 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4959 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4960 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4961 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4962 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4964 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4965 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4966 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4968 req1->stage = stage;
4969 req1->xy_sel = sel_x ? 1 : 0;
4970 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4971 req1->index = cpu_to_le32(loc);
4972 req1->entry_vld = sel_x ? is_add : 0;
4975 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4976 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4977 sizeof(req2->tcam_data));
4978 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4979 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
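/* The TCAM key does not fit in one descriptor, so it is streamed across
 * the three chained descriptors above: req1->tcam_data takes the first
 * sizeof(req1->tcam_data) bytes, req2 and req3 the following chunks.
 */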
4982 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4984 dev_err(&hdev->pdev->dev,
4985 "config tcam key fail, ret=%d\n",
4991 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4992 struct hclge_fd_ad_data *action)
4994 struct hclge_fd_ad_config_cmd *req;
4995 struct hclge_desc desc;
4999 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5001 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5002 req->index = cpu_to_le32(loc);
5005 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5006 action->write_rule_id_to_bd);
5007 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5010 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5011 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5012 action->forward_to_direct_queue);
5013 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5015 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5016 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5017 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5018 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
		action->next_input_key);
5022 req->ad_data = cpu_to_le64(ad_data);
5023 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5025 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5030 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5031 struct hclge_fd_rule *rule)
5033 u16 tmp_x_s, tmp_y_s;
5034 u32 tmp_x_l, tmp_y_l;
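/* Roughly: calc_x()/calc_y() turn each (value, mask) pair into the
 * TCAM X/Y bit patterns, where masked-out bits become don't-care, so a
 * rule only matches on the tuple bits the user actually specified.
 */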
5037 if (rule->unused_tuple & tuple_bit)
5040 switch (tuple_bit) {
5043 case BIT(INNER_DST_MAC):
5044 for (i = 0; i < ETH_ALEN; i++) {
5045 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5046 rule->tuples_mask.dst_mac[i]);
5047 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5048 rule->tuples_mask.dst_mac[i]);
5052 case BIT(INNER_SRC_MAC):
5053 for (i = 0; i < ETH_ALEN; i++) {
calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
       rule->tuples_mask.src_mac[i]);
calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
       rule->tuples_mask.src_mac[i]);
5061 case BIT(INNER_VLAN_TAG_FST):
5062 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5063 rule->tuples_mask.vlan_tag1);
5064 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5065 rule->tuples_mask.vlan_tag1);
5066 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5067 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5070 case BIT(INNER_ETH_TYPE):
5071 calc_x(tmp_x_s, rule->tuples.ether_proto,
5072 rule->tuples_mask.ether_proto);
5073 calc_y(tmp_y_s, rule->tuples.ether_proto,
5074 rule->tuples_mask.ether_proto);
5075 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5076 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5079 case BIT(INNER_IP_TOS):
5080 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5081 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5084 case BIT(INNER_IP_PROTO):
5085 calc_x(*key_x, rule->tuples.ip_proto,
5086 rule->tuples_mask.ip_proto);
5087 calc_y(*key_y, rule->tuples.ip_proto,
5088 rule->tuples_mask.ip_proto);
5091 case BIT(INNER_SRC_IP):
5092 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5093 rule->tuples_mask.src_ip[IPV4_INDEX]);
5094 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5095 rule->tuples_mask.src_ip[IPV4_INDEX]);
5096 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5097 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5100 case BIT(INNER_DST_IP):
5101 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5102 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5103 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5104 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5105 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5106 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5109 case BIT(INNER_SRC_PORT):
5110 calc_x(tmp_x_s, rule->tuples.src_port,
5111 rule->tuples_mask.src_port);
5112 calc_y(tmp_y_s, rule->tuples.src_port,
5113 rule->tuples_mask.src_port);
5114 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5115 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5118 case BIT(INNER_DST_PORT):
5119 calc_x(tmp_x_s, rule->tuples.dst_port,
5120 rule->tuples_mask.dst_port);
5121 calc_y(tmp_y_s, rule->tuples.dst_port,
5122 rule->tuples_mask.dst_port);
5123 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5124 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5132 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5133 u8 vf_id, u8 network_port_id)
5135 u32 port_number = 0;
5137 if (port_type == HOST_PORT) {
5138 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5140 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5142 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5144 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5145 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5146 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5152 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5153 __le32 *key_x, __le32 *key_y,
5154 struct hclge_fd_rule *rule)
5156 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5157 u8 cur_pos = 0, tuple_size, shift_bits;
5160 for (i = 0; i < MAX_META_DATA; i++) {
5161 tuple_size = meta_data_key_info[i].key_length;
5162 tuple_bit = key_cfg->meta_data_active & BIT(i);
5164 switch (tuple_bit) {
5165 case BIT(ROCE_TYPE):
5166 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5167 cur_pos += tuple_size;
5169 case BIT(DST_VPORT):
5170 port_number = hclge_get_port_number(HOST_PORT, 0,
hnae3_set_field(meta_data,
		GENMASK(cur_pos + tuple_size - 1, cur_pos),
		cur_pos, port_number);
5175 cur_pos += tuple_size;
5182 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5183 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5184 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5186 *key_x = cpu_to_le32(tmp_x << shift_bits);
5187 *key_y = cpu_to_le32(tmp_y << shift_bits);
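/* The meta data bits are left-aligned within the 32-bit word: e.g. if
 * cur_pos ends up as 12, shift_bits is 20 and the 12 valid bits land in
 * the MSBs of *key_x / *key_y, matching the hardware key layout.
 */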
5190 /* A complete key is combined with meta data key and tuple key.
5191 * Meta data key is stored at the MSB region, and tuple key is stored at
5192 * the LSB region, unused bits will be filled 0.
5194 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5195 struct hclge_fd_rule *rule)
5197 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5198 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5199 u8 *cur_key_x, *cur_key_y;
5201 int ret, tuple_size;
5202 u8 meta_data_region;
5204 memset(key_x, 0, sizeof(key_x));
5205 memset(key_y, 0, sizeof(key_y));
5209 for (i = 0 ; i < MAX_TUPLE; i++) {
5213 tuple_size = tuple_key_info[i].key_length / 8;
5214 check_tuple = key_cfg->tuple_active & BIT(i);
5216 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5219 cur_key_x += tuple_size;
5220 cur_key_y += tuple_size;
5224 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5225 MAX_META_DATA_LENGTH / 8;
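/* Illustrative numbers (assuming a 400-bit max key and a 32-bit meta
 * data region): this evaluates to 50 - 4 = 46, i.e. the meta data words
 * start 46 bytes into the key buffer, at its MSB end.
 */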
5227 hclge_fd_convert_meta_data(key_cfg,
5228 (__le32 *)(key_x + meta_data_region),
5229 (__le32 *)(key_y + meta_data_region),
5232 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
dev_err(&hdev->pdev->dev,
	"fd key_y config fail, loc=%u, ret=%d\n",
	rule->location, ret);
5241 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
dev_err(&hdev->pdev->dev,
	"fd key_x config fail, loc=%u, ret=%d\n",
	rule->location, ret);
5250 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5251 struct hclge_fd_rule *rule)
5253 struct hclge_fd_ad_data ad_data;
5255 ad_data.ad_id = rule->location;
5257 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5258 ad_data.drop_packet = true;
5259 ad_data.forward_to_direct_queue = false;
5260 ad_data.queue_id = 0;
5262 ad_data.drop_packet = false;
5263 ad_data.forward_to_direct_queue = true;
5264 ad_data.queue_id = rule->queue_id;
5267 ad_data.use_counter = false;
5268 ad_data.counter_id = 0;
5270 ad_data.use_next_stage = false;
5271 ad_data.next_input_key = 0;
5273 ad_data.write_rule_id_to_bd = true;
5274 ad_data.rule_id = rule->location;
5276 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5279 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5280 struct ethtool_rx_flow_spec *fs, u32 *unused)
5282 struct ethtool_tcpip4_spec *tcp_ip4_spec;
5283 struct ethtool_usrip4_spec *usr_ip4_spec;
5284 struct ethtool_tcpip6_spec *tcp_ip6_spec;
5285 struct ethtool_usrip6_spec *usr_ip6_spec;
5286 struct ethhdr *ether_spec;
5288 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5291 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5294 if ((fs->flow_type & FLOW_EXT) &&
5295 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5296 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5300 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5304 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5305 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5307 if (!tcp_ip4_spec->ip4src)
5308 *unused |= BIT(INNER_SRC_IP);
5310 if (!tcp_ip4_spec->ip4dst)
5311 *unused |= BIT(INNER_DST_IP);
5313 if (!tcp_ip4_spec->psrc)
5314 *unused |= BIT(INNER_SRC_PORT);
5316 if (!tcp_ip4_spec->pdst)
5317 *unused |= BIT(INNER_DST_PORT);
5319 if (!tcp_ip4_spec->tos)
5320 *unused |= BIT(INNER_IP_TOS);
5324 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5325 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5326 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5328 if (!usr_ip4_spec->ip4src)
5329 *unused |= BIT(INNER_SRC_IP);
5331 if (!usr_ip4_spec->ip4dst)
5332 *unused |= BIT(INNER_DST_IP);
5334 if (!usr_ip4_spec->tos)
5335 *unused |= BIT(INNER_IP_TOS);
5337 if (!usr_ip4_spec->proto)
5338 *unused |= BIT(INNER_IP_PROTO);
5340 if (usr_ip4_spec->l4_4_bytes)
5343 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5350 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5351 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
/* check whether the src/dst ip address is used */
5355 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5356 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5357 *unused |= BIT(INNER_SRC_IP);
5359 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5360 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5361 *unused |= BIT(INNER_DST_IP);
5363 if (!tcp_ip6_spec->psrc)
5364 *unused |= BIT(INNER_SRC_PORT);
5366 if (!tcp_ip6_spec->pdst)
5367 *unused |= BIT(INNER_DST_PORT);
5369 if (tcp_ip6_spec->tclass)
5373 case IPV6_USER_FLOW:
5374 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5375 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5376 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5377 BIT(INNER_DST_PORT);
/* check whether the src/dst ip address is used */
5380 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5381 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5382 *unused |= BIT(INNER_SRC_IP);
5384 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5385 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5386 *unused |= BIT(INNER_DST_IP);
5388 if (!usr_ip6_spec->l4_proto)
5389 *unused |= BIT(INNER_IP_PROTO);
5391 if (usr_ip6_spec->tclass)
5394 if (usr_ip6_spec->l4_4_bytes)
5399 ether_spec = &fs->h_u.ether_spec;
5400 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5401 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5402 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5404 if (is_zero_ether_addr(ether_spec->h_source))
5405 *unused |= BIT(INNER_SRC_MAC);
5407 if (is_zero_ether_addr(ether_spec->h_dest))
5408 *unused |= BIT(INNER_DST_MAC);
5410 if (!ether_spec->h_proto)
5411 *unused |= BIT(INNER_ETH_TYPE);
5418 if ((fs->flow_type & FLOW_EXT)) {
5419 if (fs->h_ext.vlan_etype)
5421 if (!fs->h_ext.vlan_tci)
5422 *unused |= BIT(INNER_VLAN_TAG_FST);
5424 if (fs->m_ext.vlan_tci) {
5425 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5429 *unused |= BIT(INNER_VLAN_TAG_FST);
5432 if (fs->flow_type & FLOW_MAC_EXT) {
5433 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5436 if (is_zero_ether_addr(fs->h_ext.h_dest))
5437 *unused |= BIT(INNER_DST_MAC);
5439 *unused &= ~(BIT(INNER_DST_MAC));
5445 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5447 struct hclge_fd_rule *rule = NULL;
5448 struct hlist_node *node2;
5450 spin_lock_bh(&hdev->fd_rule_lock);
5451 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5452 if (rule->location >= location)
5456 spin_unlock_bh(&hdev->fd_rule_lock);
5458 return rule && rule->location == location;
/* must be called with fd_rule_lock held */
5462 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5463 struct hclge_fd_rule *new_rule,
5467 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5468 struct hlist_node *node2;
5470 if (is_add && !new_rule)
5473 hlist_for_each_entry_safe(rule, node2,
5474 &hdev->fd_rule_list, rule_node) {
5475 if (rule->location >= location)
5480 if (rule && rule->location == location) {
5481 hlist_del(&rule->rule_node);
5483 hdev->hclge_fd_rule_num--;
5486 if (!hdev->hclge_fd_rule_num)
5487 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5488 clear_bit(location, hdev->fd_bmap);
5492 } else if (!is_add) {
5493 dev_err(&hdev->pdev->dev,
5494 "delete fail, rule %u is inexistent\n",
5499 INIT_HLIST_NODE(&new_rule->rule_node);
5502 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5504 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
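/* Note: the list is kept sorted by rule location; hlist_add_behind()
 * inserts after the last rule with a smaller location, or the rule is
 * added at the head when no such rule exists.
 */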
5506 set_bit(location, hdev->fd_bmap);
5507 hdev->hclge_fd_rule_num++;
5508 hdev->fd_active_type = new_rule->rule_type;
5513 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5514 struct ethtool_rx_flow_spec *fs,
5515 struct hclge_fd_rule *rule)
5517 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5519 switch (flow_type) {
5523 rule->tuples.src_ip[IPV4_INDEX] =
5524 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5525 rule->tuples_mask.src_ip[IPV4_INDEX] =
5526 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5528 rule->tuples.dst_ip[IPV4_INDEX] =
5529 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5530 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5531 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5533 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5534 rule->tuples_mask.src_port =
5535 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5537 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5538 rule->tuples_mask.dst_port =
5539 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5541 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5542 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5544 rule->tuples.ether_proto = ETH_P_IP;
5545 rule->tuples_mask.ether_proto = 0xFFFF;
5549 rule->tuples.src_ip[IPV4_INDEX] =
5550 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5551 rule->tuples_mask.src_ip[IPV4_INDEX] =
5552 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5554 rule->tuples.dst_ip[IPV4_INDEX] =
5555 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5556 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5557 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5559 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5560 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5562 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5563 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5565 rule->tuples.ether_proto = ETH_P_IP;
5566 rule->tuples_mask.ether_proto = 0xFFFF;
5572 be32_to_cpu_array(rule->tuples.src_ip,
5573 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5574 be32_to_cpu_array(rule->tuples_mask.src_ip,
5575 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5577 be32_to_cpu_array(rule->tuples.dst_ip,
5578 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5579 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5580 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5582 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5583 rule->tuples_mask.src_port =
5584 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5586 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5587 rule->tuples_mask.dst_port =
5588 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5590 rule->tuples.ether_proto = ETH_P_IPV6;
5591 rule->tuples_mask.ether_proto = 0xFFFF;
5594 case IPV6_USER_FLOW:
5595 be32_to_cpu_array(rule->tuples.src_ip,
5596 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5597 be32_to_cpu_array(rule->tuples_mask.src_ip,
5598 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5600 be32_to_cpu_array(rule->tuples.dst_ip,
5601 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5602 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5603 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5605 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5606 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5608 rule->tuples.ether_proto = ETH_P_IPV6;
5609 rule->tuples_mask.ether_proto = 0xFFFF;
5613 ether_addr_copy(rule->tuples.src_mac,
5614 fs->h_u.ether_spec.h_source);
5615 ether_addr_copy(rule->tuples_mask.src_mac,
5616 fs->m_u.ether_spec.h_source);
5618 ether_addr_copy(rule->tuples.dst_mac,
5619 fs->h_u.ether_spec.h_dest);
5620 ether_addr_copy(rule->tuples_mask.dst_mac,
5621 fs->m_u.ether_spec.h_dest);
5623 rule->tuples.ether_proto =
5624 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5625 rule->tuples_mask.ether_proto =
5626 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5633 switch (flow_type) {
5636 rule->tuples.ip_proto = IPPROTO_SCTP;
5637 rule->tuples_mask.ip_proto = 0xFF;
5641 rule->tuples.ip_proto = IPPROTO_TCP;
5642 rule->tuples_mask.ip_proto = 0xFF;
5646 rule->tuples.ip_proto = IPPROTO_UDP;
5647 rule->tuples_mask.ip_proto = 0xFF;
5653 if ((fs->flow_type & FLOW_EXT)) {
5654 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5655 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5658 if (fs->flow_type & FLOW_MAC_EXT) {
5659 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5660 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
/* must be called with fd_rule_lock held */
5667 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5668 struct hclge_fd_rule *rule)
5673 dev_err(&hdev->pdev->dev,
5674 "The flow director rule is NULL\n");
/* it never fails here, so there is no need to check the return value */
5679 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5681 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5685 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5692 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5696 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5697 struct ethtool_rxnfc *cmd)
5699 struct hclge_vport *vport = hclge_get_vport(handle);
5700 struct hclge_dev *hdev = vport->back;
5701 u16 dst_vport_id = 0, q_index = 0;
5702 struct ethtool_rx_flow_spec *fs;
5703 struct hclge_fd_rule *rule;
5708 if (!hnae3_dev_fd_supported(hdev))
5712 dev_warn(&hdev->pdev->dev,
5713 "Please enable flow director first\n");
5717 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5719 ret = hclge_fd_check_spec(hdev, fs, &unused);
5721 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5725 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5726 action = HCLGE_FD_ACTION_DROP_PACKET;
5728 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5729 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5732 if (vf > hdev->num_req_vfs) {
5733 dev_err(&hdev->pdev->dev,
5734 "Error: vf id (%u) > max vf num (%u)\n",
5735 vf, hdev->num_req_vfs);
5739 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5740 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5743 dev_err(&hdev->pdev->dev,
5744 "Error: queue id (%u) > max tqp num (%u)\n",
5749 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5753 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5757 ret = hclge_fd_get_tuple(hdev, fs, rule);
5763 rule->flow_type = fs->flow_type;
5765 rule->location = fs->location;
5766 rule->unused_tuple = unused;
5767 rule->vf_id = dst_vport_id;
5768 rule->queue_id = q_index;
5769 rule->action = action;
5770 rule->rule_type = HCLGE_FD_EP_ACTIVE;
/* To avoid rule conflict, when the user configures rules via ethtool,
 * we need to clear all arfs rules first.
 */
5775 hclge_clear_arfs_rules(handle);
5777 spin_lock_bh(&hdev->fd_rule_lock);
5778 ret = hclge_fd_config_rule(hdev, rule);
5780 spin_unlock_bh(&hdev->fd_rule_lock);
5785 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5786 struct ethtool_rxnfc *cmd)
5788 struct hclge_vport *vport = hclge_get_vport(handle);
5789 struct hclge_dev *hdev = vport->back;
5790 struct ethtool_rx_flow_spec *fs;
5793 if (!hnae3_dev_fd_supported(hdev))
5796 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5798 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5801 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5802 dev_err(&hdev->pdev->dev,
5803 "Delete fail, rule %u is inexistent\n", fs->location);
5807 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5812 spin_lock_bh(&hdev->fd_rule_lock);
5813 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5815 spin_unlock_bh(&hdev->fd_rule_lock);
5820 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5823 struct hclge_vport *vport = hclge_get_vport(handle);
5824 struct hclge_dev *hdev = vport->back;
5825 struct hclge_fd_rule *rule;
5826 struct hlist_node *node;
5829 if (!hnae3_dev_fd_supported(hdev))
5832 spin_lock_bh(&hdev->fd_rule_lock);
5833 for_each_set_bit(location, hdev->fd_bmap,
5834 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5835 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5839 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5841 hlist_del(&rule->rule_node);
5844 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5845 hdev->hclge_fd_rule_num = 0;
5846 bitmap_zero(hdev->fd_bmap,
5847 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5850 spin_unlock_bh(&hdev->fd_rule_lock);
5853 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5855 struct hclge_vport *vport = hclge_get_vport(handle);
5856 struct hclge_dev *hdev = vport->back;
5857 struct hclge_fd_rule *rule;
5858 struct hlist_node *node;
/* Return ok here, because reset error handling will check this
 * return value. If error is returned here, the reset process will
 * fail.
 */
/* if fd is disabled, the rules should not be restored during reset */
5872 spin_lock_bh(&hdev->fd_rule_lock);
5873 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5874 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5876 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5879 dev_warn(&hdev->pdev->dev,
5880 "Restore rule %u failed, remove it\n",
5882 clear_bit(rule->location, hdev->fd_bmap);
5883 hlist_del(&rule->rule_node);
5885 hdev->hclge_fd_rule_num--;
5889 if (hdev->hclge_fd_rule_num)
5890 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5892 spin_unlock_bh(&hdev->fd_rule_lock);
5897 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5898 struct ethtool_rxnfc *cmd)
5900 struct hclge_vport *vport = hclge_get_vport(handle);
5901 struct hclge_dev *hdev = vport->back;
5903 if (!hnae3_dev_fd_supported(hdev))
5906 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5907 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5912 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5913 struct ethtool_rxnfc *cmd)
5915 struct hclge_vport *vport = hclge_get_vport(handle);
5916 struct hclge_fd_rule *rule = NULL;
5917 struct hclge_dev *hdev = vport->back;
5918 struct ethtool_rx_flow_spec *fs;
5919 struct hlist_node *node2;
5921 if (!hnae3_dev_fd_supported(hdev))
5924 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5926 spin_lock_bh(&hdev->fd_rule_lock);
5928 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5929 if (rule->location >= fs->location)
5933 if (!rule || fs->location != rule->location) {
5934 spin_unlock_bh(&hdev->fd_rule_lock);
5939 fs->flow_type = rule->flow_type;
5940 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5944 fs->h_u.tcp_ip4_spec.ip4src =
5945 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5946 fs->m_u.tcp_ip4_spec.ip4src =
5947 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5948 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5950 fs->h_u.tcp_ip4_spec.ip4dst =
5951 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5952 fs->m_u.tcp_ip4_spec.ip4dst =
5953 rule->unused_tuple & BIT(INNER_DST_IP) ?
5954 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5956 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5957 fs->m_u.tcp_ip4_spec.psrc =
5958 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5959 0 : cpu_to_be16(rule->tuples_mask.src_port);
5961 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5962 fs->m_u.tcp_ip4_spec.pdst =
5963 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5964 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5966 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5967 fs->m_u.tcp_ip4_spec.tos =
5968 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5969 0 : rule->tuples_mask.ip_tos;
5973 fs->h_u.usr_ip4_spec.ip4src =
5974 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
fs->m_u.usr_ip4_spec.ip4src =
	rule->unused_tuple & BIT(INNER_SRC_IP) ?
	0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5979 fs->h_u.usr_ip4_spec.ip4dst =
5980 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5981 fs->m_u.usr_ip4_spec.ip4dst =
5982 rule->unused_tuple & BIT(INNER_DST_IP) ?
5983 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5985 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5986 fs->m_u.usr_ip4_spec.tos =
5987 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5988 0 : rule->tuples_mask.ip_tos;
5990 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5991 fs->m_u.usr_ip4_spec.proto =
5992 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5993 0 : rule->tuples_mask.ip_proto;
5995 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
6001 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
6002 rule->tuples.src_ip, IPV6_SIZE);
6003 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6004 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
6005 sizeof(int) * IPV6_SIZE);
6007 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
6008 rule->tuples_mask.src_ip, IPV6_SIZE);
6010 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
6011 rule->tuples.dst_ip, IPV6_SIZE);
6012 if (rule->unused_tuple & BIT(INNER_DST_IP))
6013 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
6014 sizeof(int) * IPV6_SIZE);
6016 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
6017 rule->tuples_mask.dst_ip, IPV6_SIZE);
6019 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
6020 fs->m_u.tcp_ip6_spec.psrc =
6021 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6022 0 : cpu_to_be16(rule->tuples_mask.src_port);
6024 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
6025 fs->m_u.tcp_ip6_spec.pdst =
6026 rule->unused_tuple & BIT(INNER_DST_PORT) ?
6027 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6030 case IPV6_USER_FLOW:
6031 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
6032 rule->tuples.src_ip, IPV6_SIZE);
6033 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6034 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6035 sizeof(int) * IPV6_SIZE);
6037 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6038 rule->tuples_mask.src_ip, IPV6_SIZE);
6040 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6041 rule->tuples.dst_ip, IPV6_SIZE);
6042 if (rule->unused_tuple & BIT(INNER_DST_IP))
6043 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6044 sizeof(int) * IPV6_SIZE);
6046 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6047 rule->tuples_mask.dst_ip, IPV6_SIZE);
6049 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6050 fs->m_u.usr_ip6_spec.l4_proto =
6051 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6052 0 : rule->tuples_mask.ip_proto;
6056 ether_addr_copy(fs->h_u.ether_spec.h_source,
6057 rule->tuples.src_mac);
6058 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6059 eth_zero_addr(fs->m_u.ether_spec.h_source);
6061 ether_addr_copy(fs->m_u.ether_spec.h_source,
6062 rule->tuples_mask.src_mac);
6064 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6065 rule->tuples.dst_mac);
6066 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6067 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6069 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6070 rule->tuples_mask.dst_mac);
6072 fs->h_u.ether_spec.h_proto =
6073 cpu_to_be16(rule->tuples.ether_proto);
6074 fs->m_u.ether_spec.h_proto =
6075 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6076 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6080 spin_unlock_bh(&hdev->fd_rule_lock);
6084 if (fs->flow_type & FLOW_EXT) {
6085 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6086 fs->m_ext.vlan_tci =
6087 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6088 cpu_to_be16(VLAN_VID_MASK) :
6089 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6092 if (fs->flow_type & FLOW_MAC_EXT) {
6093 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6094 if (rule->unused_tuple & BIT(INNER_DST_MAC))
	eth_zero_addr(fs->m_ext.h_dest);
else
	ether_addr_copy(fs->m_ext.h_dest,
			rule->tuples_mask.dst_mac);
6101 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6102 fs->ring_cookie = RX_CLS_FLOW_DISC;
6106 fs->ring_cookie = rule->queue_id;
6107 vf_id = rule->vf_id;
6108 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6109 fs->ring_cookie |= vf_id;
6112 spin_unlock_bh(&hdev->fd_rule_lock);
6117 static int hclge_get_all_rules(struct hnae3_handle *handle,
6118 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6120 struct hclge_vport *vport = hclge_get_vport(handle);
6121 struct hclge_dev *hdev = vport->back;
6122 struct hclge_fd_rule *rule;
6123 struct hlist_node *node2;
6126 if (!hnae3_dev_fd_supported(hdev))
6129 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6131 spin_lock_bh(&hdev->fd_rule_lock);
6132 hlist_for_each_entry_safe(rule, node2,
6133 &hdev->fd_rule_list, rule_node) {
6134 if (cnt == cmd->rule_cnt) {
6135 spin_unlock_bh(&hdev->fd_rule_lock);
6139 rule_locs[cnt] = rule->location;
6143 spin_unlock_bh(&hdev->fd_rule_lock);
6145 cmd->rule_cnt = cnt;
6150 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6151 struct hclge_fd_rule_tuples *tuples)
6153 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6154 tuples->ip_proto = fkeys->basic.ip_proto;
6155 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6157 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6158 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6159 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6161 memcpy(tuples->src_ip,
6162 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6163 sizeof(tuples->src_ip));
6164 memcpy(tuples->dst_ip,
6165 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6166 sizeof(tuples->dst_ip));
/* traverse all rules, check whether an existing rule has the same tuples */
6171 static struct hclge_fd_rule *
6172 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6173 const struct hclge_fd_rule_tuples *tuples)
6175 struct hclge_fd_rule *rule = NULL;
6176 struct hlist_node *node;
6178 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6179 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6186 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6187 struct hclge_fd_rule *rule)
6189 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6190 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6191 BIT(INNER_SRC_PORT);
6194 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6195 if (tuples->ether_proto == ETH_P_IP) {
6196 if (tuples->ip_proto == IPPROTO_TCP)
6197 rule->flow_type = TCP_V4_FLOW;
6199 rule->flow_type = UDP_V4_FLOW;
6201 if (tuples->ip_proto == IPPROTO_TCP)
6202 rule->flow_type = TCP_V6_FLOW;
6204 rule->flow_type = UDP_V6_FLOW;
6206 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6207 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
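/* aRFS rules are exact-match: every byte of tuples_mask is set to 0xFF,
 * and only the tuples named in unused_tuple above are ignored.
 */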
6210 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6211 u16 flow_id, struct flow_keys *fkeys)
6213 struct hclge_vport *vport = hclge_get_vport(handle);
6214 struct hclge_fd_rule_tuples new_tuples;
6215 struct hclge_dev *hdev = vport->back;
6216 struct hclge_fd_rule *rule;
6221 if (!hnae3_dev_fd_supported(hdev))
6224 memset(&new_tuples, 0, sizeof(new_tuples));
6225 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6227 spin_lock_bh(&hdev->fd_rule_lock);
/* when there is already an fd rule added by the user,
 * arfs should not work
 */
6232 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6233 spin_unlock_bh(&hdev->fd_rule_lock);
/* check if a flow director filter already exists for this flow:
 * if not, create a new filter for it;
 * if a filter exists with a different queue id, modify the filter;
 * if a filter exists with the same queue id, do nothing
 */
6243 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6245 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6246 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6247 spin_unlock_bh(&hdev->fd_rule_lock);
6252 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6254 spin_unlock_bh(&hdev->fd_rule_lock);
6259 set_bit(bit_id, hdev->fd_bmap);
6260 rule->location = bit_id;
6261 rule->flow_id = flow_id;
6262 rule->queue_id = queue_id;
6263 hclge_fd_build_arfs_rule(&new_tuples, rule);
6264 ret = hclge_fd_config_rule(hdev, rule);
6266 spin_unlock_bh(&hdev->fd_rule_lock);
6271 return rule->location;
6274 spin_unlock_bh(&hdev->fd_rule_lock);
6276 if (rule->queue_id == queue_id)
6277 return rule->location;
6279 tmp_queue_id = rule->queue_id;
6280 rule->queue_id = queue_id;
6281 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6283 rule->queue_id = tmp_queue_id;
6287 return rule->location;
6290 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6292 #ifdef CONFIG_RFS_ACCEL
6293 struct hnae3_handle *handle = &hdev->vport[0].nic;
6294 struct hclge_fd_rule *rule;
6295 struct hlist_node *node;
6296 HLIST_HEAD(del_list);
6298 spin_lock_bh(&hdev->fd_rule_lock);
6299 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6300 spin_unlock_bh(&hdev->fd_rule_lock);
6303 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6304 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6305 rule->flow_id, rule->location)) {
6306 hlist_del_init(&rule->rule_node);
6307 hlist_add_head(&rule->rule_node, &del_list);
6308 hdev->hclge_fd_rule_num--;
6309 clear_bit(rule->location, hdev->fd_bmap);
6312 spin_unlock_bh(&hdev->fd_rule_lock);
6314 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6315 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6316 rule->location, NULL, false);
6322 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6324 #ifdef CONFIG_RFS_ACCEL
6325 struct hclge_vport *vport = hclge_get_vport(handle);
6326 struct hclge_dev *hdev = vport->back;
6328 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6329 hclge_del_all_fd_entries(handle, true);
6333 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6335 struct hclge_vport *vport = hclge_get_vport(handle);
6336 struct hclge_dev *hdev = vport->back;
6338 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6339 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6342 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6344 struct hclge_vport *vport = hclge_get_vport(handle);
6345 struct hclge_dev *hdev = vport->back;
6347 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6350 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6352 struct hclge_vport *vport = hclge_get_vport(handle);
6353 struct hclge_dev *hdev = vport->back;
6355 return hdev->rst_stats.hw_reset_done_cnt;
6358 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6360 struct hclge_vport *vport = hclge_get_vport(handle);
6361 struct hclge_dev *hdev = vport->back;
6364 hdev->fd_en = enable;
6365 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6367 hclge_del_all_fd_entries(handle, clear);
6369 hclge_restore_fd_entries(handle);
6372 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6374 struct hclge_desc desc;
6375 struct hclge_config_mac_mode_cmd *req =
6376 (struct hclge_config_mac_mode_cmd *)desc.data;
6380 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6383 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6384 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6385 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6386 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6387 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6388 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6389 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6390 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6391 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6392 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6395 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6397 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6399 dev_err(&hdev->pdev->dev,
6400 "mac enable fail, ret =%d.\n", ret);
6403 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6404 u8 switch_param, u8 param_mask)
6406 struct hclge_mac_vlan_switch_cmd *req;
6407 struct hclge_desc desc;
6411 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6412 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6414 /* read current config parameter */
6415 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6417 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6418 req->func_id = cpu_to_le32(func_id);
6420 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6422 dev_err(&hdev->pdev->dev,
6423 "read mac vlan switch parameter fail, ret = %d\n", ret);
6427 /* modify and write new config parameter */
6428 hclge_cmd_reuse_desc(&desc, false);
6429 req->switch_param = (req->switch_param & param_mask) | switch_param;
6430 req->param_mask = param_mask;
6432 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6434 dev_err(&hdev->pdev->dev,
6435 "set mac vlan switch parameter fail, ret = %d\n", ret);
6439 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6442 #define HCLGE_PHY_LINK_STATUS_NUM 200
6444 struct phy_device *phydev = hdev->hw.mac.phydev;
6449 ret = phy_read_status(phydev);
6451 dev_err(&hdev->pdev->dev,
6452 "phy update link status fail, ret = %d\n", ret);
6456 if (phydev->link == link_ret)
6459 msleep(HCLGE_LINK_STATUS_MS);
6460 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6463 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6465 #define HCLGE_MAC_LINK_STATUS_NUM 100
6471 ret = hclge_get_mac_link_status(hdev);
6474 else if (ret == link_ret)
6477 msleep(HCLGE_LINK_STATUS_MS);
6478 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6482 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6485 #define HCLGE_LINK_STATUS_DOWN 0
6486 #define HCLGE_LINK_STATUS_UP 1
6490 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6493 hclge_phy_link_status_wait(hdev, link_ret);
6495 return hclge_mac_link_status_wait(hdev, link_ret);
6498 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6500 struct hclge_config_mac_mode_cmd *req;
6501 struct hclge_desc desc;
6505 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6506 /* 1 Read out the MAC mode config at first */
6507 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6508 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6510 dev_err(&hdev->pdev->dev,
6511 "mac loopback get fail, ret =%d.\n", ret);
6515 /* 2 Then setup the loopback flag */
6516 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6517 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6518 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6519 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6521 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
/* 3 Config mac work mode with loopback flag
 * and its original configuration parameters
 */
6526 hclge_cmd_reuse_desc(&desc, false);
6527 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6529 dev_err(&hdev->pdev->dev,
6530 "mac loopback set fail, ret =%d.\n", ret);
6534 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6535 enum hnae3_loop loop_mode)
6537 #define HCLGE_SERDES_RETRY_MS 10
6538 #define HCLGE_SERDES_RETRY_NUM 100
6540 struct hclge_serdes_lb_cmd *req;
6541 struct hclge_desc desc;
6545 req = (struct hclge_serdes_lb_cmd *)desc.data;
6546 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6548 switch (loop_mode) {
6549 case HNAE3_LOOP_SERIAL_SERDES:
6550 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6552 case HNAE3_LOOP_PARALLEL_SERDES:
6553 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6556 dev_err(&hdev->pdev->dev,
6557 "unsupported serdes loopback mode %d\n", loop_mode);
if (en) {
	req->enable = loop_mode_b;
	req->mask = loop_mode_b;
} else {
	req->mask = loop_mode_b;
}
6568 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6570 dev_err(&hdev->pdev->dev,
6571 "serdes loopback set fail, ret = %d\n", ret);
6576 msleep(HCLGE_SERDES_RETRY_MS);
6577 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6579 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6581 dev_err(&hdev->pdev->dev,
6582 "serdes loopback get, ret = %d\n", ret);
6585 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6586 !(req->result & HCLGE_CMD_SERDES_DONE_B));
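/* The loop above polls for completion for up to HCLGE_SERDES_RETRY_NUM *
 * HCLGE_SERDES_RETRY_MS = ~1s before the timeout check below.
 */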
6588 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6589 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6591 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6592 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6598 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6599 enum hnae3_loop loop_mode)
6603 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6607 hclge_cfg_mac_mode(hdev, en);
ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6611 dev_err(&hdev->pdev->dev,
6612 "serdes loopback config mac mode timeout\n");
6617 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6618 struct phy_device *phydev)
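/* The suspend/resume cycle below appears intended to bring the PHY back
 * to a known started state before loopback is turned on.
 */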
6622 if (!phydev->suspended) {
6623 ret = phy_suspend(phydev);
6628 ret = phy_resume(phydev);
6632 return phy_loopback(phydev, true);
6635 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6636 struct phy_device *phydev)
6640 ret = phy_loopback(phydev, false);
6644 return phy_suspend(phydev);
6647 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6649 struct phy_device *phydev = hdev->hw.mac.phydev;
6656 ret = hclge_enable_phy_loopback(hdev, phydev);
6658 ret = hclge_disable_phy_loopback(hdev, phydev);
6660 dev_err(&hdev->pdev->dev,
6661 "set phy loopback fail, ret = %d\n", ret);
6665 hclge_cfg_mac_mode(hdev, en);
ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6669 dev_err(&hdev->pdev->dev,
6670 "phy loopback config mac mode timeout\n");
6675 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6676 int stream_id, bool enable)
6678 struct hclge_desc desc;
6679 struct hclge_cfg_com_tqp_queue_cmd *req =
6680 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6683 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6684 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6685 req->stream_id = cpu_to_le16(stream_id);
if (enable)
	req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6689 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6691 dev_err(&hdev->pdev->dev,
6692 "Tqp enable fail, status =%d.\n", ret);
6696 static int hclge_set_loopback(struct hnae3_handle *handle,
6697 enum hnae3_loop loop_mode, bool en)
6699 struct hclge_vport *vport = hclge_get_vport(handle);
6700 struct hnae3_knic_private_info *kinfo;
6701 struct hclge_dev *hdev = vport->back;
6704 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6705 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6706 * the same, the packets are looped back in the SSU. If SSU loopback
6707 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6709 if (hdev->pdev->revision >= 0x21) {
6710 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6712 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6713 HCLGE_SWITCH_ALW_LPBK_MASK);
6718 switch (loop_mode) {
6719 case HNAE3_LOOP_APP:
6720 ret = hclge_set_app_loopback(hdev, en);
6722 case HNAE3_LOOP_SERIAL_SERDES:
6723 case HNAE3_LOOP_PARALLEL_SERDES:
6724 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6726 case HNAE3_LOOP_PHY:
6727 ret = hclge_set_phy_loopback(hdev, en);
6731 dev_err(&hdev->pdev->dev,
6732 "loop_mode %d is not supported\n", loop_mode);
6739 kinfo = &vport->nic.kinfo;
6740 for (i = 0; i < kinfo->num_tqps; i++) {
6741 ret = hclge_tqp_enable(hdev, i, 0, en);
6749 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6753 ret = hclge_set_app_loopback(hdev, false);
6757 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6761 return hclge_cfg_serdes_loopback(hdev, false,
6762 HNAE3_LOOP_PARALLEL_SERDES);
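/* Clear the per-queue software statistics of this handle; only the
 * driver's mirror counters are reset here.
 */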
6765 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6767 struct hclge_vport *vport = hclge_get_vport(handle);
6768 struct hnae3_knic_private_info *kinfo;
6769 struct hnae3_queue *queue;
6770 struct hclge_tqp *tqp;
6773 kinfo = &vport->nic.kinfo;
6774 for (i = 0; i < kinfo->num_tqps; i++) {
6775 queue = handle->kinfo.tqp[i];
6776 tqp = container_of(queue, struct hclge_tqp, q);
6777 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
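/* Busy-wait until any in-flight link status update in the service task
 * has finished, bounded by HCLGE_FLUSH_LINK_TIMEOUT iterations or by a
 * change of hdev->serv_processed_cnt (a completed service pass).
 */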
6781 static void hclge_flush_link_update(struct hclge_dev *hdev)
6783 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
6785 unsigned long last = hdev->serv_processed_cnt;
6788 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6789 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6790 last == hdev->serv_processed_cnt)
6794 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6796 struct hclge_vport *vport = hclge_get_vport(handle);
6797 struct hclge_dev *hdev = vport->back;
6799 if (enable) {
6800 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6801 } else {
6802 /* Set the DOWN flag here to disable link updating */
6803 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6805 /* flush memory to make sure DOWN is seen by service task */
6806 smp_mb__before_atomic();
6807 hclge_flush_link_update(hdev);
6811 static int hclge_ae_start(struct hnae3_handle *handle)
6813 struct hclge_vport *vport = hclge_get_vport(handle);
6814 struct hclge_dev *hdev = vport->back;
6817 hclge_cfg_mac_mode(hdev, true);
6818 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6819 hdev->hw.mac.link = 0;
6821 /* reset tqp stats */
6822 hclge_reset_tqp_stats(handle);
6824 hclge_mac_start_phy(hdev);
6829 static void hclge_ae_stop(struct hnae3_handle *handle)
6831 struct hclge_vport *vport = hclge_get_vport(handle);
6832 struct hclge_dev *hdev = vport->back;
6835 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6837 hclge_clear_arfs_rules(handle);
6839 /* If it is not PF reset, the firmware will disable the MAC,
6840 * so we only need to stop the phy here.
6842 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6843 hdev->reset_type != HNAE3_FUNC_RESET) {
6844 hclge_mac_stop_phy(hdev);
6845 hclge_update_link_status(hdev);
6846 return;
6849 for (i = 0; i < handle->kinfo.num_tqps; i++)
6850 hclge_reset_tqp(handle, i);
6852 hclge_config_mac_tnl_int(hdev, false);
6855 hclge_cfg_mac_mode(hdev, false);
6857 hclge_mac_stop_phy(hdev);
6859 /* reset tqp stats */
6860 hclge_reset_tqp_stats(handle);
6861 hclge_update_link_status(hdev);
6864 int hclge_vport_start(struct hclge_vport *vport)
6866 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6867 vport->last_active_jiffies = jiffies;
6871 void hclge_vport_stop(struct hclge_vport *vport)
6873 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6876 static int hclge_client_start(struct hnae3_handle *handle)
6878 struct hclge_vport *vport = hclge_get_vport(handle);
6880 return hclge_vport_start(vport);
6883 static void hclge_client_stop(struct hnae3_handle *handle)
6885 struct hclge_vport *vport = hclge_get_vport(handle);
6887 hclge_vport_stop(vport);
6890 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6891 u16 cmdq_resp, u8 resp_code,
6892 enum hclge_mac_vlan_tbl_opcode op)
6894 struct hclge_dev *hdev = vport->back;
6897 dev_err(&hdev->pdev->dev,
6898 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6903 if (op == HCLGE_MAC_VLAN_ADD) {
6904 if ((!resp_code) || (resp_code == 1)) {
6906 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6907 dev_err(&hdev->pdev->dev,
6908 "add mac addr failed for uc_overflow.\n");
6910 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6911 dev_err(&hdev->pdev->dev,
6912 "add mac addr failed for mc_overflow.\n");
6916 dev_err(&hdev->pdev->dev,
6917 "add mac addr failed for undefined, code=%u.\n",
6920 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6921 if (!resp_code) {
6922 return 0;
6923 } else if (resp_code == 1) {
6924 dev_dbg(&hdev->pdev->dev,
6925 "remove mac addr failed for miss.\n");
6929 dev_err(&hdev->pdev->dev,
6930 "remove mac addr failed for undefined, code=%u.\n",
6933 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6934 if (!resp_code) {
6935 return 0;
6936 } else if (resp_code == 1) {
6937 dev_dbg(&hdev->pdev->dev,
6938 "lookup mac addr failed for miss.\n");
6942 dev_err(&hdev->pdev->dev,
6943 "lookup mac addr failed for undefined, code=%u.\n",
6948 dev_err(&hdev->pdev->dev,
6949 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6954 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6956 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6958 unsigned int word_num;
6959 unsigned int bit_num;
6961 if (vfid > 255 || vfid < 0)
6964 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6965 word_num = vfid / 32;
6966 bit_num = vfid % 32;
6968 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6970 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6972 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6973 bit_num = vfid % 32;
6975 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6977 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6983 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6985 #define HCLGE_DESC_NUMBER 3
6986 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6989 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6990 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6991 if (desc[i].data[j])
6997 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6998 const u8 *addr, bool is_mc)
7000 const unsigned char *mac_addr = addr;
7001 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7002 (mac_addr[0]) | (mac_addr[1] << 8);
7003 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
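/* The six mac address bytes are packed little endian style: bytes 0-3
 * form the 32 bit high word and bytes 4-5 the 16 bit low word of the
 * table entry.
 */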
7005 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7007 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7008 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7011 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7012 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7015 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7016 struct hclge_mac_vlan_tbl_entry_cmd *req)
7018 struct hclge_dev *hdev = vport->back;
7019 struct hclge_desc desc;
7024 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7026 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7028 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7030 dev_err(&hdev->pdev->dev,
7031 "del mac addr failed for cmd_send, ret =%d.\n",
7035 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7036 retval = le16_to_cpu(desc.retval);
7038 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7039 HCLGE_MAC_VLAN_REMOVE);
7042 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7043 struct hclge_mac_vlan_tbl_entry_cmd *req,
7044 struct hclge_desc *desc,
7047 struct hclge_dev *hdev = vport->back;
7052 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7054 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7055 memcpy(desc[0].data,
7057 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7058 hclge_cmd_setup_basic_desc(&desc[1],
7059 HCLGE_OPC_MAC_VLAN_ADD,
7061 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7062 hclge_cmd_setup_basic_desc(&desc[2],
7063 HCLGE_OPC_MAC_VLAN_ADD,
7065 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7067 memcpy(desc[0].data,
7069 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7070 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7073 dev_err(&hdev->pdev->dev,
7074 "lookup mac addr failed for cmd_send, ret =%d.\n",
7078 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7079 retval = le16_to_cpu(desc[0].retval);
7081 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7082 HCLGE_MAC_VLAN_LKUP);
7085 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7086 struct hclge_mac_vlan_tbl_entry_cmd *req,
7087 struct hclge_desc *mc_desc)
7089 struct hclge_dev *hdev = vport->back;
7096 struct hclge_desc desc;
7098 hclge_cmd_setup_basic_desc(&desc,
7099 HCLGE_OPC_MAC_VLAN_ADD,
7101 memcpy(desc.data, req,
7102 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7103 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7104 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7105 retval = le16_to_cpu(desc.retval);
7107 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7109 HCLGE_MAC_VLAN_ADD);
7111 hclge_cmd_reuse_desc(&mc_desc[0], false);
7112 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7113 hclge_cmd_reuse_desc(&mc_desc[1], false);
7114 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7115 hclge_cmd_reuse_desc(&mc_desc[2], false);
7116 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7117 memcpy(mc_desc[0].data, req,
7118 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7119 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7120 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7121 retval = le16_to_cpu(mc_desc[0].retval);
7123 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7125 HCLGE_MAC_VLAN_ADD);
7129 dev_err(&hdev->pdev->dev,
7130 "add mac addr failed for cmd_send, ret =%d.\n",
7138 static int hclge_init_umv_space(struct hclge_dev *hdev)
7140 u16 allocated_size = 0;
7143 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7148 if (allocated_size < hdev->wanted_umv_size)
7149 dev_warn(&hdev->pdev->dev,
7150 "Alloc umv space failed, want %u, get %u\n",
7151 hdev->wanted_umv_size, allocated_size);
7153 mutex_init(&hdev->umv_mutex);
7154 hdev->max_umv_size = allocated_size;
7155 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7156 * reserve some unicast mac vlan table entries shared by the pf
7157 * and its vfs.
7159 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7160 hdev->share_umv_size = hdev->priv_umv_size +
7161 hdev->max_umv_size % (hdev->num_req_vfs + 2);
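/* For example (hypothetical numbers): with max_umv_size = 256 and
 * num_req_vfs = 6, each of the 8 functions gets priv_umv_size =
 * 256 / 8 = 32 private entries, and share_umv_size = 32 + 256 % 8 = 32.
 */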
7166 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7170 if (hdev->max_umv_size > 0) {
7171 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7175 hdev->max_umv_size = 0;
7177 mutex_destroy(&hdev->umv_mutex);
7182 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7183 u16 *allocated_size, bool is_alloc)
7185 struct hclge_umv_spc_alc_cmd *req;
7186 struct hclge_desc desc;
7189 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7190 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7191 if (!is_alloc)
7192 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7194 req->space_size = cpu_to_le32(space_size);
7196 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7198 dev_err(&hdev->pdev->dev,
7199 "%s umv space failed for cmd_send, ret =%d\n",
7200 is_alloc ? "allocate" : "free", ret);
7204 if (is_alloc && allocated_size)
7205 *allocated_size = le32_to_cpu(desc.data[1]);
7210 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7212 struct hclge_vport *vport;
7215 for (i = 0; i < hdev->num_alloc_vport; i++) {
7216 vport = &hdev->vport[i];
7217 vport->used_umv_num = 0;
7220 mutex_lock(&hdev->umv_mutex);
7221 hdev->share_umv_size = hdev->priv_umv_size +
7222 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7223 mutex_unlock(&hdev->umv_mutex);
7226 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7228 struct hclge_dev *hdev = vport->back;
7231 mutex_lock(&hdev->umv_mutex);
7232 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7233 hdev->share_umv_size == 0);
7234 mutex_unlock(&hdev->umv_mutex);
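/* Adjust the unicast mac table accounting: a vport consumes its
 * private quota first and only then the shared pool; freeing reverses
 * this, so entries above the private quota return to the shared pool.
 */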
7239 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7241 struct hclge_dev *hdev = vport->back;
7243 mutex_lock(&hdev->umv_mutex);
7245 if (vport->used_umv_num > hdev->priv_umv_size)
7246 hdev->share_umv_size++;
7248 if (vport->used_umv_num > 0)
7249 vport->used_umv_num--;
7251 if (vport->used_umv_num >= hdev->priv_umv_size &&
7252 hdev->share_umv_size > 0)
7253 hdev->share_umv_size--;
7254 vport->used_umv_num++;
7256 mutex_unlock(&hdev->umv_mutex);
7259 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7260 const unsigned char *addr)
7262 struct hclge_vport *vport = hclge_get_vport(handle);
7264 return hclge_add_uc_addr_common(vport, addr);
7267 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7268 const unsigned char *addr)
7270 struct hclge_dev *hdev = vport->back;
7271 struct hclge_mac_vlan_tbl_entry_cmd req;
7272 struct hclge_desc desc;
7273 u16 egress_port = 0;
7276 /* mac addr check */
7277 if (is_zero_ether_addr(addr) ||
7278 is_broadcast_ether_addr(addr) ||
7279 is_multicast_ether_addr(addr)) {
7280 dev_err(&hdev->pdev->dev,
7281 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7282 addr, is_zero_ether_addr(addr),
7283 is_broadcast_ether_addr(addr),
7284 is_multicast_ether_addr(addr));
7288 memset(&req, 0, sizeof(req));
7290 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7291 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7293 req.egress_port = cpu_to_le16(egress_port);
7295 hclge_prepare_mac_addr(&req, addr, false);
7297 /* Look up the mac address in the mac_vlan table, and add
7298 * it if the entry does not exist. Duplicate unicast entries
7299 * are not allowed in the mac vlan table.
7301 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7302 if (ret == -ENOENT) {
7303 if (!hclge_is_umv_space_full(vport)) {
7304 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7306 hclge_update_umv_space(vport, false);
7310 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7311 hdev->priv_umv_size);
7316 /* check if we just hit the duplicate */
7318 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7319 vport->vport_id, addr);
7323 dev_err(&hdev->pdev->dev,
7324 "PF failed to add unicast entry(%pM) in the MAC table\n",
7330 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7331 const unsigned char *addr)
7333 struct hclge_vport *vport = hclge_get_vport(handle);
7335 return hclge_rm_uc_addr_common(vport, addr);
7338 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7339 const unsigned char *addr)
7341 struct hclge_dev *hdev = vport->back;
7342 struct hclge_mac_vlan_tbl_entry_cmd req;
7345 /* mac addr check */
7346 if (is_zero_ether_addr(addr) ||
7347 is_broadcast_ether_addr(addr) ||
7348 is_multicast_ether_addr(addr)) {
7349 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7354 memset(&req, 0, sizeof(req));
7355 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7356 hclge_prepare_mac_addr(&req, addr, false);
7357 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7359 hclge_update_umv_space(vport, true);
7364 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7365 const unsigned char *addr)
7367 struct hclge_vport *vport = hclge_get_vport(handle);
7369 return hclge_add_mc_addr_common(vport, addr);
7372 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7373 const unsigned char *addr)
7375 struct hclge_dev *hdev = vport->back;
7376 struct hclge_mac_vlan_tbl_entry_cmd req;
7377 struct hclge_desc desc[3];
7380 /* mac addr check */
7381 if (!is_multicast_ether_addr(addr)) {
7382 dev_err(&hdev->pdev->dev,
7383 "Add mc mac err! invalid mac:%pM.\n",
7387 memset(&req, 0, sizeof(req));
7388 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7389 hclge_prepare_mac_addr(&req, addr, true);
7390 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7392 /* This mac addr does not exist, add a new entry for it */
7393 memset(desc[0].data, 0, sizeof(desc[0].data));
7394 memset(desc[1].data, 0, sizeof(desc[0].data));
7395 memset(desc[2].data, 0, sizeof(desc[0].data));
7397 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7400 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7402 if (status == -ENOSPC)
7403 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7408 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7409 const unsigned char *addr)
7411 struct hclge_vport *vport = hclge_get_vport(handle);
7413 return hclge_rm_mc_addr_common(vport, addr);
7416 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7417 const unsigned char *addr)
7419 struct hclge_dev *hdev = vport->back;
7420 struct hclge_mac_vlan_tbl_entry_cmd req;
7421 enum hclge_cmd_status status;
7422 struct hclge_desc desc[3];
7424 /* mac addr check */
7425 if (!is_multicast_ether_addr(addr)) {
7426 dev_dbg(&hdev->pdev->dev,
7427 "Remove mc mac err! invalid mac:%pM.\n",
7432 memset(&req, 0, sizeof(req));
7433 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7434 hclge_prepare_mac_addr(&req, addr, true);
7435 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7437 /* This mac addr exists, remove this handle's VFID from it */
7438 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7442 if (hclge_is_all_function_id_zero(desc))
7443 /* All the vfids are zero, so delete this entry */
7444 status = hclge_remove_mac_vlan_tbl(vport, &req);
7446 /* Not all vfids are zero, just update the vfid bitmap */
7447 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7450 /* Maybe this mac address is in the mta table, but it cannot be
7451 * deleted here because an mta entry represents an address
7452 * range rather than a specific address. The delete action for
7453 * all entries will take effect in update_mta_status, called by
7454 * hns3_nic_set_rx_mode.
7462 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7463 enum HCLGE_MAC_ADDR_TYPE mac_type)
7465 struct hclge_vport_mac_addr_cfg *mac_cfg;
7466 struct list_head *list;
7468 if (!vport->vport_id)
7471 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7475 mac_cfg->hd_tbl_status = true;
7476 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7478 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7479 &vport->uc_mac_list : &vport->mc_mac_list;
7481 list_add_tail(&mac_cfg->node, list);
7484 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7486 enum HCLGE_MAC_ADDR_TYPE mac_type)
7488 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7489 struct list_head *list;
7490 bool uc_flag, mc_flag;
7492 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7493 &vport->uc_mac_list : &vport->mc_mac_list;
7495 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7496 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7498 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7499 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7500 if (uc_flag && mac_cfg->hd_tbl_status)
7501 hclge_rm_uc_addr_common(vport, mac_addr);
7503 if (mc_flag && mac_cfg->hd_tbl_status)
7504 hclge_rm_mc_addr_common(vport, mac_addr);
7506 list_del(&mac_cfg->node);
7513 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7514 enum HCLGE_MAC_ADDR_TYPE mac_type)
7516 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7517 struct list_head *list;
7519 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7520 &vport->uc_mac_list : &vport->mc_mac_list;
7522 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7523 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7524 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7526 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7527 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7529 mac_cfg->hd_tbl_status = false;
7531 list_del(&mac_cfg->node);
7537 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7539 struct hclge_vport_mac_addr_cfg *mac, *tmp;
7540 struct hclge_vport *vport;
7543 for (i = 0; i < hdev->num_alloc_vport; i++) {
7544 vport = &hdev->vport[i];
7545 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7546 list_del(&mac->node);
7550 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7551 list_del(&mac->node);
7557 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7558 u16 cmdq_resp, u8 resp_code)
7560 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7561 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
7562 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7563 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7568 dev_err(&hdev->pdev->dev,
7569 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7574 switch (resp_code) {
7575 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7576 case HCLGE_ETHERTYPE_ALREADY_ADD:
7579 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7580 dev_err(&hdev->pdev->dev,
7581 "add mac ethertype failed for manager table overflow.\n");
7582 return_status = -EIO;
7584 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7585 dev_err(&hdev->pdev->dev,
7586 "add mac ethertype failed for key conflict.\n");
7587 return_status = -EIO;
7590 dev_err(&hdev->pdev->dev,
7591 "add mac ethertype failed for undefined, code=%u.\n",
7593 return_status = -EIO;
7596 return return_status;
7599 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7602 struct hclge_mac_vlan_tbl_entry_cmd req;
7603 struct hclge_dev *hdev = vport->back;
7604 struct hclge_desc desc;
7605 u16 egress_port = 0;
7608 if (is_zero_ether_addr(mac_addr))
7611 memset(&req, 0, sizeof(req));
7612 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7613 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7614 req.egress_port = cpu_to_le16(egress_port);
7615 hclge_prepare_mac_addr(&req, mac_addr, false);
7617 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7620 vf_idx += HCLGE_VF_VPORT_START_NUM;
7621 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7622 if (i != vf_idx &&
7623 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7629 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7632 struct hclge_vport *vport = hclge_get_vport(handle);
7633 struct hclge_dev *hdev = vport->back;
7635 vport = hclge_get_vf_vport(hdev, vf);
7639 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7640 dev_info(&hdev->pdev->dev,
7641 "Specified MAC(=%pM) is same as before, no change committed!\n",
7646 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7647 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7652 ether_addr_copy(vport->vf_info.mac, mac_addr);
7653 dev_info(&hdev->pdev->dev,
7654 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7657 return hclge_inform_reset_assert_to_vf(vport);
7660 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7661 const struct hclge_mac_mgr_tbl_entry_cmd *req)
7663 struct hclge_desc desc;
7668 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7669 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7671 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7673 dev_err(&hdev->pdev->dev,
7674 "add mac ethertype failed for cmd_send, ret =%d.\n",
7679 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7680 retval = le16_to_cpu(desc.retval);
7682 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7685 static int init_mgr_tbl(struct hclge_dev *hdev)
7690 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7691 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7693 dev_err(&hdev->pdev->dev,
7694 "add mac ethertype failed, ret =%d.\n",
7703 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7705 struct hclge_vport *vport = hclge_get_vport(handle);
7706 struct hclge_dev *hdev = vport->back;
7708 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7711 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7714 const unsigned char *new_addr = (const unsigned char *)p;
7715 struct hclge_vport *vport = hclge_get_vport(handle);
7716 struct hclge_dev *hdev = vport->back;
7719 /* mac addr check */
7720 if (is_zero_ether_addr(new_addr) ||
7721 is_broadcast_ether_addr(new_addr) ||
7722 is_multicast_ether_addr(new_addr)) {
7723 dev_err(&hdev->pdev->dev,
7724 "Change uc mac err! invalid mac:%pM.\n",
7729 if ((!is_first || is_kdump_kernel()) &&
7730 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7731 dev_warn(&hdev->pdev->dev,
7732 "remove old uc mac address fail.\n");
7734 ret = hclge_add_uc_addr(handle, new_addr);
7736 dev_err(&hdev->pdev->dev,
7737 "add uc mac address fail, ret =%d.\n",
7740 if (!is_first &&
7741 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7742 dev_err(&hdev->pdev->dev,
7743 "restore uc mac address fail.\n");
7748 ret = hclge_pause_addr_cfg(hdev, new_addr);
7750 dev_err(&hdev->pdev->dev,
7751 "configure mac pause address fail, ret =%d.\n",
7756 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7761 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7764 struct hclge_vport *vport = hclge_get_vport(handle);
7765 struct hclge_dev *hdev = vport->back;
7767 if (!hdev->hw.mac.phydev)
7770 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7773 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7774 u8 fe_type, bool filter_en, u8 vf_id)
7776 struct hclge_vlan_filter_ctrl_cmd *req;
7777 struct hclge_desc desc;
7780 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7782 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7783 req->vlan_type = vlan_type;
7784 req->vlan_fe = filter_en ? fe_type : 0;
7787 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7789 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7795 #define HCLGE_FILTER_TYPE_VF 0
7796 #define HCLGE_FILTER_TYPE_PORT 1
7797 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7798 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7799 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7800 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7801 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7802 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7803 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7804 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7805 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7807 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7809 struct hclge_vport *vport = hclge_get_vport(handle);
7810 struct hclge_dev *hdev = vport->back;
7812 if (hdev->pdev->revision >= 0x21) {
7813 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7814 HCLGE_FILTER_FE_EGRESS, enable, 0);
7815 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7816 HCLGE_FILTER_FE_INGRESS, enable, 0);
7818 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7819 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7823 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7825 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7828 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7829 bool is_kill, u16 vlan,
7832 struct hclge_vport *vport = &hdev->vport[vfid];
7833 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7834 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7835 struct hclge_desc desc[2];
7840 /* if the vf vlan table is full, firmware will close the vf vlan
7841 * filter, so it is neither possible nor necessary to add new vlan
7842 * ids to it. If spoof check is enabled and the vf vlan table is
7843 * full, no new vlan should be added: tx packets carrying such vlan ids would be dropped.
7845 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7846 if (vport->vf_info.spoofchk && vlan) {
7847 dev_err(&hdev->pdev->dev,
7848 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7854 hclge_cmd_setup_basic_desc(&desc[0],
7855 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7856 hclge_cmd_setup_basic_desc(&desc[1],
7857 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7859 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
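/* One bit per VF: vfid / 8 selects the bitmap byte and vfid % 8 the
 * bit within it; bytes beyond HCLGE_MAX_VF_BYTES belong to the second
 * descriptor.
 */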
7861 vf_byte_off = vfid / 8;
7862 vf_byte_val = 1 << (vfid % 8);
7864 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7865 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7867 req0->vlan_id = cpu_to_le16(vlan);
7868 req0->vlan_cfg = is_kill;
7870 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7871 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7873 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7875 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7877 dev_err(&hdev->pdev->dev,
7878 "Send vf vlan command fail, ret =%d.\n",
7884 #define HCLGE_VF_VLAN_NO_ENTRY 2
7885 if (!req0->resp_code || req0->resp_code == 1)
7888 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7889 set_bit(vfid, hdev->vf_vlan_full);
7890 dev_warn(&hdev->pdev->dev,
7891 "vf vlan table is full, vf vlan filter is disabled\n");
7895 dev_err(&hdev->pdev->dev,
7896 "Add vf vlan filter fail, ret =%u.\n",
7899 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7900 if (!req0->resp_code)
7903 /* vf vlan filter is disabled when the vf vlan table is full,
7904 * so the new vlan ids will not have been added to the table.
7905 * Just return 0 without a warning, to avoid massive verbose
7906 * logs at unload time.
7908 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7911 dev_err(&hdev->pdev->dev,
7912 "Kill vf vlan filter fail, ret =%u.\n",
7919 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7920 u16 vlan_id, bool is_kill)
7922 struct hclge_vlan_filter_pf_cfg_cmd *req;
7923 struct hclge_desc desc;
7924 u8 vlan_offset_byte_val;
7925 u8 vlan_offset_byte;
7929 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7931 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7932 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7933 HCLGE_VLAN_BYTE_SIZE;
7934 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
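/* A vlan id is thus encoded as (offset, byte, bit): the group of
 * HCLGE_VLAN_ID_OFFSET_STEP ids it falls into, the byte within that
 * group, and a one-hot bit inside the byte.
 */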
7936 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7937 req->vlan_offset = vlan_offset_160;
7938 req->vlan_cfg = is_kill;
7939 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7941 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7943 dev_err(&hdev->pdev->dev,
7944 "port vlan command, send fail, ret =%d.\n", ret);
7948 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7949 u16 vport_id, u16 vlan_id,
7952 u16 vport_idx, vport_num = 0;
7955 if (is_kill && !vlan_id)
7958 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7961 dev_err(&hdev->pdev->dev,
7962 "Set %u vport vlan filter config fail, ret =%d.\n",
7967 /* vlan 0 may be added twice when 8021q module is enabled */
7968 if (!is_kill && !vlan_id &&
7969 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7972 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7973 dev_err(&hdev->pdev->dev,
7974 "Add port vlan failed, vport %u is already in vlan %u\n",
7979 if (is_kill &&
7980 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7981 dev_err(&hdev->pdev->dev,
7982 "Delete port vlan failed, vport %u is not in vlan %u\n",
7987 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7990 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7991 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7997 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7999 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8000 struct hclge_vport_vtag_tx_cfg_cmd *req;
8001 struct hclge_dev *hdev = vport->back;
8002 struct hclge_desc desc;
8006 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8008 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8009 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8010 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8011 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8012 vcfg->accept_tag1 ? 1 : 0);
8013 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8014 vcfg->accept_untag1 ? 1 : 0);
8015 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8016 vcfg->accept_tag2 ? 1 : 0);
8017 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8018 vcfg->accept_untag2 ? 1 : 0);
8019 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8020 vcfg->insert_tag1_en ? 1 : 0);
8021 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8022 vcfg->insert_tag2_en ? 1 : 0);
8023 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8025 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8026 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8027 HCLGE_VF_NUM_PER_BYTE;
8028 req->vf_bitmap[bmap_index] =
8029 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8031 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8033 dev_err(&hdev->pdev->dev,
8034 "Send port txvlan cfg command fail, ret =%d\n",
8040 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8042 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8043 struct hclge_vport_vtag_rx_cfg_cmd *req;
8044 struct hclge_dev *hdev = vport->back;
8045 struct hclge_desc desc;
8049 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8051 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8052 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8053 vcfg->strip_tag1_en ? 1 : 0);
8054 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8055 vcfg->strip_tag2_en ? 1 : 0);
8056 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8057 vcfg->vlan1_vlan_prionly ? 1 : 0);
8058 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8059 vcfg->vlan2_vlan_prionly ? 1 : 0);
8061 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8062 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8063 HCLGE_VF_NUM_PER_BYTE;
8064 req->vf_bitmap[bmap_index] =
8065 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8067 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8069 dev_err(&hdev->pdev->dev,
8070 "Send port rxvlan cfg command fail, ret =%d\n",
8076 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8077 u16 port_base_vlan_state,
8082 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8083 vport->txvlan_cfg.accept_tag1 = true;
8084 vport->txvlan_cfg.insert_tag1_en = false;
8085 vport->txvlan_cfg.default_tag1 = 0;
8087 vport->txvlan_cfg.accept_tag1 = false;
8088 vport->txvlan_cfg.insert_tag1_en = true;
8089 vport->txvlan_cfg.default_tag1 = vlan_tag;
8092 vport->txvlan_cfg.accept_untag1 = true;
8094 /* accept_tag2 and accept_untag2 are not supported on
8095 * pdev revision 0x20; newer revisions support them, but
8096 * these two fields cannot be configured by the user.
8098 vport->txvlan_cfg.accept_tag2 = true;
8099 vport->txvlan_cfg.accept_untag2 = true;
8100 vport->txvlan_cfg.insert_tag2_en = false;
8101 vport->txvlan_cfg.default_tag2 = 0;
8103 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8104 vport->rxvlan_cfg.strip_tag1_en = false;
8105 vport->rxvlan_cfg.strip_tag2_en =
8106 vport->rxvlan_cfg.rx_vlan_offload_en;
8108 vport->rxvlan_cfg.strip_tag1_en =
8109 vport->rxvlan_cfg.rx_vlan_offload_en;
8110 vport->rxvlan_cfg.strip_tag2_en = true;
8112 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8113 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8115 ret = hclge_set_vlan_tx_offload_cfg(vport);
8119 return hclge_set_vlan_rx_offload_cfg(vport);
8122 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8124 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8125 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8126 struct hclge_desc desc;
8129 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8130 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8131 rx_req->ot_fst_vlan_type =
8132 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8133 rx_req->ot_sec_vlan_type =
8134 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8135 rx_req->in_fst_vlan_type =
8136 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8137 rx_req->in_sec_vlan_type =
8138 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8140 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8142 dev_err(&hdev->pdev->dev,
8143 "Send rxvlan protocol type command fail, ret =%d\n",
8148 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8150 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8151 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8152 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8154 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8156 dev_err(&hdev->pdev->dev,
8157 "Send txvlan protocol type command fail, ret =%d\n",
8163 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8165 #define HCLGE_DEF_VLAN_TYPE 0x8100
8167 struct hnae3_handle *handle = &hdev->vport[0].nic;
8168 struct hclge_vport *vport;
8172 if (hdev->pdev->revision >= 0x21) {
8173 /* for revision 0x21, vf vlan filter is per function */
8174 for (i = 0; i < hdev->num_alloc_vport; i++) {
8175 vport = &hdev->vport[i];
8176 ret = hclge_set_vlan_filter_ctrl(hdev,
8177 HCLGE_FILTER_TYPE_VF,
8178 HCLGE_FILTER_FE_EGRESS,
8185 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8186 HCLGE_FILTER_FE_INGRESS, true,
8191 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8192 HCLGE_FILTER_FE_EGRESS_V1_B,
8198 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8200 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8201 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8202 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8203 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8204 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8205 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8207 ret = hclge_set_vlan_protocol_type(hdev);
8211 for (i = 0; i < hdev->num_alloc_vport; i++) {
8214 vport = &hdev->vport[i];
8215 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8217 ret = hclge_vlan_offload_cfg(vport,
8218 vport->port_base_vlan_cfg.state,
8224 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8227 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8230 struct hclge_vport_vlan_cfg *vlan;
8232 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8236 vlan->hd_tbl_status = writen_to_tbl;
8237 vlan->vlan_id = vlan_id;
8239 list_add_tail(&vlan->node, &vport->vlan_list);
8242 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8244 struct hclge_vport_vlan_cfg *vlan, *tmp;
8245 struct hclge_dev *hdev = vport->back;
8248 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8249 if (!vlan->hd_tbl_status) {
8250 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8252 vlan->vlan_id, false);
8254 dev_err(&hdev->pdev->dev,
8255 "restore vport vlan list failed, ret=%d\n",
8260 vlan->hd_tbl_status = true;
8266 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8269 struct hclge_vport_vlan_cfg *vlan, *tmp;
8270 struct hclge_dev *hdev = vport->back;
8272 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8273 if (vlan->vlan_id == vlan_id) {
8274 if (is_write_tbl && vlan->hd_tbl_status)
8275 hclge_set_vlan_filter_hw(hdev,
8281 list_del(&vlan->node);
8288 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8290 struct hclge_vport_vlan_cfg *vlan, *tmp;
8291 struct hclge_dev *hdev = vport->back;
8293 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8294 if (vlan->hd_tbl_status)
8295 hclge_set_vlan_filter_hw(hdev,
8301 vlan->hd_tbl_status = false;
8303 list_del(&vlan->node);
8309 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8311 struct hclge_vport_vlan_cfg *vlan, *tmp;
8312 struct hclge_vport *vport;
8315 for (i = 0; i < hdev->num_alloc_vport; i++) {
8316 vport = &hdev->vport[i];
8317 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8318 list_del(&vlan->node);
8324 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8326 struct hclge_vport *vport = hclge_get_vport(handle);
8327 struct hclge_vport_vlan_cfg *vlan, *tmp;
8328 struct hclge_dev *hdev = vport->back;
8333 for (i = 0; i < hdev->num_alloc_vport; i++) {
8334 vport = &hdev->vport[i];
8335 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8336 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8337 state = vport->port_base_vlan_cfg.state;
8339 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8340 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8341 vport->vport_id, vlan_id,
8346 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8349 if (!vlan->hd_tbl_status)
8351 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8353 vlan->vlan_id, false);
8360 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8362 struct hclge_vport *vport = hclge_get_vport(handle);
8364 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8365 vport->rxvlan_cfg.strip_tag1_en = false;
8366 vport->rxvlan_cfg.strip_tag2_en = enable;
8368 vport->rxvlan_cfg.strip_tag1_en = enable;
8369 vport->rxvlan_cfg.strip_tag2_en = true;
8371 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8372 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8373 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8375 return hclge_set_vlan_rx_offload_cfg(vport);
8378 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8379 u16 port_base_vlan_state,
8380 struct hclge_vlan_info *new_info,
8381 struct hclge_vlan_info *old_info)
8383 struct hclge_dev *hdev = vport->back;
8386 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8387 hclge_rm_vport_all_vlan_table(vport, false);
8388 return hclge_set_vlan_filter_hw(hdev,
8389 htons(new_info->vlan_proto),
8395 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8396 vport->vport_id, old_info->vlan_tag,
8401 return hclge_add_vport_all_vlan_table(vport);
8404 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8405 struct hclge_vlan_info *vlan_info)
8407 struct hnae3_handle *nic = &vport->nic;
8408 struct hclge_vlan_info *old_vlan_info;
8409 struct hclge_dev *hdev = vport->back;
8412 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8414 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8418 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8419 /* add new VLAN tag */
8420 ret = hclge_set_vlan_filter_hw(hdev,
8421 htons(vlan_info->vlan_proto),
8423 vlan_info->vlan_tag,
8428 /* remove old VLAN tag */
8429 ret = hclge_set_vlan_filter_hw(hdev,
8430 htons(old_vlan_info->vlan_proto),
8432 old_vlan_info->vlan_tag,
8440 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8445 /* update state only when disable/enable port based VLAN */
8446 vport->port_base_vlan_cfg.state = state;
8447 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8448 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8450 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8453 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8454 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8455 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
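/* Decide the port based vlan transition for the requested tag: enable
 * when setting a tag while disabled, disable when clearing the tag,
 * modify when changing to a different tag, otherwise no change.
 */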
8460 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8461 enum hnae3_port_base_vlan_state state,
8464 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8465 if (!vlan)
8466 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8467 else
8468 return HNAE3_PORT_BASE_VLAN_ENABLE;
8469 } else {
8470 if (!vlan)
8471 return HNAE3_PORT_BASE_VLAN_DISABLE;
8472 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8473 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8474 else
8475 return HNAE3_PORT_BASE_VLAN_MODIFY;
8476 }
8479 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8480 u16 vlan, u8 qos, __be16 proto)
8482 struct hclge_vport *vport = hclge_get_vport(handle);
8483 struct hclge_dev *hdev = vport->back;
8484 struct hclge_vlan_info vlan_info;
8488 if (hdev->pdev->revision == 0x20)
8491 vport = hclge_get_vf_vport(hdev, vfid);
8495 /* qos is a 3-bit value, so it cannot be bigger than 7 */
8496 if (vlan > VLAN_N_VID - 1 || qos > 7)
8498 if (proto != htons(ETH_P_8021Q))
8499 return -EPROTONOSUPPORT;
8501 state = hclge_get_port_base_vlan_state(vport,
8502 vport->port_base_vlan_cfg.state,
8504 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8507 vlan_info.vlan_tag = vlan;
8508 vlan_info.qos = qos;
8509 vlan_info.vlan_proto = ntohs(proto);
8511 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8512 return hclge_update_port_base_vlan_cfg(vport, state,
8515 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8516 vport->vport_id, state,
8523 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8524 u16 vlan_id, bool is_kill)
8526 struct hclge_vport *vport = hclge_get_vport(handle);
8527 struct hclge_dev *hdev = vport->back;
8528 bool writen_to_tbl = false;
8531 /* When the device is resetting, firmware is unable to handle the
8532 * mailbox. Just record the vlan id, and remove it after the
8533 * reset finishes.
8535 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8536 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8540 /* When port based vlan is enabled, we use the port based vlan as the
8541 * vlan filter entry. In this case, we don't update the vlan filter
8542 * table when the user adds a new vlan or removes an existing one;
8543 * only the vport vlan list is updated. The vlan ids in the list are
8544 * not written to the vlan filter table until port based vlan is disabled.
8546 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8547 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8549 writen_to_tbl = true;
8554 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8556 hclge_add_vport_vlan_table(vport, vlan_id,
8558 } else if (is_kill) {
8559 /* when removing the hw vlan filter fails, record the vlan id, and
8560 * try to remove it from hw later, to stay consistent
8561 * with the stack.
8563 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8568 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8570 #define HCLGE_MAX_SYNC_COUNT 60
8572 int i, ret, sync_cnt = 0;
8575 /* walk every vport and retry the vlan deletions that failed earlier */
8576 for (i = 0; i < hdev->num_alloc_vport; i++) {
8577 struct hclge_vport *vport = &hdev->vport[i];
8579 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8581 while (vlan_id != VLAN_N_VID) {
8582 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8583 vport->vport_id, vlan_id,
8585 if (ret && ret != -EINVAL)
8588 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8589 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8592 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8595 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8601 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8603 struct hclge_config_max_frm_size_cmd *req;
8604 struct hclge_desc desc;
8606 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8608 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8609 req->max_frm_size = cpu_to_le16(new_mps);
8610 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8612 return hclge_cmd_send(&hdev->hw, &desc, 1);
8615 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8617 struct hclge_vport *vport = hclge_get_vport(handle);
8619 return hclge_set_vport_mtu(vport, new_mtu);
8622 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8624 struct hclge_dev *hdev = vport->back;
8625 int i, max_frm_size, ret;
8627 /* HW supports 2 layers of vlan */
8628 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
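/* e.g. new_mtu = 1500 gives max_frm_size = 1500 + 14 (ethernet header)
 * + 4 (FCS) + 2 * 4 (double vlan tags) = 1526 bytes.
 */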
8629 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8630 max_frm_size > HCLGE_MAC_MAX_FRAME)
8633 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8634 mutex_lock(&hdev->vport_lock);
8635 /* VF's mps must fit within hdev->mps */
8636 if (vport->vport_id && max_frm_size > hdev->mps) {
8637 mutex_unlock(&hdev->vport_lock);
8639 } else if (vport->vport_id) {
8640 vport->mps = max_frm_size;
8641 mutex_unlock(&hdev->vport_lock);
8645 /* PF's mps must be no less than any VF's mps */
8646 for (i = 1; i < hdev->num_alloc_vport; i++)
8647 if (max_frm_size < hdev->vport[i].mps) {
8648 mutex_unlock(&hdev->vport_lock);
8652 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8654 ret = hclge_set_mac_mtu(hdev, max_frm_size);
8656 dev_err(&hdev->pdev->dev,
8657 "Change mtu fail, ret =%d\n", ret);
8661 hdev->mps = max_frm_size;
8662 vport->mps = max_frm_size;
8664 ret = hclge_buffer_alloc(hdev);
8666 dev_err(&hdev->pdev->dev,
8667 "Allocate buffer fail, ret =%d\n", ret);
8670 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8671 mutex_unlock(&hdev->vport_lock);
8675 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8678 struct hclge_reset_tqp_queue_cmd *req;
8679 struct hclge_desc desc;
8682 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8684 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8685 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8687 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8689 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8691 dev_err(&hdev->pdev->dev,
8692 "Send tqp reset cmd error, status =%d\n", ret);
8699 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8701 struct hclge_reset_tqp_queue_cmd *req;
8702 struct hclge_desc desc;
8705 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8707 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8708 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8710 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8712 dev_err(&hdev->pdev->dev,
8713 "Get reset status error, status =%d\n", ret);
8717 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
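/* Map a handle-local queue id to the device-global TQP index. Note
 * that "covert" is a long-standing typo for "convert"; the name is
 * left unchanged here since other code may reference the symbol.
 */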
8720 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8722 struct hnae3_queue *queue;
8723 struct hclge_tqp *tqp;
8725 queue = handle->kinfo.tqp[queue_id];
8726 tqp = container_of(queue, struct hclge_tqp, q);
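/* Reset one TQP: disable it, assert the reset through firmware, poll
 * the ready flag up to HCLGE_TQP_RESET_TRY_TIMES times with a short
 * sleep in between, then deassert the soft reset.
 */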
8731 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8733 struct hclge_vport *vport = hclge_get_vport(handle);
8734 struct hclge_dev *hdev = vport->back;
8735 int reset_try_times = 0;
8740 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8742 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8744 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8748 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8750 dev_err(&hdev->pdev->dev,
8751 "Send reset tqp cmd fail, ret = %d\n", ret);
8755 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8756 reset_status = hclge_get_reset_status(hdev, queue_gid);
8760 /* Wait for tqp hw reset */
8761 usleep_range(1000, 1200);
8764 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8765 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8769 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8771 dev_err(&hdev->pdev->dev,
8772 "Deassert the soft reset fail, ret = %d\n", ret);
8777 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8779 struct hclge_dev *hdev = vport->back;
8780 int reset_try_times = 0;
8785 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8787 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8789 dev_warn(&hdev->pdev->dev,
8790 "Send reset tqp cmd fail, ret = %d\n", ret);
8794 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8795 reset_status = hclge_get_reset_status(hdev, queue_gid);
8799 /* Wait for tqp hw reset */
8800 usleep_range(1000, 1200);
8803 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8804 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8808 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8810 dev_warn(&hdev->pdev->dev,
8811 "Deassert the soft reset fail, ret = %d\n", ret);
8814 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8816 struct hclge_vport *vport = hclge_get_vport(handle);
8817 struct hclge_dev *hdev = vport->back;
8819 return hdev->fw_version;
8822 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8824 struct phy_device *phydev = hdev->hw.mac.phydev;
8829 phy_set_asym_pause(phydev, rx_en, tx_en);
8832 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8836 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8839 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8841 dev_err(&hdev->pdev->dev,
8842 "configure pauseparam error, ret = %d.\n", ret);
8847 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8849 struct phy_device *phydev = hdev->hw.mac.phydev;
8850 u16 remote_advertising = 0;
8851 u16 local_advertising;
8852 u32 rx_pause, tx_pause;
8855 if (!phydev->link || !phydev->autoneg)
8858 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8860 if (phydev->pause)
8861 remote_advertising = LPA_PAUSE_CAP;
8863 if (phydev->asym_pause)
8864 remote_advertising |= LPA_PAUSE_ASYM;
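/* Resolve the pause mode from the local and link partner
 * advertisements following the IEEE 802.3 pause resolution rules,
 * e.g. both sides advertising symmetric pause yields
 * FLOW_CTRL_TX | FLOW_CTRL_RX.
 */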
8866 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8867 remote_advertising);
8868 tx_pause = flowctl & FLOW_CTRL_TX;
8869 rx_pause = flowctl & FLOW_CTRL_RX;
8871 if (phydev->duplex == HCLGE_MAC_HALF) {
8876 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8879 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8880 u32 *rx_en, u32 *tx_en)
8882 struct hclge_vport *vport = hclge_get_vport(handle);
8883 struct hclge_dev *hdev = vport->back;
8884 struct phy_device *phydev = hdev->hw.mac.phydev;
8886 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8888 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8894 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8897 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8900 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8909 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8910 u32 rx_en, u32 tx_en)
8912 if (rx_en && tx_en)
8913 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8914 else if (rx_en && !tx_en)
8915 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8916 else if (!rx_en && tx_en)
8917 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8918 else
8919 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8921 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8924 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8925 u32 rx_en, u32 tx_en)
8927 struct hclge_vport *vport = hclge_get_vport(handle);
8928 struct hclge_dev *hdev = vport->back;
8929 struct phy_device *phydev = hdev->hw.mac.phydev;
8933 fc_autoneg = hclge_get_autoneg(handle);
8934 if (auto_neg != fc_autoneg) {
8935 dev_info(&hdev->pdev->dev,
8936 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8941 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8942 dev_info(&hdev->pdev->dev,
8943 "Priority flow control enabled. Cannot set link flow control.\n");
8947 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8949 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8951 if (!auto_neg)
8952 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8954 if (phydev)
8955 return phy_start_aneg(phydev);
8960 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8961 u8 *auto_neg, u32 *speed, u8 *duplex)
8963 struct hclge_vport *vport = hclge_get_vport(handle);
8964 struct hclge_dev *hdev = vport->back;
8967 *speed = hdev->hw.mac.speed;
8969 *duplex = hdev->hw.mac.duplex;
8971 *auto_neg = hdev->hw.mac.autoneg;
8974 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8977 struct hclge_vport *vport = hclge_get_vport(handle);
8978 struct hclge_dev *hdev = vport->back;
8981 *media_type = hdev->hw.mac.media_type;
8984 *module_type = hdev->hw.mac.module_type;
8987 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8988 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8990 struct hclge_vport *vport = hclge_get_vport(handle);
8991 struct hclge_dev *hdev = vport->back;
8992 struct phy_device *phydev = hdev->hw.mac.phydev;
8993 int mdix_ctrl, mdix, is_resolved;
8994 unsigned int retval;
8996 if (!phydev) {
8997 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8998 *tp_mdix = ETH_TP_MDI_INVALID;
8999 return;
9002 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9004 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9005 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9006 HCLGE_PHY_MDIX_CTRL_S);
9008 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9009 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9010 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9012 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9014 switch (mdix_ctrl) {
9015 case 0x0:
9016 *tp_mdix_ctrl = ETH_TP_MDI;
9017 break;
9018 case 0x1:
9019 *tp_mdix_ctrl = ETH_TP_MDI_X;
9020 break;
9021 case 0x3:
9022 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9023 break;
9024 default:
9025 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9026 break;
9027 }
9029 if (!is_resolved)
9030 *tp_mdix = ETH_TP_MDI_INVALID;
9031 else if (mdix)
9032 *tp_mdix = ETH_TP_MDI_X;
9033 else
9034 *tp_mdix = ETH_TP_MDI;
9037 static void hclge_info_show(struct hclge_dev *hdev)
9039 struct device *dev = &hdev->pdev->dev;
9041 dev_info(dev, "PF info begin:\n");
9043 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9044 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9045 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9046 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9047 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9048 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9049 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9050 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9051 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9052 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9053 dev_info(dev, "This is %s PF\n",
9054 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9055 dev_info(dev, "DCB %s\n",
9056 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9057 dev_info(dev, "MQPRIO %s\n",
9058 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9060 dev_info(dev, "PF info end.\n");
9063 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9064 struct hclge_vport *vport)
9066 struct hnae3_client *client = vport->nic.client;
9067 struct hclge_dev *hdev = ae_dev->priv;
9068 int rst_cnt = hdev->rst_stats.reset_cnt;
9071 ret = client->ops->init_instance(&vport->nic);
9075 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9076 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9077 rst_cnt != hdev->rst_stats.reset_cnt) {
9082 /* Enable nic hw error interrupts */
9083 ret = hclge_config_nic_hw_error(hdev, true);
9085 dev_err(&ae_dev->pdev->dev,
9086 "fail(%d) to enable hw error interrupts\n", ret);
9090 hnae3_set_client_init_flag(client, ae_dev, 1);
9092 if (netif_msg_drv(&hdev->vport->nic))
9093 hclge_info_show(hdev);
9098 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9099 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9100 msleep(HCLGE_WAIT_RESET_DONE);
9102 client->ops->uninit_instance(&vport->nic, 0);
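/* Illustrative sketch (hypothetical helper, for exposition only): both
 * client-init paths here guard against a reset racing with client
 * initialization by snapshotting reset_cnt before init_instance() and
 * re-checking it, together with the RST_HANDLING bit, afterwards:
 */
static int __maybe_unused hclge_init_guarded(struct hclge_dev *hdev,
					     int (*init)(void *arg), void *arg)
{
	u32 rst_cnt = hdev->rst_stats.reset_cnt;	/* snapshot before init */
	int ret = init(arg);

	if (ret)
		return ret;

	/* a reset that started meanwhile invalidates the new instance */
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt)
		return -EBUSY;

	return 0;
}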
9107 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9108 struct hclge_vport *vport)
9110 struct hnae3_client *client = vport->roce.client;
9111 struct hclge_dev *hdev = ae_dev->priv;
9115 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9119 client = hdev->roce_client;
9120 ret = hclge_init_roce_base_info(vport);
9124 rst_cnt = hdev->rst_stats.reset_cnt;
9125 ret = client->ops->init_instance(&vport->roce);
9129 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9130 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9131 rst_cnt != hdev->rst_stats.reset_cnt) {
9136 /* Enable roce ras interrupts */
9137 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9139 dev_err(&ae_dev->pdev->dev,
9140 "fail(%d) to enable roce ras interrupts\n", ret);
9144 hnae3_set_client_init_flag(client, ae_dev, 1);
9149 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9150 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9151 msleep(HCLGE_WAIT_RESET_DONE);
9153 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9158 static int hclge_init_client_instance(struct hnae3_client *client,
9159 struct hnae3_ae_dev *ae_dev)
9161 struct hclge_dev *hdev = ae_dev->priv;
9162 struct hclge_vport *vport;
9165 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9166 vport = &hdev->vport[i];
9168 switch (client->type) {
9169 case HNAE3_CLIENT_KNIC:
9170 hdev->nic_client = client;
9171 vport->nic.client = client;
9172 ret = hclge_init_nic_client_instance(ae_dev, vport);
9176 ret = hclge_init_roce_client_instance(ae_dev, vport);
9181 case HNAE3_CLIENT_ROCE:
9182 if (hnae3_dev_roce_supported(hdev)) {
9183 hdev->roce_client = client;
9184 vport->roce.client = client;
9187 ret = hclge_init_roce_client_instance(ae_dev, vport);
9200 hdev->nic_client = NULL;
9201 vport->nic.client = NULL;
9204 hdev->roce_client = NULL;
9205 vport->roce.client = NULL;
9209 static void hclge_uninit_client_instance(struct hnae3_client *client,
9210 struct hnae3_ae_dev *ae_dev)
9212 struct hclge_dev *hdev = ae_dev->priv;
9213 struct hclge_vport *vport;
9216 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9217 vport = &hdev->vport[i];
9218 if (hdev->roce_client) {
9219 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9220 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9221 msleep(HCLGE_WAIT_RESET_DONE);
9223 hdev->roce_client->ops->uninit_instance(&vport->roce,
9225 hdev->roce_client = NULL;
9226 vport->roce.client = NULL;
9228 if (client->type == HNAE3_CLIENT_ROCE)
9230 if (hdev->nic_client && client->ops->uninit_instance) {
9231 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9232 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9233 msleep(HCLGE_WAIT_RESET_DONE);
9235 client->ops->uninit_instance(&vport->nic, 0);
9236 hdev->nic_client = NULL;
9237 vport->nic.client = NULL;
9242 static int hclge_pci_init(struct hclge_dev *hdev)
9244 struct pci_dev *pdev = hdev->pdev;
9245 struct hclge_hw *hw;
9248 ret = pci_enable_device(pdev);
9250 dev_err(&pdev->dev, "failed to enable PCI device\n");
9254 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9256 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9259 "can't set consistent PCI DMA");
9260 goto err_disable_device;
9262 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9265 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9267 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9268 goto err_disable_device;
9271 pci_set_master(pdev);
9273 hw->io_base = pcim_iomap(pdev, 2, 0);
9275 dev_err(&pdev->dev, "Can't map configuration register space\n");
9277 goto err_clr_master;
9280 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9284 pci_clear_master(pdev);
9285 pci_release_regions(pdev);
9287 pci_disable_device(pdev);
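/* Illustrative sketch (hypothetical helper): the DMA setup in
 * hclge_pci_init() tries a 64-bit mask first and only falls back to
 * 32 bits with a warning, failing the probe when neither fits:
 */
static int __maybe_unused hclge_set_dma_mask(struct pci_dev *pdev)
{
	/* prefer full 64-bit streaming and coherent DMA */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;

	/* fall back to 32-bit addressing for constrained platforms */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;

	dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	return 0;
}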
9292 static void hclge_pci_uninit(struct hclge_dev *hdev)
9294 struct pci_dev *pdev = hdev->pdev;
9296 pcim_iounmap(pdev, hdev->hw.io_base);
9297 pci_free_irq_vectors(pdev);
9298 pci_clear_master(pdev);
9299 pci_release_mem_regions(pdev);
9300 pci_disable_device(pdev);
9303 static void hclge_state_init(struct hclge_dev *hdev)
9305 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9306 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9307 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9308 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9309 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9310 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9311 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9314 static void hclge_state_uninit(struct hclge_dev *hdev)
9316 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9317 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9319 if (hdev->reset_timer.function)
9320 del_timer_sync(&hdev->reset_timer);
9321 if (hdev->service_task.work.func)
9322 cancel_delayed_work_sync(&hdev->service_task);
9325 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9327 #define HCLGE_FLR_WAIT_MS 100
9328 #define HCLGE_FLR_WAIT_CNT 50
9329 struct hclge_dev *hdev = ae_dev->priv;
9332 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9333 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9334 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9335 hclge_reset_event(hdev->pdev, NULL);
9337 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9338 cnt++ < HCLGE_FLR_WAIT_CNT)
9339 msleep(HCLGE_FLR_WAIT_MS);
9341 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9342 dev_err(&hdev->pdev->dev,
9343 "flr wait down timeout: %d\n", cnt);
9346 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9348 struct hclge_dev *hdev = ae_dev->priv;
9350 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9353 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9357 for (i = 0; i < hdev->num_alloc_vport; i++) {
9358 struct hclge_vport *vport = &hdev->vport[i];
9361 /* Send cmd to clear VF's FUNC_RST_ING */
9362 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9364 dev_warn(&hdev->pdev->dev,
9365 "clear vf(%u) rst failed %d!\n",
9366 vport->vport_id, ret);
9370 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9372 struct pci_dev *pdev = ae_dev->pdev;
9373 struct hclge_dev *hdev;
9376 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9383 hdev->ae_dev = ae_dev;
9384 hdev->reset_type = HNAE3_NONE_RESET;
9385 hdev->reset_level = HNAE3_FUNC_RESET;
9386 ae_dev->priv = hdev;
9388 /* HW supports 2-layer VLAN tags, so reserve room for two VLAN headers */
9389 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9391 mutex_init(&hdev->vport_lock);
9392 spin_lock_init(&hdev->fd_rule_lock);
9394 ret = hclge_pci_init(hdev);
9396 dev_err(&pdev->dev, "PCI init failed\n");
9400 /* Initialize the firmware command queue */
9401 ret = hclge_cmd_queue_init(hdev);
9403 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9404 goto err_pci_uninit;
9407 /* Initialize the firmware command interface */
9408 ret = hclge_cmd_init(hdev);
9410 goto err_cmd_uninit;
9412 ret = hclge_get_cap(hdev);
9414 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9416 goto err_cmd_uninit;
9419 ret = hclge_configure(hdev);
9421 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9422 goto err_cmd_uninit;
9425 ret = hclge_init_msi(hdev);
9427 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9428 goto err_cmd_uninit;
9431 ret = hclge_misc_irq_init(hdev);
9434 "Misc IRQ(vector0) init error, ret = %d.\n",
9436 goto err_msi_uninit;
9439 ret = hclge_alloc_tqps(hdev);
9441 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9442 goto err_msi_irq_uninit;
9445 ret = hclge_alloc_vport(hdev);
9447 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9448 goto err_msi_irq_uninit;
9451 ret = hclge_map_tqp(hdev);
9453 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9454 goto err_msi_irq_uninit;
9457 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9458 ret = hclge_mac_mdio_config(hdev);
9460 dev_err(&hdev->pdev->dev,
9461 "mdio config fail ret=%d\n", ret);
9462 goto err_msi_irq_uninit;
9466 ret = hclge_init_umv_space(hdev);
9468 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9469 goto err_mdiobus_unreg;
9472 ret = hclge_mac_init(hdev);
9474 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9475 goto err_mdiobus_unreg;
9478 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9480 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9481 goto err_mdiobus_unreg;
9484 ret = hclge_config_gro(hdev, true);
9486 goto err_mdiobus_unreg;
9488 ret = hclge_init_vlan_config(hdev);
9490 dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
9491 goto err_mdiobus_unreg;
9494 ret = hclge_tm_schd_init(hdev);
9496 dev_err(&pdev->dev, "tm schd init fail, ret = %d\n", ret);
9497 goto err_mdiobus_unreg;
9500 hclge_rss_init_cfg(hdev);
9501 ret = hclge_rss_init_hw(hdev);
9503 dev_err(&pdev->dev, "RSS init fail, ret = %d\n", ret);
9504 goto err_mdiobus_unreg;
9507 ret = init_mgr_tbl(hdev);
9509 dev_err(&pdev->dev, "manager table init fail, ret = %d\n", ret);
9510 goto err_mdiobus_unreg;
9513 ret = hclge_init_fd_config(hdev);
9516 "fd table init fail, ret=%d\n", ret);
9517 goto err_mdiobus_unreg;
9520 INIT_KFIFO(hdev->mac_tnl_log);
9522 hclge_dcb_ops_set(hdev);
9524 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9525 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9527 /* Set up affinity after the service timer is set up, because
9528  * add_timer_on is called from the affinity notify callback. */
9530 hclge_misc_affinity_setup(hdev);
9532 hclge_clear_all_event_cause(hdev);
9533 hclge_clear_resetting_state(hdev);
9535 /* Log and clear the hw errors that have already occurred */
9536 hclge_handle_all_hns_hw_errors(ae_dev);
9538 /* Request a delayed reset for error recovery, because an immediate
9539  * global reset on this PF could disturb the pending initialization of other PFs. */
9541 if (ae_dev->hw_err_reset_req) {
9542 enum hnae3_reset_type reset_level;
9544 reset_level = hclge_get_reset_level(ae_dev,
9545 &ae_dev->hw_err_reset_req);
9546 hclge_set_def_reset_request(ae_dev, reset_level);
9547 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9550 /* Enable MISC vector(vector0) */
9551 hclge_enable_vector(&hdev->misc_vector, true);
9553 hclge_state_init(hdev);
9554 hdev->last_reset_time = jiffies;
9556 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9559 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9564 if (hdev->hw.mac.phydev)
9565 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9567 hclge_misc_irq_uninit(hdev);
9569 pci_free_irq_vectors(pdev);
9571 hclge_cmd_uninit(hdev);
9573 pcim_iounmap(pdev, hdev->hw.io_base);
9574 pci_clear_master(pdev);
9575 pci_release_regions(pdev);
9576 pci_disable_device(pdev);
9581 static void hclge_stats_clear(struct hclge_dev *hdev)
9583 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
9586 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9588 return hclge_config_switch_param(hdev, vf, enable,
9589 HCLGE_SWITCH_ANTI_SPOOF_MASK);
9592 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9594 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9595 HCLGE_FILTER_FE_NIC_INGRESS_B,
9599 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9603 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9605 dev_err(&hdev->pdev->dev,
9606 "Set vf %d mac spoof check %s failed, ret=%d\n",
9607 vf, enable ? "on" : "off", ret);
9611 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9613 dev_err(&hdev->pdev->dev,
9614 "Set vf %d vlan spoof check %s failed, ret=%d\n",
9615 vf, enable ? "on" : "off", ret);
9620 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9623 struct hclge_vport *vport = hclge_get_vport(handle);
9624 struct hclge_dev *hdev = vport->back;
9625 u32 new_spoofchk = enable ? 1 : 0;
9628 if (hdev->pdev->revision == 0x20)
9631 vport = hclge_get_vf_vport(hdev, vf);
9635 if (vport->vf_info.spoofchk == new_spoofchk)
9638 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9639 dev_warn(&hdev->pdev->dev,
9640 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9642 else if (enable && hclge_is_umv_space_full(vport))
9643 dev_warn(&hdev->pdev->dev,
9644 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9647 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9651 vport->vf_info.spoofchk = new_spoofchk;
9655 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9657 struct hclge_vport *vport = hdev->vport;
9661 if (hdev->pdev->revision == 0x20)
9664 /* restore the vf spoof check state after reset */
9665 for (i = 0; i < hdev->num_alloc_vport; i++) {
9666 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9667 vport->vf_info.spoofchk);
9677 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9679 struct hclge_vport *vport = hclge_get_vport(handle);
9680 struct hclge_dev *hdev = vport->back;
9681 u32 new_trusted = enable ? 1 : 0;
9685 vport = hclge_get_vf_vport(hdev, vf);
9689 if (vport->vf_info.trusted == new_trusted)
9692 /* Disable promisc mode for VF if it is not trusted any more. */
9693 if (!enable && vport->vf_info.promisc_enable) {
9694 en_bc_pmc = hdev->pdev->revision != 0x20;
9695 ret = hclge_set_vport_promisc_mode(vport, false, false,
9699 vport->vf_info.promisc_enable = 0;
9700 hclge_inform_vf_promisc_info(vport);
9703 vport->vf_info.trusted = new_trusted;
9708 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9713 /* reset vf rate to default value */
9714 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9715 struct hclge_vport *vport = &hdev->vport[vf];
9717 vport->vf_info.max_tx_rate = 0;
9718 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9720 dev_err(&hdev->pdev->dev,
9721 "vf%d failed to reset to default, ret=%d\n",
9722 vf - HCLGE_VF_VPORT_START_NUM, ret);
9726 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9727 int min_tx_rate, int max_tx_rate)
9729 if (min_tx_rate != 0 ||
9730 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9731 dev_err(&hdev->pdev->dev,
9732 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9733 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9740 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9741 int min_tx_rate, int max_tx_rate, bool force)
9743 struct hclge_vport *vport = hclge_get_vport(handle);
9744 struct hclge_dev *hdev = vport->back;
9747 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9751 vport = hclge_get_vf_vport(hdev, vf);
9755 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9758 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9762 vport->vf_info.max_tx_rate = max_tx_rate;
9767 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9769 struct hnae3_handle *handle = &hdev->vport->nic;
9770 struct hclge_vport *vport;
9774 /* resume the vf max_tx_rate after reset */
9775 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9776 vport = hclge_get_vf_vport(hdev, vf);
9780 /* zero means max rate; after reset the firmware has already set it
9781  * back to max rate, so just continue. */
9783 if (!vport->vf_info.max_tx_rate)
9786 ret = hclge_set_vf_rate(handle, vf, 0,
9787 vport->vf_info.max_tx_rate, true);
9789 dev_err(&hdev->pdev->dev,
9790 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9791 vf, vport->vf_info.max_tx_rate, ret);
9799 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9801 struct hclge_vport *vport = hdev->vport;
9804 for (i = 0; i < hdev->num_alloc_vport; i++) {
9805 hclge_vport_stop(vport);
9810 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9812 struct hclge_dev *hdev = ae_dev->priv;
9813 struct pci_dev *pdev = ae_dev->pdev;
9816 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9818 hclge_stats_clear(hdev);
9819 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9820 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9822 ret = hclge_cmd_init(hdev);
9824 dev_err(&pdev->dev, "Cmd queue init failed\n");
9828 ret = hclge_map_tqp(hdev);
9830 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9834 hclge_reset_umv_space(hdev);
9836 ret = hclge_mac_init(hdev);
9838 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9842 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9844 dev_err(&pdev->dev, "Enable TSO fail, ret = %d\n", ret);
9848 ret = hclge_config_gro(hdev, true);
9852 ret = hclge_init_vlan_config(hdev);
9854 dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
9858 ret = hclge_tm_init_hw(hdev, true);
9860 dev_err(&pdev->dev, "tm init hw fail, ret = %d\n", ret);
9864 ret = hclge_rss_init_hw(hdev);
9866 dev_err(&pdev->dev, "RSS init fail, ret = %d\n", ret);
9870 ret = hclge_init_fd_config(hdev);
9872 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9876 /* Log and clear the hw errors that have already occurred */
9877 hclge_handle_all_hns_hw_errors(ae_dev);
9879 /* Re-enable the hw error interrupts because
9880  * they get disabled on global reset. */
9882 ret = hclge_config_nic_hw_error(hdev, true);
9885 "fail(%d) to re-enable NIC hw error interrupts\n",
9890 if (hdev->roce_client) {
9891 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9894 "fail(%d) to re-enable roce ras interrupts\n",
9900 hclge_reset_vport_state(hdev);
9901 ret = hclge_reset_vport_spoofchk(hdev);
9905 ret = hclge_resume_vf_rate(hdev);
9909 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9915 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9917 struct hclge_dev *hdev = ae_dev->priv;
9918 struct hclge_mac *mac = &hdev->hw.mac;
9920 hclge_reset_vf_rate(hdev);
9921 hclge_misc_affinity_teardown(hdev);
9922 hclge_state_uninit(hdev);
9925 mdiobus_unregister(mac->mdio_bus);
9927 hclge_uninit_umv_space(hdev);
9929 /* Disable MISC vector(vector0) */
9930 hclge_enable_vector(&hdev->misc_vector, false);
9931 synchronize_irq(hdev->misc_vector.vector_irq);
9933 /* Disable all hw interrupts */
9934 hclge_config_mac_tnl_int(hdev, false);
9935 hclge_config_nic_hw_error(hdev, false);
9936 hclge_config_rocee_ras_interrupt(hdev, false);
9938 hclge_cmd_uninit(hdev);
9939 hclge_misc_irq_uninit(hdev);
9940 hclge_pci_uninit(hdev);
9941 mutex_destroy(&hdev->vport_lock);
9942 hclge_uninit_vport_mac_table(hdev);
9943 hclge_uninit_vport_vlan_table(hdev);
9944 ae_dev->priv = NULL;
9947 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9949 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9950 struct hclge_vport *vport = hclge_get_vport(handle);
9951 struct hclge_dev *hdev = vport->back;
9953 return min_t(u32, hdev->rss_size_max,
9954 vport->alloc_tqps / kinfo->num_tc);
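/* Worked example for the formula above (illustrative numbers): with
 * rss_size_max = 64, alloc_tqps = 16 and num_tc = 4, the handle can
 * expose at most min(64, 16 / 4) = 4 combined channels.
 */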
9957 static void hclge_get_channels(struct hnae3_handle *handle,
9958 struct ethtool_channels *ch)
9960 ch->max_combined = hclge_get_max_channels(handle);
9961 ch->other_count = 1;
9963 ch->combined_count = handle->kinfo.rss_size;
9966 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9967 u16 *alloc_tqps, u16 *max_rss_size)
9969 struct hclge_vport *vport = hclge_get_vport(handle);
9970 struct hclge_dev *hdev = vport->back;
9972 *alloc_tqps = vport->alloc_tqps;
9973 *max_rss_size = hdev->rss_size_max;
9976 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9977 bool rxfh_configured)
9979 struct hclge_vport *vport = hclge_get_vport(handle);
9980 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9981 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9982 struct hclge_dev *hdev = vport->back;
9983 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9984 u16 cur_rss_size = kinfo->rss_size;
9985 u16 cur_tqps = kinfo->num_tqps;
9986 u16 tc_valid[HCLGE_MAX_TC_NUM];
9992 kinfo->req_rss_size = new_tqps_num;
9994 ret = hclge_tm_vport_map_update(hdev);
9996 dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
10000 roundup_size = roundup_pow_of_two(kinfo->rss_size);
10001 roundup_size = ilog2(roundup_size);
10002 /* Set the RSS TC mode according to the new RSS size */
10003 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10006 if (!(hdev->hw_tc_map & BIT(i)))
10010 tc_size[i] = roundup_size;
10011 tc_offset[i] = kinfo->rss_size * i;
10013 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10017 /* RSS indirection table has been configured by user */
10018 if (rxfh_configured)
10021 /* Reinitialize the RSS indirection table according to the new RSS size */
10022 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10026 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10027 rss_indir[i] = i % kinfo->rss_size;
10029 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10031 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10038 dev_info(&hdev->pdev->dev,
10039 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10040 cur_rss_size, kinfo->rss_size,
10041 cur_tqps, kinfo->rss_size * kinfo->num_tc);
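/* Worked example for the TC remapping above (illustrative numbers): with a
 * new rss_size of 6, roundup_pow_of_two(6) = 8, so tc_size[i] = ilog2(8) = 3
 * for every enabled TC and tc_offset[i] = 6 * i. The refilled indirection
 * table then cycles 0,1,2,3,4,5,0,1,... across all HCLGE_RSS_IND_TBL_SIZE
 * entries, spreading flows evenly over the rss_size queues of each TC.
 */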
10046 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10047 u32 *regs_num_64_bit)
10049 struct hclge_desc desc;
10053 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10054 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10056 dev_err(&hdev->pdev->dev,
10057 "Query register number cmd failed, ret = %d.\n", ret);
10061 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10062 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10064 total_num = *regs_num_32_bit + *regs_num_64_bit;
10071 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10074 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10075 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10077 struct hclge_desc *desc;
10078 u32 *reg_val = data;
10088 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10089 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10090 HCLGE_32_BIT_REG_RTN_DATANUM);
10091 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10095 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10096 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10098 dev_err(&hdev->pdev->dev,
10099 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10104 for (i = 0; i < cmd_num; i++) {
10106 desc_data = (__le32 *)(&desc[i].data[0]);
10107 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10109 desc_data = (__le32 *)(&desc[i]);
10110 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10112 for (k = 0; k < n; k++) {
10113 *reg_val++ = le32_to_cpu(*desc_data++);
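/* Worked example for the descriptor math above (illustrative): the first
 * descriptor carries HCLGE_32_BIT_DESC_NODATA_LEN = 2 header words, so a
 * query for regs_num = 30 registers needs
 * cmd_num = DIV_ROUND_UP(30 + 2, 8) = 4 descriptors: 6 data words come
 * from desc[0] and 8 from each of desc[1..3].
 */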
10125 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10128 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10129 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10131 struct hclge_desc *desc;
10132 u64 *reg_val = data;
10142 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10143 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10144 HCLGE_64_BIT_REG_RTN_DATANUM);
10145 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10149 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10150 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10152 dev_err(&hdev->pdev->dev,
10153 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10158 for (i = 0; i < cmd_num; i++) {
10160 desc_data = (__le64 *)(&desc[i].data[0]);
10161 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10163 desc_data = (__le64 *)(&desc[i]);
10164 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10166 for (k = 0; k < n; k++) {
10167 *reg_val++ = le64_to_cpu(*desc_data++);
10179 #define MAX_SEPARATE_NUM 4
10180 #define SEPARATOR_VALUE 0xFDFCFBFA
10181 #define REG_NUM_PER_LINE 4
10182 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
10183 #define REG_SEPARATOR_LINE 1
10184 #define REG_NUM_REMAIN_MASK 3
10185 #define BD_LIST_MAX_NUM 30
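/* Worked example for the separator scheme above (illustrative): each dump
 * segment is padded with SEPARATOR_VALUE words so its total length becomes
 * a non-zero multiple of REG_NUM_PER_LINE. With separator_num computed as
 * MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK), reg_num = 14 gets
 * 4 - (14 & 3) = 2 separators (16 words total), while an already aligned
 * reg_num = 16 still gets a full line of 4 separators (20 words total),
 * so every segment ends with a visible marker.
 */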
10187 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10189 /* prepare 4 commands to query DFX BD number */
10190 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10191 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10192 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10193 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10194 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10195 desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10196 hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10198 return hclge_cmd_send(&hdev->hw, desc, 4);
10201 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10205 #define HCLGE_DFX_REG_BD_NUM 4
10207 u32 entries_per_desc, desc_index, index, offset, i;
10208 struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
10211 ret = hclge_query_bd_num_cmd_send(hdev, desc);
10213 dev_err(&hdev->pdev->dev,
10214 "Get dfx bd num fail, status is %d.\n", ret);
10218 entries_per_desc = ARRAY_SIZE(desc[0].data);
10219 for (i = 0; i < type_num; i++) {
10220 offset = hclge_dfx_bd_offset_list[i];
10221 index = offset % entries_per_desc;
10222 desc_index = offset / entries_per_desc;
10223 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
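/* Worked example for the lookup above (illustrative): struct hclge_desc
 * carries 6 data words, so entries_per_desc = 6 and a DFX offset such as
 * HCLGE_DFX_NCSI_BD_OFFSET = 7 resolves to desc_index = 7 / 6 = 1 and
 * index = 7 % 6 = 1, i.e. that BD count lives in desc[1].data[1].
 */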
10229 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10230 struct hclge_desc *desc_src, int bd_num,
10231 enum hclge_opcode_type cmd)
10233 struct hclge_desc *desc = desc_src;
10236 hclge_cmd_setup_basic_desc(desc, cmd, true);
10237 for (i = 0; i < bd_num - 1; i++) {
10238 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10240 hclge_cmd_setup_basic_desc(desc, cmd, true);
10244 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10246 dev_err(&hdev->pdev->dev,
10247 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10253 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10256 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10257 struct hclge_desc *desc = desc_src;
10260 entries_per_desc = ARRAY_SIZE(desc->data);
10261 reg_num = entries_per_desc * bd_num;
10262 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10263 for (i = 0; i < reg_num; i++) {
10264 index = i % entries_per_desc;
10265 desc_index = i / entries_per_desc;
10266 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10268 for (i = 0; i < separator_num; i++)
10269 *reg++ = SEPARATOR_VALUE;
10271 return reg_num + separator_num;
10274 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10276 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10277 int data_len_per_desc, data_len, bd_num, i;
10278 int bd_num_list[BD_LIST_MAX_NUM];
10281 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10283 dev_err(&hdev->pdev->dev,
10284 "Get dfx reg bd num fail, status is %d.\n", ret);
10288 data_len_per_desc = sizeof_field(struct hclge_desc, data);
10290 for (i = 0; i < dfx_reg_type_num; i++) {
10291 bd_num = bd_num_list[i];
10292 data_len = data_len_per_desc * bd_num;
10293 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
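/* Worked example for the length accounting above (illustrative): with
 * data_len_per_desc = sizeof_field(struct hclge_desc, data) = 24 bytes, a
 * register type using bd_num = 2 descriptors has data_len = 48 bytes and
 * reserves (48 / 16 + 1) * 16 = 64 bytes: three 16-byte register lines
 * plus one separator line.
 */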
10299 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10301 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10302 int bd_num, bd_num_max, buf_len, i;
10303 int bd_num_list[BD_LIST_MAX_NUM];
10304 struct hclge_desc *desc_src;
10308 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10310 dev_err(&hdev->pdev->dev,
10311 "Get dfx reg bd num fail, status is %d.\n", ret);
10315 bd_num_max = bd_num_list[0];
10316 for (i = 1; i < dfx_reg_type_num; i++)
10317 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10319 buf_len = sizeof(*desc_src) * bd_num_max;
10320 desc_src = kzalloc(buf_len, GFP_KERNEL);
10322 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
10326 for (i = 0; i < dfx_reg_type_num; i++) {
10327 bd_num = bd_num_list[i];
10328 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10329 hclge_dfx_reg_opcode_list[i]);
10331 dev_err(&hdev->pdev->dev,
10332 "Get dfx reg fail, status is %d.\n", ret);
10336 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10343 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10344 struct hnae3_knic_private_info *kinfo)
10346 #define HCLGE_RING_REG_OFFSET 0x200
10347 #define HCLGE_RING_INT_REG_OFFSET 0x4
10349 int i, j, reg_num, separator_num;
10353 /* fetch per-PF register values from the PF PCIe register space */
10354 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10355 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10356 for (i = 0; i < reg_num; i++)
10357 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10358 for (i = 0; i < separator_num; i++)
10359 *reg++ = SEPARATOR_VALUE;
10360 data_num_sum = reg_num + separator_num;
10362 reg_num = ARRAY_SIZE(common_reg_addr_list);
10363 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10364 for (i = 0; i < reg_num; i++)
10365 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10366 for (i = 0; i < separator_num; i++)
10367 *reg++ = SEPARATOR_VALUE;
10368 data_num_sum += reg_num + separator_num;
10370 reg_num = ARRAY_SIZE(ring_reg_addr_list);
10371 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10372 for (j = 0; j < kinfo->num_tqps; j++) {
10373 for (i = 0; i < reg_num; i++)
10374 *reg++ = hclge_read_dev(&hdev->hw,
10375 ring_reg_addr_list[i] +
10376 HCLGE_RING_REG_OFFSET * j);
10377 for (i = 0; i < separator_num; i++)
10378 *reg++ = SEPARATOR_VALUE;
10380 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10382 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10383 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10384 for (j = 0; j < hdev->num_msi_used - 1; j++) {
10385 for (i = 0; i < reg_num; i++)
10386 *reg++ = hclge_read_dev(&hdev->hw,
10387 tqp_intr_reg_addr_list[i] +
10388 HCLGE_RING_INT_REG_OFFSET * j);
10389 for (i = 0; i < separator_num; i++)
10390 *reg++ = SEPARATOR_VALUE;
10392 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10394 return data_num_sum;
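/* Layout produced above (for reference): the PF register dump is four
 * separator-terminated segments -- command-queue registers, common
 * registers, per-TQP ring registers (one block per queue pair, stride
 * HCLGE_RING_REG_OFFSET) and per-vector interrupt registers (one block
 * per TQP vector, stride HCLGE_RING_INT_REG_OFFSET).
 */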
10397 static int hclge_get_regs_len(struct hnae3_handle *handle)
10399 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10400 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10401 struct hclge_vport *vport = hclge_get_vport(handle);
10402 struct hclge_dev *hdev = vport->back;
10403 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10404 int regs_lines_32_bit, regs_lines_64_bit;
10407 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10409 dev_err(&hdev->pdev->dev,
10410 "Get register number failed, ret = %d.\n", ret);
10414 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10416 dev_err(&hdev->pdev->dev,
10417 "Get dfx reg len failed, ret = %d.\n", ret);
10421 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10422 REG_SEPARATOR_LINE;
10423 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10424 REG_SEPARATOR_LINE;
10425 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10426 REG_SEPARATOR_LINE;
10427 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10428 REG_SEPARATOR_LINE;
10429 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10430 REG_SEPARATOR_LINE;
10431 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10432 REG_SEPARATOR_LINE;
10434 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10435 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10436 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
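/* Worked example (illustrative): cmdq_reg_addr_list, defined earlier in
 * this file, holds 14 u32 entries, so cmdq_lines = 56 / 16 + 1 = 4 lines,
 * i.e. 64 bytes. The fetch side agrees: hclge_fetch_pf_reg() emits 14
 * register words plus 4 - (14 & 3) = 2 separators, also 16 words =
 * 64 bytes, which is what keeps get_regs_len and get_regs consistent.
 */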
10439 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10442 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10443 struct hclge_vport *vport = hclge_get_vport(handle);
10444 struct hclge_dev *hdev = vport->back;
10445 u32 regs_num_32_bit, regs_num_64_bit;
10446 int i, reg_num, separator_num, ret;
10449 *version = hdev->fw_version;
10451 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10453 dev_err(&hdev->pdev->dev,
10454 "Get register number failed, ret = %d.\n", ret);
10458 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10460 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10462 dev_err(&hdev->pdev->dev,
10463 "Get 32 bit register failed, ret = %d.\n", ret);
10466 reg_num = regs_num_32_bit;
10468 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10469 for (i = 0; i < separator_num; i++)
10470 *reg++ = SEPARATOR_VALUE;
10472 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10474 dev_err(&hdev->pdev->dev,
10475 "Get 64 bit register failed, ret = %d.\n", ret);
10478 reg_num = regs_num_64_bit * 2;
10480 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10481 for (i = 0; i < separator_num; i++)
10482 *reg++ = SEPARATOR_VALUE;
10484 ret = hclge_get_dfx_reg(hdev, reg);
10486 dev_err(&hdev->pdev->dev,
10487 "Get dfx register failed, ret = %d.\n", ret);
10490 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10492 struct hclge_set_led_state_cmd *req;
10493 struct hclge_desc desc;
10496 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10498 req = (struct hclge_set_led_state_cmd *)desc.data;
10499 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10500 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10502 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10504 dev_err(&hdev->pdev->dev,
10505 "Send set led state cmd error, ret =%d\n", ret);
10510 enum hclge_led_status {
10513 HCLGE_LED_NO_CHANGE = 0xFF,
10516 static int hclge_set_led_id(struct hnae3_handle *handle,
10517 enum ethtool_phys_id_state status)
10519 struct hclge_vport *vport = hclge_get_vport(handle);
10520 struct hclge_dev *hdev = vport->back;
10523 case ETHTOOL_ID_ACTIVE:
10524 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10525 case ETHTOOL_ID_INACTIVE:
10526 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10532 static void hclge_get_link_mode(struct hnae3_handle *handle,
10533 unsigned long *supported,
10534 unsigned long *advertising)
10536 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10537 struct hclge_vport *vport = hclge_get_vport(handle);
10538 struct hclge_dev *hdev = vport->back;
10539 unsigned int idx = 0;
10541 for (; idx < size; idx++) {
10542 supported[idx] = hdev->hw.mac.supported[idx];
10543 advertising[idx] = hdev->hw.mac.advertising[idx];
10547 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10549 struct hclge_vport *vport = hclge_get_vport(handle);
10550 struct hclge_dev *hdev = vport->back;
10552 return hclge_config_gro(hdev, enable);
10555 static const struct hnae3_ae_ops hclge_ops = {
10556 .init_ae_dev = hclge_init_ae_dev,
10557 .uninit_ae_dev = hclge_uninit_ae_dev,
10558 .flr_prepare = hclge_flr_prepare,
10559 .flr_done = hclge_flr_done,
10560 .init_client_instance = hclge_init_client_instance,
10561 .uninit_client_instance = hclge_uninit_client_instance,
10562 .map_ring_to_vector = hclge_map_ring_to_vector,
10563 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10564 .get_vector = hclge_get_vector,
10565 .put_vector = hclge_put_vector,
10566 .set_promisc_mode = hclge_set_promisc_mode,
10567 .set_loopback = hclge_set_loopback,
10568 .start = hclge_ae_start,
10569 .stop = hclge_ae_stop,
10570 .client_start = hclge_client_start,
10571 .client_stop = hclge_client_stop,
10572 .get_status = hclge_get_status,
10573 .get_ksettings_an_result = hclge_get_ksettings_an_result,
10574 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10575 .get_media_type = hclge_get_media_type,
10576 .check_port_speed = hclge_check_port_speed,
10577 .get_fec = hclge_get_fec,
10578 .set_fec = hclge_set_fec,
10579 .get_rss_key_size = hclge_get_rss_key_size,
10580 .get_rss_indir_size = hclge_get_rss_indir_size,
10581 .get_rss = hclge_get_rss,
10582 .set_rss = hclge_set_rss,
10583 .set_rss_tuple = hclge_set_rss_tuple,
10584 .get_rss_tuple = hclge_get_rss_tuple,
10585 .get_tc_size = hclge_get_tc_size,
10586 .get_mac_addr = hclge_get_mac_addr,
10587 .set_mac_addr = hclge_set_mac_addr,
10588 .do_ioctl = hclge_do_ioctl,
10589 .add_uc_addr = hclge_add_uc_addr,
10590 .rm_uc_addr = hclge_rm_uc_addr,
10591 .add_mc_addr = hclge_add_mc_addr,
10592 .rm_mc_addr = hclge_rm_mc_addr,
10593 .set_autoneg = hclge_set_autoneg,
10594 .get_autoneg = hclge_get_autoneg,
10595 .restart_autoneg = hclge_restart_autoneg,
10596 .halt_autoneg = hclge_halt_autoneg,
10597 .get_pauseparam = hclge_get_pauseparam,
10598 .set_pauseparam = hclge_set_pauseparam,
10599 .set_mtu = hclge_set_mtu,
10600 .reset_queue = hclge_reset_tqp,
10601 .get_stats = hclge_get_stats,
10602 .get_mac_stats = hclge_get_mac_stat,
10603 .update_stats = hclge_update_stats,
10604 .get_strings = hclge_get_strings,
10605 .get_sset_count = hclge_get_sset_count,
10606 .get_fw_version = hclge_get_fw_version,
10607 .get_mdix_mode = hclge_get_mdix_mode,
10608 .enable_vlan_filter = hclge_enable_vlan_filter,
10609 .set_vlan_filter = hclge_set_vlan_filter,
10610 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10611 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10612 .reset_event = hclge_reset_event,
10613 .get_reset_level = hclge_get_reset_level,
10614 .set_default_reset_request = hclge_set_def_reset_request,
10615 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10616 .set_channels = hclge_set_channels,
10617 .get_channels = hclge_get_channels,
10618 .get_regs_len = hclge_get_regs_len,
10619 .get_regs = hclge_get_regs,
10620 .set_led_id = hclge_set_led_id,
10621 .get_link_mode = hclge_get_link_mode,
10622 .add_fd_entry = hclge_add_fd_entry,
10623 .del_fd_entry = hclge_del_fd_entry,
10624 .del_all_fd_entries = hclge_del_all_fd_entries,
10625 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10626 .get_fd_rule_info = hclge_get_fd_rule_info,
10627 .get_fd_all_rules = hclge_get_all_rules,
10628 .restore_fd_rules = hclge_restore_fd_entries,
10629 .enable_fd = hclge_enable_fd,
10630 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10631 .dbg_run_cmd = hclge_dbg_run_cmd,
10632 .handle_hw_ras_error = hclge_handle_hw_ras_error,
10633 .get_hw_reset_stat = hclge_get_hw_reset_stat,
10634 .ae_dev_resetting = hclge_ae_dev_resetting,
10635 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10636 .set_gro_en = hclge_gro_en,
10637 .get_global_queue_id = hclge_covert_handle_qid_global,
10638 .set_timer_task = hclge_set_timer_task,
10639 .mac_connect_phy = hclge_mac_connect_phy,
10640 .mac_disconnect_phy = hclge_mac_disconnect_phy,
10641 .restore_vlan_table = hclge_restore_vlan_table,
10642 .get_vf_config = hclge_get_vf_config,
10643 .set_vf_link_state = hclge_set_vf_link_state,
10644 .set_vf_spoofchk = hclge_set_vf_spoofchk,
10645 .set_vf_trust = hclge_set_vf_trust,
10646 .set_vf_rate = hclge_set_vf_rate,
10647 .set_vf_mac = hclge_set_vf_mac,
10650 static struct hnae3_ae_algo ae_algo = {
10652 .pdev_id_table = ae_algo_pci_tbl,
10655 static int hclge_init(void)
10657 pr_info("%s is initializing\n", HCLGE_NAME);
10659 hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
10661 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10665 hnae3_register_ae_algo(&ae_algo);
10670 static void hclge_exit(void)
10672 hnae3_unregister_ae_algo(&ae_algo);
10673 destroy_workqueue(hclge_wq);
10675 module_init(hclge_init);
10676 module_exit(hclge_exit);
10678 MODULE_LICENSE("GPL");
10679 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10680 MODULE_DESCRIPTION("HCLGE Driver");
10681 MODULE_VERSION(HCLGE_MOD_VERSION);