// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

#define HCLGE_VF_VPORT_START_NUM	1

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_EINT_PREEN};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};

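/* Query MAC statistics with the fixed 21-descriptor command
 * (HCLGE_OPC_STATS_MAC). This is the fallback path for firmware that
 * cannot report the MAC register number first.
 */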
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

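/* Query all MAC statistics in one shot (HCLGE_OPC_STATS_MAC_ALL),
 * using a caller-supplied descriptor count obtained beforehand from
 * hclge_mac_query_reg_num().
 */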
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

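/* Pick the statistics acquisition method based on firmware support:
 * new firmware reports the MAC register number and uses the complete
 * path; old firmware returns -EOPNOTSUPP, so we fall back to the
 * defective path with its fixed descriptor count.
 */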
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

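/* Accumulate the per-queue RX and TX packet counters of every TQP of
 * this handle, issuing one IGU status query command per queue.
 */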
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has a TX and an RX queue */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

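/* Return the number of ethtool strings for the requested stringset:
 * selftest names for ETH_SS_TEST, MAC plus per-queue statistics for
 * ETH_SS_STATS. As a side effect, refreshes the loopback capability
 * bits in handle->flags.
 */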
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
				   HNAE3_SUPPORT_PHY_LOOPBACK | \
				   HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
				   HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: supported by all MAC modes (GE/XGE/LGE/CGE)
	 * phy: supported only when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

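/* Record whether this PF is the main PF from the function status
 * reported by firmware; fails with -EINVAL until PF reset is done.
 */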
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* the NIC's MSI-X vector number always equals RoCE's */
		hdev->num_nic_msi = hdev->num_roce_msi;

		/* PF should have NIC vectors and RoCE vectors,
		 * NIC vectors are queued before RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		hdev->num_nic_msi = hdev->num_msi;
	}

	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	return 0;
}

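/* Translate the firmware speed encoding into an HCLGE_MAC_SPEED_*
 * value. The case values below follow the firmware's numbering for
 * this field (0 for 1G up to 5 for 100G, with 6/7 for 10M/100M).
 */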
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

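/* The hclge_convert_setting_*() helpers below translate the firmware
 * speed_ability bitmap into ethtool link modes for SR, LR, CR and KR
 * media respectively.
 */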
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

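/* Unpack the configuration parameters returned by
 * HCLGE_OPC_GET_CFG_PARAM into struct hclge_cfg. The MAC address is
 * split across param[2] (low 32 bits) and param[3] (high bits).
 */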
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be fetched
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* the minimal number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

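/* Read the static configuration from firmware and initialise the
 * corresponding fields of struct hclge_dev: queue and descriptor
 * numbers, MAC address, TC setup and the initial CPU affinity mask.
 */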
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently non-contiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

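/* Allocate the per-device TQP array and set up the static fields of
 * every queue: register base, descriptor numbers and buffer size.
 */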
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure one to one mapping between irq and queue at default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

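/* Check whether rx_all can hold the already-assigned private buffers
 * plus a standard shared buffer; if it can, size the shared buffer
 * and its per-TC high/low thresholds and report success.
 */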
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

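/* Try to split the whole RX packet buffer evenly between the enabled
 * TCs as private buffers, leaving no shared buffer; returns false
 * when the per-TC share would drop below the required minimum.
 */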
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
			COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

2176 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2177 struct hclge_pkt_buf_alloc *buf_alloc)
2179 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2180 struct hclge_rx_com_thrd *req;
2181 struct hclge_desc desc[2];
2182 struct hclge_tc_thrd *tc;
2186 for (i = 0; i < 2; i++) {
2187 hclge_cmd_setup_basic_desc(&desc[i],
2188 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2189 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2191 /* The first descriptor sets the NEXT bit to 1 */
2193 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2195 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2197 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2198 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2200 req->com_thrd[j].high =
2201 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2202 req->com_thrd[j].high |=
2203 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2204 req->com_thrd[j].low =
2205 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2206 req->com_thrd[j].low |=
2207 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2211 /* Send 2 descriptors at one time */
2212 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2214 dev_err(&hdev->pdev->dev,
2215 "common threshold config cmd failed %d\n", ret);
2219 static int hclge_common_wl_config(struct hclge_dev *hdev,
2220 struct hclge_pkt_buf_alloc *buf_alloc)
2222 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2223 struct hclge_rx_com_wl *req;
2224 struct hclge_desc desc;
2227 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2229 req = (struct hclge_rx_com_wl *)desc.data;
2230 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2231 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2233 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2234 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2236 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2238 dev_err(&hdev->pdev->dev,
2239 "common waterline config cmd failed %d\n", ret);
2244 int hclge_buffer_alloc(struct hclge_dev *hdev)
2246 struct hclge_pkt_buf_alloc *pkt_buf;
2249 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2253 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2255 dev_err(&hdev->pdev->dev,
2256 "could not calc tx buffer size for all TCs %d\n", ret);
2260 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2262 dev_err(&hdev->pdev->dev,
2263 "could not alloc tx buffers %d\n", ret);
2267 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2269 dev_err(&hdev->pdev->dev,
2270 "could not calc rx priv buffer size for all TCs %d\n",
2275 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2277 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2282 if (hnae3_dev_dcb_supported(hdev)) {
2283 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2285 dev_err(&hdev->pdev->dev,
2286 "could not configure rx private waterline %d\n",
2291 ret = hclge_common_thrd_config(hdev, pkt_buf);
2293 dev_err(&hdev->pdev->dev,
2294 "could not configure common threshold %d\n",
2300 ret = hclge_common_wl_config(hdev, pkt_buf);
2302 dev_err(&hdev->pdev->dev,
2303 "could not configure common waterline %d\n", ret);
2310 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2312 struct hnae3_handle *roce = &vport->roce;
2313 struct hnae3_handle *nic = &vport->nic;
2315 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2317 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2318 vport->back->num_msi_left == 0)
2321 roce->rinfo.base_vector = vport->back->roce_base_vector;
2323 roce->rinfo.netdev = nic->kinfo.netdev;
2324 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2326 roce->pdev = nic->pdev;
2327 roce->ae_algo = nic->ae_algo;
2328 roce->numa_node_mask = nic->numa_node_mask;
2333 static int hclge_init_msi(struct hclge_dev *hdev)
2335 struct pci_dev *pdev = hdev->pdev;
2339 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2341 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2344 "failed(%d) to allocate MSI/MSI-X vectors\n",
2348 if (vectors < hdev->num_msi)
2349 dev_warn(&hdev->pdev->dev,
2350 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2351 hdev->num_msi, vectors);
2353 hdev->num_msi = vectors;
2354 hdev->num_msi_left = vectors;
2356 hdev->base_msi_vector = pdev->irq;
2357 hdev->roce_base_vector = hdev->base_msi_vector +
2358 hdev->roce_base_msix_offset;
2360 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361 sizeof(u16), GFP_KERNEL);
2362 if (!hdev->vector_status) {
2363 pci_free_irq_vectors(pdev);
2367 for (i = 0; i < hdev->num_msi; i++)
2368 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2370 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2371 sizeof(int), GFP_KERNEL);
2372 if (!hdev->vector_irq) {
2373 pci_free_irq_vectors(pdev);
2380 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2382 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2383 duplex = HCLGE_MAC_FULL;
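/* Half duplex is only meaningful at 10M/100M; for every other speed the
 * duplex is forced to full before it is written to hardware.
 */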
2388 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2391 struct hclge_config_mac_speed_dup_cmd *req;
2392 struct hclge_desc desc;
2395 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2397 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2400 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2403 case HCLGE_MAC_SPEED_10M:
2404 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2405 HCLGE_CFG_SPEED_S, 6);
2407 case HCLGE_MAC_SPEED_100M:
2408 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2409 HCLGE_CFG_SPEED_S, 7);
2411 case HCLGE_MAC_SPEED_1G:
2412 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2413 HCLGE_CFG_SPEED_S, 0);
2415 case HCLGE_MAC_SPEED_10G:
2416 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2417 HCLGE_CFG_SPEED_S, 1);
2419 case HCLGE_MAC_SPEED_25G:
2420 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2421 HCLGE_CFG_SPEED_S, 2);
2423 case HCLGE_MAC_SPEED_40G:
2424 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2425 HCLGE_CFG_SPEED_S, 3);
2427 case HCLGE_MAC_SPEED_50G:
2428 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2429 HCLGE_CFG_SPEED_S, 4);
2431 case HCLGE_MAC_SPEED_100G:
2432 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2433 HCLGE_CFG_SPEED_S, 5);
2436 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2440 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2443 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2445 dev_err(&hdev->pdev->dev,
2446 "mac speed/duplex config cmd failed %d.\n", ret);
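/* Mapping of speeds to the HCLGE_CFG_SPEED field written above:
 *   1G -> 0, 10G -> 1, 25G -> 2, 40G -> 3, 50G -> 4, 100G -> 5,
 *   10M -> 6, 100M -> 7
 * The encoding is not ordered by link rate, so the explicit switch is
 * required rather than a formula.
 */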
2453 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2457 duplex = hclge_check_speed_dup(duplex, speed);
2458 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2461 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2465 hdev->hw.mac.speed = speed;
2466 hdev->hw.mac.duplex = duplex;
2471 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2474 struct hclge_vport *vport = hclge_get_vport(handle);
2475 struct hclge_dev *hdev = vport->back;
2477 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2480 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2482 struct hclge_config_auto_neg_cmd *req;
2483 struct hclge_desc desc;
2487 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2489 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2491 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2492 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2494 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2496 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2502 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2504 struct hclge_vport *vport = hclge_get_vport(handle);
2505 struct hclge_dev *hdev = vport->back;
2507 if (!hdev->hw.mac.support_autoneg) {
2509 dev_err(&hdev->pdev->dev,
2510 "autoneg is not supported by current port\n");
2517 return hclge_set_autoneg_en(hdev, enable);
2520 static int hclge_get_autoneg(struct hnae3_handle *handle)
2522 struct hclge_vport *vport = hclge_get_vport(handle);
2523 struct hclge_dev *hdev = vport->back;
2524 struct phy_device *phydev = hdev->hw.mac.phydev;
2527 return phydev->autoneg;
2529 return hdev->hw.mac.autoneg;
2532 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2534 struct hclge_vport *vport = hclge_get_vport(handle);
2535 struct hclge_dev *hdev = vport->back;
2538 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2540 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2543 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2546 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2548 struct hclge_vport *vport = hclge_get_vport(handle);
2549 struct hclge_dev *hdev = vport->back;
2551 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2552 return hclge_set_autoneg_en(hdev, !halt);
2557 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2559 struct hclge_config_fec_cmd *req;
2560 struct hclge_desc desc;
2563 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2565 req = (struct hclge_config_fec_cmd *)desc.data;
2566 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2567 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2568 if (fec_mode & BIT(HNAE3_FEC_RS))
2569 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2570 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2571 if (fec_mode & BIT(HNAE3_FEC_BASER))
2572 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2573 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2575 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2577 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
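/* HNAE3_FEC_AUTO is a separate enable bit, while RS and BaseR share the
 * HCLGE_MAC_CFG_FEC_MODE field; if a caller passed both RS and BaseR,
 * the later hnae3_set_field() call would win and BaseR would be the
 * mode actually configured.
 */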
2582 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2584 struct hclge_vport *vport = hclge_get_vport(handle);
2585 struct hclge_dev *hdev = vport->back;
2586 struct hclge_mac *mac = &hdev->hw.mac;
2589 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2590 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2594 ret = hclge_set_fec_hw(hdev, fec_mode);
2598 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2602 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2605 struct hclge_vport *vport = hclge_get_vport(handle);
2606 struct hclge_dev *hdev = vport->back;
2607 struct hclge_mac *mac = &hdev->hw.mac;
2610 *fec_ability = mac->fec_ability;
2612 *fec_mode = mac->fec_mode;
2615 static int hclge_mac_init(struct hclge_dev *hdev)
2617 struct hclge_mac *mac = &hdev->hw.mac;
2620 hdev->support_sfp_query = true;
2621 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2622 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2623 hdev->hw.mac.duplex);
2625 dev_err(&hdev->pdev->dev,
2626 "Config mac speed dup fail ret=%d\n", ret);
2630 if (hdev->hw.mac.support_autoneg) {
2631 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2633 dev_err(&hdev->pdev->dev,
2634 "Config mac autoneg fail ret=%d\n", ret);
2641 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2642 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2644 dev_err(&hdev->pdev->dev,
2645 "Fec mode init fail, ret = %d\n", ret);
2650 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2652 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2656 ret = hclge_set_default_loopback(hdev);
2660 ret = hclge_buffer_alloc(hdev);
2662 dev_err(&hdev->pdev->dev,
2663 "allocate buffer fail, ret=%d\n", ret);
2668 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2670 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2671 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2672 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2673 hclge_wq, &hdev->service_task, 0);
2676 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2678 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2679 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2680 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2681 hclge_wq, &hdev->service_task, 0);
2684 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2686 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2687 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2688 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2689 hclge_wq, &hdev->service_task,
2693 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2695 struct hclge_link_status_cmd *req;
2696 struct hclge_desc desc;
2700 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2701 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2703 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2708 req = (struct hclge_link_status_cmd *)desc.data;
2709 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2711 return !!link_status;
2714 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2716 unsigned int mac_state;
2719 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2722 mac_state = hclge_get_mac_link_status(hdev);
2724 if (hdev->hw.mac.phydev) {
2725 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2726 link_stat = mac_state &
2727 hdev->hw.mac.phydev->link;
2732 link_stat = mac_state;
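/* When a PHY is attached, the port is reported up only if the PHY is in
 * the PHY_RUNNING state and both the PHY and the MAC report link;
 * without a PHY, the MAC link status alone decides.
 */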
2738 static void hclge_update_link_status(struct hclge_dev *hdev)
2740 struct hnae3_client *rclient = hdev->roce_client;
2741 struct hnae3_client *client = hdev->nic_client;
2742 struct hnae3_handle *rhandle;
2743 struct hnae3_handle *handle;
2750 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2753 state = hclge_get_mac_phy_link(hdev);
2754 if (state != hdev->hw.mac.link) {
2755 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2756 handle = &hdev->vport[i].nic;
2757 client->ops->link_status_change(handle, state);
2758 hclge_config_mac_tnl_int(hdev, state);
2759 rhandle = &hdev->vport[i].roce;
2760 if (rclient && rclient->ops->link_status_change)
2761 rclient->ops->link_status_change(rhandle,
2764 hdev->hw.mac.link = state;
2767 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2770 static void hclge_update_port_capability(struct hclge_mac *mac)
2772 /* update fec ability by speed */
2773 hclge_convert_setting_fec(mac);
2775 /* firmware cannot identify the backplane type; the media type
2776 * read from the configuration can help handle it
2778 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2779 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2780 mac->module_type = HNAE3_MODULE_TYPE_KR;
2781 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2782 mac->module_type = HNAE3_MODULE_TYPE_TP;
2784 if (mac->support_autoneg) {
2785 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2786 linkmode_copy(mac->advertising, mac->supported);
2788 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2790 linkmode_zero(mac->advertising);
2794 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2796 struct hclge_sfp_info_cmd *resp;
2797 struct hclge_desc desc;
2800 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2801 resp = (struct hclge_sfp_info_cmd *)desc.data;
2802 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2803 if (ret == -EOPNOTSUPP) {
2804 dev_warn(&hdev->pdev->dev,
2805 "IMP does not support getting SFP speed %d\n", ret);
2808 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2812 *speed = le32_to_cpu(resp->speed);
2817 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2819 struct hclge_sfp_info_cmd *resp;
2820 struct hclge_desc desc;
2823 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2824 resp = (struct hclge_sfp_info_cmd *)desc.data;
2826 resp->query_type = QUERY_ACTIVE_SPEED;
2828 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2829 if (ret == -EOPNOTSUPP) {
2830 dev_warn(&hdev->pdev->dev,
2831 "IMP does not support getting SFP info %d\n", ret);
2834 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2838 /* In some cases, the mac speed got from IMP may be 0; it shouldn't
2839 * be assigned to mac->speed.
2841 if (!le32_to_cpu(resp->speed))
2844 mac->speed = le32_to_cpu(resp->speed);
2845 /* if resp->speed_ability is 0, it means the firmware is an old
2846 * version, so do not update these params
2848 if (resp->speed_ability) {
2849 mac->module_type = le32_to_cpu(resp->module_type);
2850 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2851 mac->autoneg = resp->autoneg;
2852 mac->support_autoneg = resp->autoneg_ability;
2853 mac->speed_type = QUERY_ACTIVE_SPEED;
2854 if (!resp->active_fec)
2857 mac->fec_mode = BIT(resp->active_fec);
2859 mac->speed_type = QUERY_SFP_SPEED;
2865 static int hclge_update_port_info(struct hclge_dev *hdev)
2867 struct hclge_mac *mac = &hdev->hw.mac;
2868 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2871 /* get the port info from SFP cmd if not copper port */
2872 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2875 /* if IMP does not support getting SFP/qSFP info, return directly */
2876 if (!hdev->support_sfp_query)
2879 if (hdev->pdev->revision >= 0x21)
2880 ret = hclge_get_sfp_info(hdev, mac);
2882 ret = hclge_get_sfp_speed(hdev, &speed);
2884 if (ret == -EOPNOTSUPP) {
2885 hdev->support_sfp_query = false;
2891 if (hdev->pdev->revision >= 0x21) {
2892 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2893 hclge_update_port_capability(mac);
2896 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2899 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2900 return 0; /* do nothing if no SFP */
2902 /* must config full duplex for SFP */
2903 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2907 static int hclge_get_status(struct hnae3_handle *handle)
2909 struct hclge_vport *vport = hclge_get_vport(handle);
2910 struct hclge_dev *hdev = vport->back;
2912 hclge_update_link_status(hdev);
2914 return hdev->hw.mac.link;
2917 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2919 if (pci_num_vf(hdev->pdev) == 0) {
2920 dev_err(&hdev->pdev->dev,
2921 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2925 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2926 dev_err(&hdev->pdev->dev,
2927 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2928 vf, pci_num_vf(hdev->pdev));
2932 /* VFs start from 1 in vport */
2933 vf += HCLGE_VF_VPORT_START_NUM;
2934 return &hdev->vport[vf];
2937 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2938 struct ifla_vf_info *ivf)
2940 struct hclge_vport *vport = hclge_get_vport(handle);
2941 struct hclge_dev *hdev = vport->back;
2943 vport = hclge_get_vf_vport(hdev, vf);
2948 ivf->linkstate = vport->vf_info.link_state;
2949 ivf->spoofchk = vport->vf_info.spoofchk;
2950 ivf->trusted = vport->vf_info.trusted;
2951 ivf->min_tx_rate = 0;
2952 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2953 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2954 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2955 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2956 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2961 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2964 struct hclge_vport *vport = hclge_get_vport(handle);
2965 struct hclge_dev *hdev = vport->back;
2967 vport = hclge_get_vf_vport(hdev, vf);
2971 vport->vf_info.link_state = link_state;
2976 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2978 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2980 /* fetch the events from their corresponding regs */
2981 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2982 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2983 msix_src_reg = hclge_read_dev(&hdev->hw,
2984 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2986 /* Assumption: if by any chance reset and mailbox events are reported
2987 * together, then we will only process the reset event in this pass and
2988 * defer the processing of the mailbox events. Since we will not have
2989 * cleared the RX CMDQ event this time, we will receive another
2990 * interrupt from H/W just for the mailbox.
2992 * check for vector0 reset event sources
2994 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2995 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2996 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2997 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2998 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2999 hdev->rst_stats.imp_rst_cnt++;
3000 return HCLGE_VECTOR0_EVENT_RST;
3003 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
3004 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3005 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3006 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3007 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3008 hdev->rst_stats.global_rst_cnt++;
3009 return HCLGE_VECTOR0_EVENT_RST;
3012 /* check for vector0 msix event source */
3013 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3014 *clearval = msix_src_reg;
3015 return HCLGE_VECTOR0_EVENT_ERR;
3018 /* check for vector0 mailbox(=CMDQ RX) event source */
3019 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3020 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3021 *clearval = cmdq_src_reg;
3022 return HCLGE_VECTOR0_EVENT_MBX;
3025 /* print other vector0 event source */
3026 dev_info(&hdev->pdev->dev,
3027 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3028 cmdq_src_reg, msix_src_reg);
3029 *clearval = msix_src_reg;
3031 return HCLGE_VECTOR0_EVENT_OTHER;
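/* Event decoding priority above: IMP reset > global reset > MSI-X
 * hardware error > mailbox (CMDQ RX) > other. A reset found here
 * preempts any mailbox event raised by the same interrupt, per the
 * assumption documented at the top of the function.
 */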
3034 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3037 switch (event_type) {
3038 case HCLGE_VECTOR0_EVENT_RST:
3039 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3041 case HCLGE_VECTOR0_EVENT_MBX:
3042 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3049 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3051 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3052 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3053 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3054 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3055 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3058 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3060 writel(enable ? 1 : 0, vector->addr);
3063 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3065 struct hclge_dev *hdev = data;
3069 hclge_enable_vector(&hdev->misc_vector, false);
3070 event_cause = hclge_check_event_cause(hdev, &clearval);
3072 /* vector 0 interrupt is shared with reset and mailbox source events. */
3073 switch (event_cause) {
3074 case HCLGE_VECTOR0_EVENT_ERR:
3075 /* we do not know what type of reset is required now. This could
3076 * only be decided after we fetch the type of errors which
3077 * caused this event. Therefore, we will do the following for now:
3078 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3079 * have deferred the type of reset to be used.
3080 * 2. Schedule the reset service task.
3081 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3082 * will fetch the correct type of reset. This would be done
3083 * by first decoding the types of errors.
3085 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3087 case HCLGE_VECTOR0_EVENT_RST:
3088 hclge_reset_task_schedule(hdev);
3090 case HCLGE_VECTOR0_EVENT_MBX:
3091 /* If we are here then,
3092 * 1. Either we are not handling any mbx task and we are not
3093 *    scheduled as well, or
3095 * 2. We could be handling a mbx task but nothing more is
3096 *    scheduled.
3097 * In both cases, we should schedule mbx task as there are more
3098 * mbx messages reported by this interrupt.
3100 hclge_mbx_task_schedule(hdev);
3103 dev_warn(&hdev->pdev->dev,
3104 "received unknown or unhandled event of vector0\n");
3108 hclge_clear_event_cause(hdev, event_cause, clearval);
3110 /* Enable interrupt if it is not caused by reset. And when
3111 * clearval equals 0, it means the interrupt status may have been
3112 * cleared by hardware before the driver reads the status register.
3113 * In this case, the vector0 interrupt should also be enabled.
3116 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3117 hclge_enable_vector(&hdev->misc_vector, true);
3123 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3125 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3126 dev_warn(&hdev->pdev->dev,
3127 "vector(vector_id %d) has been freed.\n", vector_id);
3131 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3132 hdev->num_msi_left += 1;
3133 hdev->num_msi_used -= 1;
3136 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3138 struct hclge_misc_vector *vector = &hdev->misc_vector;
3140 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3142 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3143 hdev->vector_status[0] = 0;
3145 hdev->num_msi_left -= 1;
3146 hdev->num_msi_used += 1;
3149 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3150 const cpumask_t *mask)
3152 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3155 cpumask_copy(&hdev->affinity_mask, mask);
3158 static void hclge_irq_affinity_release(struct kref *ref)
3162 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3164 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3165 &hdev->affinity_mask);
3167 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3168 hdev->affinity_notify.release = hclge_irq_affinity_release;
3169 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3170 &hdev->affinity_notify);
3173 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3175 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3176 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3179 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3183 hclge_get_misc_vector(hdev);
3185 /* this would be explicitly freed at the end */
3186 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3187 HCLGE_NAME, pci_name(hdev->pdev));
3188 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3189 0, hdev->misc_vector.name, hdev);
3191 hclge_free_vector(hdev, 0);
3192 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3193 hdev->misc_vector.vector_irq);
3199 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3201 free_irq(hdev->misc_vector.vector_irq, hdev);
3202 hclge_free_vector(hdev, 0);
3205 int hclge_notify_client(struct hclge_dev *hdev,
3206 enum hnae3_reset_notify_type type)
3208 struct hnae3_client *client = hdev->nic_client;
3211 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3214 if (!client->ops->reset_notify)
3217 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3218 struct hnae3_handle *handle = &hdev->vport[i].nic;
3221 ret = client->ops->reset_notify(handle, type);
3223 dev_err(&hdev->pdev->dev,
3224 "notify nic client failed %d(%d)\n", type, ret);
3232 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3233 enum hnae3_reset_notify_type type)
3235 struct hnae3_client *client = hdev->roce_client;
3239 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3242 if (!client->ops->reset_notify)
3245 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3246 struct hnae3_handle *handle = &hdev->vport[i].roce;
3248 ret = client->ops->reset_notify(handle, type);
3250 dev_err(&hdev->pdev->dev,
3251 "notify roce client failed %d(%d)",
3260 static int hclge_reset_wait(struct hclge_dev *hdev)
3262 #define HCLGE_RESET_WAIT_MS 100
3263 #define HCLGE_RESET_WAIT_CNT 350
3265 u32 val, reg, reg_bit;
3268 switch (hdev->reset_type) {
3269 case HNAE3_IMP_RESET:
3270 reg = HCLGE_GLOBAL_RESET_REG;
3271 reg_bit = HCLGE_IMP_RESET_BIT;
3273 case HNAE3_GLOBAL_RESET:
3274 reg = HCLGE_GLOBAL_RESET_REG;
3275 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3277 case HNAE3_FUNC_RESET:
3278 reg = HCLGE_FUN_RST_ING;
3279 reg_bit = HCLGE_FUN_RST_ING_B;
3282 dev_err(&hdev->pdev->dev,
3283 "Wait for unsupported reset type: %d\n",
3288 val = hclge_read_dev(&hdev->hw, reg);
3289 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3290 msleep(HCLGE_RESET_WAIT_MS);
3291 val = hclge_read_dev(&hdev->hw, reg);
3295 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3296 dev_warn(&hdev->pdev->dev,
3297 "Wait for reset timeout: %d\n", hdev->reset_type);
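/* Worst-case wait: HCLGE_RESET_WAIT_CNT polls at 100 ms intervals,
 * i.e. up to 350 * 100 ms = 35 seconds of watching the reset status bit
 * before the timeout warning above is printed.
 */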
3304 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3306 struct hclge_vf_rst_cmd *req;
3307 struct hclge_desc desc;
3309 req = (struct hclge_vf_rst_cmd *)desc.data;
3310 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3311 req->dest_vfid = func_id;
3316 return hclge_cmd_send(&hdev->hw, &desc, 1);
3319 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3323 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3324 struct hclge_vport *vport = &hdev->vport[i];
3327 /* Send cmd to set/clear VF's FUNC_RST_ING */
3328 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3330 dev_err(&hdev->pdev->dev,
3331 "set vf(%u) rst failed %d!\n",
3332 vport->vport_id, ret);
3336 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3339 /* Inform VF to process the reset.
3340 * hclge_inform_reset_assert_to_vf may fail if VF
3341 * driver is not loaded.
3343 ret = hclge_inform_reset_assert_to_vf(vport);
3345 dev_warn(&hdev->pdev->dev,
3346 "inform reset to vf(%u) failed %d!\n",
3347 vport->vport_id, ret);
3353 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3355 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3356 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3357 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3360 hclge_mbx_handler(hdev);
3362 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3365 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3367 struct hclge_pf_rst_sync_cmd *req;
3368 struct hclge_desc desc;
3372 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3373 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3376 /* VFs need to bring the netdev down via mbx during PF or FLR reset */
3377 hclge_mailbox_service_task(hdev);
3379 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3380 /* for compatibility with old firmware, wait
3381 * 100 ms for the VF to stop IO
3383 if (ret == -EOPNOTSUPP) {
3384 msleep(HCLGE_RESET_SYNC_TIME);
3387 dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3390 } else if (req->all_vf_ready) {
3393 msleep(HCLGE_PF_RESET_SYNC_TIME);
3394 hclge_cmd_reuse_desc(&desc, true);
3395 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3397 dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
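/* The loop above polls the VF-ready flag every HCLGE_PF_RESET_SYNC_TIME
 * ms, up to HCLGE_PF_RESET_SYNC_CNT iterations, and keeps servicing the
 * mailbox in between so VFs can report in; old firmware that lacks the
 * query command simply gets a fixed HCLGE_RESET_SYNC_TIME ms grace
 * period instead.
 */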
3401 void hclge_report_hw_error(struct hclge_dev *hdev,
3402 enum hnae3_hw_error_type type)
3404 struct hnae3_client *client = hdev->nic_client;
3407 if (!client || !client->ops->process_hw_error ||
3408 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3411 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3412 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3415 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3419 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3420 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3421 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3422 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3423 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3426 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3427 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3428 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3429 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3433 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3435 struct hclge_desc desc;
3436 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3439 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3440 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3441 req->fun_reset_vfid = func_id;
3443 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3445 dev_err(&hdev->pdev->dev,
3446 "send function reset cmd fail, status = %d\n", ret);
3451 static void hclge_do_reset(struct hclge_dev *hdev)
3453 struct hnae3_handle *handle = &hdev->vport[0].nic;
3454 struct pci_dev *pdev = hdev->pdev;
3457 if (hclge_get_hw_reset_stat(handle)) {
3458 dev_info(&pdev->dev, "Hardware reset not finished\n");
3459 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3460 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3461 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3465 switch (hdev->reset_type) {
3466 case HNAE3_GLOBAL_RESET:
3467 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3468 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3469 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3470 dev_info(&pdev->dev, "Global Reset requested\n");
3472 case HNAE3_FUNC_RESET:
3473 dev_info(&pdev->dev, "PF Reset requested\n");
3474 /* schedule again to check later */
3475 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3476 hclge_reset_task_schedule(hdev);
3479 dev_warn(&pdev->dev,
3480 "Unsupported reset type: %d\n", hdev->reset_type);
3485 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3486 unsigned long *addr)
3488 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3489 struct hclge_dev *hdev = ae_dev->priv;
3491 /* first, resolve any unknown reset type to the known type(s) */
3492 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3493 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3494 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3495 /* we will intentionally ignore any errors from this function
3496 * as we will end up in *some* reset request in any case
3498 if (hclge_handle_hw_msix_error(hdev, addr))
3499 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3502 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3503 /* We deferred the clearing of the error event which caused
3504 * the interrupt since it was not possible to do that in
3505 * interrupt context (and this is the reason we introduced the
3506 * new UNKNOWN reset type). Now that the errors have been
3507 * handled and cleared in hardware, we can safely enable
3508 * interrupts. This is an exception to the norm.
3510 hclge_enable_vector(&hdev->misc_vector, true);
3513 /* return the highest priority reset level amongst all */
3514 if (test_bit(HNAE3_IMP_RESET, addr)) {
3515 rst_level = HNAE3_IMP_RESET;
3516 clear_bit(HNAE3_IMP_RESET, addr);
3517 clear_bit(HNAE3_GLOBAL_RESET, addr);
3518 clear_bit(HNAE3_FUNC_RESET, addr);
3519 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3520 rst_level = HNAE3_GLOBAL_RESET;
3521 clear_bit(HNAE3_GLOBAL_RESET, addr);
3522 clear_bit(HNAE3_FUNC_RESET, addr);
3523 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3524 rst_level = HNAE3_FUNC_RESET;
3525 clear_bit(HNAE3_FUNC_RESET, addr);
3526 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3527 rst_level = HNAE3_FLR_RESET;
3528 clear_bit(HNAE3_FLR_RESET, addr);
3531 if (hdev->reset_type != HNAE3_NONE_RESET &&
3532 rst_level < hdev->reset_type)
3533 return HNAE3_NONE_RESET;
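/* Reset levels are strictly ordered: IMP > global > func > FLR. Picking
 * the highest pending level and clearing every lower one is what lets a
 * single hardware-wide reset subsume the smaller resets queued below it.
 */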
3538 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3542 switch (hdev->reset_type) {
3543 case HNAE3_IMP_RESET:
3544 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3546 case HNAE3_GLOBAL_RESET:
3547 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3556 /* For revision 0x20, the reset interrupt source
3557 * can only be cleared after the hardware reset is done
3559 if (hdev->pdev->revision == 0x20)
3560 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3563 hclge_enable_vector(&hdev->misc_vector, true);
3566 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3570 switch (hdev->reset_type) {
3571 case HNAE3_FUNC_RESET:
3573 case HNAE3_FLR_RESET:
3574 ret = hclge_set_all_vf_rst(hdev, true);
3583 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3587 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3589 reg_val |= HCLGE_NIC_SW_RST_RDY;
3591 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3593 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3596 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3601 switch (hdev->reset_type) {
3602 case HNAE3_FUNC_RESET:
3603 /* to confirm whether all running VFs are ready
3604 * before requesting PF reset
3606 ret = hclge_func_reset_sync_vf(hdev);
3610 ret = hclge_func_reset_cmd(hdev, 0);
3612 dev_err(&hdev->pdev->dev,
3613 "asserting function reset fail %d!\n", ret);
3617 /* After performing pf reset, it is not necessary to do the
3618 * mailbox handling or send any command to firmware, because
3619 * any mailbox handling or command to firmware is only valid
3620 * after hclge_cmd_init is called.
3622 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3623 hdev->rst_stats.pf_rst_cnt++;
3625 case HNAE3_FLR_RESET:
3626 /* to confirm whether all running VFs are ready
3627 * before requesting PF reset
3629 ret = hclge_func_reset_sync_vf(hdev);
3633 case HNAE3_IMP_RESET:
3634 hclge_handle_imp_error(hdev);
3635 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3636 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3637 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3643 /* inform hardware that preparatory work is done */
3644 msleep(HCLGE_RESET_SYNC_TIME);
3645 hclge_reset_handshake(hdev, true);
3646 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3651 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3653 #define MAX_RESET_FAIL_CNT 5
3655 if (hdev->reset_pending) {
3656 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3657 hdev->reset_pending);
3659 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3660 HCLGE_RESET_INT_M) {
3661 dev_info(&hdev->pdev->dev,
3662 "reset failed because new reset interrupt\n");
3663 hclge_clear_reset_cause(hdev);
3665 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3666 hdev->rst_stats.reset_fail_cnt++;
3667 set_bit(hdev->reset_type, &hdev->reset_pending);
3668 dev_info(&hdev->pdev->dev,
3669 "re-schedule reset task(%u)\n",
3670 hdev->rst_stats.reset_fail_cnt);
3674 hclge_clear_reset_cause(hdev);
3676 /* recover the handshake status when reset fails */
3677 hclge_reset_handshake(hdev, true);
3679 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3681 hclge_dbg_dump_rst_info(hdev);
3683 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3688 static int hclge_set_rst_done(struct hclge_dev *hdev)
3690 struct hclge_pf_rst_done_cmd *req;
3691 struct hclge_desc desc;
3694 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3695 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3696 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3698 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3699 /* To be compatible with the old firmware, which does not support
3700 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3701 * return success.
3703 if (ret == -EOPNOTSUPP) {
3704 dev_warn(&hdev->pdev->dev,
3705 "current firmware does not support command(0x%x)!\n",
3706 HCLGE_OPC_PF_RST_DONE);
3709 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3716 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3720 switch (hdev->reset_type) {
3721 case HNAE3_FUNC_RESET:
3723 case HNAE3_FLR_RESET:
3724 ret = hclge_set_all_vf_rst(hdev, false);
3726 case HNAE3_GLOBAL_RESET:
3728 case HNAE3_IMP_RESET:
3729 ret = hclge_set_rst_done(hdev);
3735 /* clear up the handshake status after re-initialization is done */
3736 hclge_reset_handshake(hdev, false);
3741 static int hclge_reset_stack(struct hclge_dev *hdev)
3745 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3749 ret = hclge_reset_ae_dev(hdev->ae_dev);
3753 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3757 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3760 static int hclge_reset_prepare(struct hclge_dev *hdev)
3762 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3765 /* Initialize ae_dev reset status as well, in case enet layer wants to
3766 * know if device is undergoing reset
3768 ae_dev->reset_type = hdev->reset_type;
3769 hdev->rst_stats.reset_cnt++;
3770 /* perform reset of the stack & ae device for a client */
3771 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3775 ret = hclge_reset_prepare_down(hdev);
3780 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3785 return hclge_reset_prepare_wait(hdev);
3788 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3790 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3791 enum hnae3_reset_type reset_level;
3794 hdev->rst_stats.hw_reset_done_cnt++;
3796 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3801 ret = hclge_reset_stack(hdev);
3806 hclge_clear_reset_cause(hdev);
3808 ret = hclge_reset_prepare_up(hdev);
3813 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3814 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3815 * times
3818 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3822 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3827 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3831 hdev->last_reset_time = jiffies;
3832 hdev->rst_stats.reset_fail_cnt = 0;
3833 hdev->rst_stats.reset_done_cnt++;
3834 ae_dev->reset_type = HNAE3_NONE_RESET;
3835 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3837 /* if default_reset_request has a higher level reset request,
3838 * it should be handled as soon as possible, since some errors
3839 * need this kind of reset to be fixed.
3841 reset_level = hclge_get_reset_level(ae_dev,
3842 &hdev->default_reset_request);
3843 if (reset_level != HNAE3_NONE_RESET)
3844 set_bit(reset_level, &hdev->reset_request);
3849 static void hclge_reset(struct hclge_dev *hdev)
3851 if (hclge_reset_prepare(hdev))
3854 if (hclge_reset_wait(hdev))
3857 if (hclge_reset_rebuild(hdev))
3863 if (hclge_reset_err_handle(hdev))
3864 hclge_reset_task_schedule(hdev);
3867 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3869 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3870 struct hclge_dev *hdev = ae_dev->priv;
3872 /* We might end up getting called broadly because of the 2 cases below:
3873 * 1. A recoverable error was conveyed through APEI and the only way
3874 * to bring normalcy is to reset.
3875 * 2. A new reset request from the stack due to timeout
3877 * For the first case, the error event might not have an ae handle
3878 * available. Check if this is a new reset request and we are not here
3879 * just because the last reset attempt did not succeed and the watchdog
3880 * hit us again. We will know this if the last reset request did not
3881 * occur very recently (watchdog timer = 5*HZ; check after a
3882 * sufficiently large time, say 4*5*HZ). In case of a new request we
3883 * reset the "reset level" to PF reset. If it is a repeat of the most
3884 * recent reset request, we want to throttle it. Therefore, we will
3885 * not allow it again before 3*HZ has elapsed.
3888 handle = &hdev->vport[0].nic;
3890 if (time_before(jiffies, (hdev->last_reset_time +
3891 HCLGE_RESET_INTERVAL))) {
3892 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3894 } else if (hdev->default_reset_request) {
3896 hclge_get_reset_level(ae_dev,
3897 &hdev->default_reset_request);
3898 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3899 hdev->reset_level = HNAE3_FUNC_RESET;
3902 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3905 /* request reset & schedule reset task */
3906 set_bit(hdev->reset_level, &hdev->reset_request);
3907 hclge_reset_task_schedule(hdev);
3909 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3910 hdev->reset_level++;
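/* Each repeated reset event escalates reset_level by one step (up to
 * HNAE3_GLOBAL_RESET), so a PF reset that keeps failing is eventually
 * retried as a more thorough reset.
 */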
3913 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3914 enum hnae3_reset_type rst_type)
3916 struct hclge_dev *hdev = ae_dev->priv;
3918 set_bit(rst_type, &hdev->default_reset_request);
3921 static void hclge_reset_timer(struct timer_list *t)
3923 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3925 /* if default_reset_request has no value, it means that this reset
3926 * request has already been handled, so just return here
3928 if (!hdev->default_reset_request)
3931 dev_info(&hdev->pdev->dev,
3932 "triggering reset in reset timer\n");
3933 hclge_reset_event(hdev->pdev, NULL);
3936 static void hclge_reset_subtask(struct hclge_dev *hdev)
3938 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3940 /* check if there is any ongoing reset in the hardware. This status can
3941 * be checked from reset_pending. If there is, then we need to wait for
3942 * the hardware to complete the reset.
3943 * a. If we are able to figure out in reasonable time that the hardware
3944 * has fully reset, then we can proceed with the driver and client
3945 * reset, etc.
3946 * b. else, we can come back later to check this status, so re-schedule
3949 hdev->last_reset_time = jiffies;
3950 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3951 if (hdev->reset_type != HNAE3_NONE_RESET)
3954 /* check if we got any *new* reset requests to be honored */
3955 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3956 if (hdev->reset_type != HNAE3_NONE_RESET)
3957 hclge_do_reset(hdev);
3959 hdev->reset_type = HNAE3_NONE_RESET;
3962 static void hclge_reset_service_task(struct hclge_dev *hdev)
3964 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3967 down(&hdev->reset_sem);
3968 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3970 hclge_reset_subtask(hdev);
3972 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3973 up(&hdev->reset_sem);
3976 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3980 /* start from vport 1, since the PF is always alive */
3981 for (i = 1; i < hdev->num_alloc_vport; i++) {
3982 struct hclge_vport *vport = &hdev->vport[i];
3984 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3985 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3987 /* If vf is not alive, set to default value */
3988 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3989 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3993 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3995 unsigned long delta = round_jiffies_relative(HZ);
3997 /* Always handle the link updating to make sure link state is
3998 * updated when it is triggered by mbx.
4000 hclge_update_link_status(hdev);
4002 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4003 delta = jiffies - hdev->last_serv_processed;
4005 if (delta < round_jiffies_relative(HZ)) {
4006 delta = round_jiffies_relative(HZ) - delta;
4011 hdev->serv_processed_cnt++;
4012 hclge_update_vport_alive(hdev);
4014 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4015 hdev->last_serv_processed = jiffies;
4019 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4020 hclge_update_stats_for_all(hdev);
4022 hclge_update_port_info(hdev);
4023 hclge_sync_vlan_filter(hdev);
4025 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4026 hclge_rfs_filter_expire(hdev);
4028 hdev->last_serv_processed = jiffies;
4031 hclge_task_schedule(hdev, delta);
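/* Scheduling math above: if less than a second has passed since the
 * last run, the remainder becomes the next delay, e.g. with HZ = 1000
 * and 300 ms elapsed the task is re-armed in ~700 ms; otherwise the
 * periodic work runs now and is re-armed a full
 * round_jiffies_relative(HZ) later.
 */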
4034 static void hclge_service_task(struct work_struct *work)
4036 struct hclge_dev *hdev =
4037 container_of(work, struct hclge_dev, service_task.work);
4039 hclge_reset_service_task(hdev);
4040 hclge_mailbox_service_task(hdev);
4041 hclge_periodic_service_task(hdev);
4043 /* Handle reset and mbx again in case periodical task delays the
4044 * handling by calling hclge_task_schedule() in
4045 * hclge_periodic_service_task().
4047 hclge_reset_service_task(hdev);
4048 hclge_mailbox_service_task(hdev);
4051 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4053 /* VF handle has no client */
4054 if (!handle->client)
4055 return container_of(handle, struct hclge_vport, nic);
4056 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4057 return container_of(handle, struct hclge_vport, roce);
4059 return container_of(handle, struct hclge_vport, nic);
4062 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4063 struct hnae3_vector_info *vector_info)
4065 struct hclge_vport *vport = hclge_get_vport(handle);
4066 struct hnae3_vector_info *vector = vector_info;
4067 struct hclge_dev *hdev = vport->back;
4071 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4072 vector_num = min(hdev->num_msi_left, vector_num);
4074 for (j = 0; j < vector_num; j++) {
4075 for (i = 1; i < hdev->num_msi; i++) {
4076 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4077 vector->vector = pci_irq_vector(hdev->pdev, i);
4078 vector->io_addr = hdev->hw.io_base +
4079 HCLGE_VECTOR_REG_BASE +
4080 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4082 HCLGE_VECTOR_VF_OFFSET;
4083 hdev->vector_status[i] = vport->vport_id;
4084 hdev->vector_irq[i] = vector->vector;
4093 hdev->num_msi_left -= alloc;
4094 hdev->num_msi_used += alloc;
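/* Vector 0 is reserved for the misc interrupt, so the search above
 * starts at index 1; each claimed entry records the owning vport in
 * vector_status and the Linux IRQ number in vector_irq, while io_addr
 * is derived from the vector index and the vport's offset within the
 * PF's interrupt-control register space.
 */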
4099 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4103 for (i = 0; i < hdev->num_msi; i++)
4104 if (vector == hdev->vector_irq[i])
4110 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4112 struct hclge_vport *vport = hclge_get_vport(handle);
4113 struct hclge_dev *hdev = vport->back;
4116 vector_id = hclge_get_vector_index(hdev, vector);
4117 if (vector_id < 0) {
4118 dev_err(&hdev->pdev->dev,
4119 "Get vector index fail. vector_id = %d\n", vector_id);
4123 hclge_free_vector(hdev, vector_id);
4128 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4130 return HCLGE_RSS_KEY_SIZE;
4133 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4135 return HCLGE_RSS_IND_TBL_SIZE;
4138 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4139 const u8 hfunc, const u8 *key)
4141 struct hclge_rss_config_cmd *req;
4142 unsigned int key_offset = 0;
4143 struct hclge_desc desc;
4148 key_counts = HCLGE_RSS_KEY_SIZE;
4149 req = (struct hclge_rss_config_cmd *)desc.data;
4151 while (key_counts) {
4152 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4155 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4156 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4158 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4159 memcpy(req->hash_key,
4160 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4162 key_counts -= key_size;
4164 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4166 dev_err(&hdev->pdev->dev,
4167 "Configure RSS config fail, status = %d\n",
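/* The hash key is longer than one descriptor's payload, so it is sent
 * in HCLGE_RSS_HASH_KEY_NUM-byte chunks, each command carrying its
 * chunk offset in hash_config. As an illustration (chunk size assumed
 * to be 16 bytes), a 40-byte Toeplitz key would need three chained
 * commands: 16 + 16 + 8 bytes.
 */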
4175 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4177 struct hclge_rss_indirection_table_cmd *req;
4178 struct hclge_desc desc;
4182 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4184 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4185 hclge_cmd_setup_basic_desc
4186 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4188 req->start_table_index =
4189 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4190 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4192 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4193 req->rss_result[j] =
4194 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4196 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4198 dev_err(&hdev->pdev->dev,
4199 "Configure rss indir table fail,status = %d\n",
4207 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4208 u16 *tc_size, u16 *tc_offset)
4210 struct hclge_rss_tc_mode_cmd *req;
4211 struct hclge_desc desc;
4215 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4216 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4218 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4221 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4222 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4223 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4224 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4225 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4227 req->rss_tc_mode[i] = cpu_to_le16(mode);
4230 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4232 dev_err(&hdev->pdev->dev,
4233 "Configure rss tc mode fail, status = %d\n", ret);
4238 static void hclge_get_rss_type(struct hclge_vport *vport)
4240 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4241 vport->rss_tuple_sets.ipv4_udp_en ||
4242 vport->rss_tuple_sets.ipv4_sctp_en ||
4243 vport->rss_tuple_sets.ipv6_tcp_en ||
4244 vport->rss_tuple_sets.ipv6_udp_en ||
4245 vport->rss_tuple_sets.ipv6_sctp_en)
4246 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4247 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4248 vport->rss_tuple_sets.ipv6_fragment_en)
4249 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4251 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4254 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4256 struct hclge_rss_input_tuple_cmd *req;
4257 struct hclge_desc desc;
4260 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4262 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4264 /* Get the tuple cfg from pf */
4265 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4266 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4267 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4268 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4269 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4270 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4271 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4272 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4273 hclge_get_rss_type(&hdev->vport[0]);
4274 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4276 dev_err(&hdev->pdev->dev,
4277 "Configure rss input fail, status = %d\n", ret);
4281 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4284 struct hclge_vport *vport = hclge_get_vport(handle);
4287 /* Get hash algorithm */
4289 switch (vport->rss_algo) {
4290 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4291 *hfunc = ETH_RSS_HASH_TOP;
4293 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4294 *hfunc = ETH_RSS_HASH_XOR;
4297 *hfunc = ETH_RSS_HASH_UNKNOWN;
4302 /* Get the RSS Key required by the user */
4304 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4306 /* Get indirect table */
4308 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4309 indir[i] = vport->rss_indirection_tbl[i];
4314 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4315 const u8 *key, const u8 hfunc)
4317 struct hclge_vport *vport = hclge_get_vport(handle);
4318 struct hclge_dev *hdev = vport->back;
4322 /* Set the RSS Hash Key if specified by the user */
4325 case ETH_RSS_HASH_TOP:
4326 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4328 case ETH_RSS_HASH_XOR:
4329 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4331 case ETH_RSS_HASH_NO_CHANGE:
4332 hash_algo = vport->rss_algo;
4338 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4342 /* Update the shadow RSS key with the user specified key */
4343 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4344 vport->rss_algo = hash_algo;
4347 /* Update the shadow RSS table with user specified qids */
4348 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4349 vport->rss_indirection_tbl[i] = indir[i];
4351 /* Update the hardware */
4352 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4355 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4357 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4359 if (nfc->data & RXH_L4_B_2_3)
4360 hash_sets |= HCLGE_D_PORT_BIT;
4362 hash_sets &= ~HCLGE_D_PORT_BIT;
4364 if (nfc->data & RXH_IP_SRC)
4365 hash_sets |= HCLGE_S_IP_BIT;
4367 hash_sets &= ~HCLGE_S_IP_BIT;
4369 if (nfc->data & RXH_IP_DST)
4370 hash_sets |= HCLGE_D_IP_BIT;
4372 hash_sets &= ~HCLGE_D_IP_BIT;
4374 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4375 hash_sets |= HCLGE_V_TAG_BIT;
4380 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4381 struct ethtool_rxnfc *nfc)
4383 struct hclge_vport *vport = hclge_get_vport(handle);
4384 struct hclge_dev *hdev = vport->back;
4385 struct hclge_rss_input_tuple_cmd *req;
4386 struct hclge_desc desc;
4390 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4391 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4394 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4395 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4397 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4398 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4399 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4400 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4401 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4402 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4403 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4404 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4406 tuple_sets = hclge_get_rss_hash_bits(nfc);
4407 switch (nfc->flow_type) {
4409 req->ipv4_tcp_en = tuple_sets;
4412 req->ipv6_tcp_en = tuple_sets;
4415 req->ipv4_udp_en = tuple_sets;
4418 req->ipv6_udp_en = tuple_sets;
4421 req->ipv4_sctp_en = tuple_sets;
4424 if ((nfc->data & RXH_L4_B_0_1) ||
4425 (nfc->data & RXH_L4_B_2_3))
4428 req->ipv6_sctp_en = tuple_sets;
4431 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4434 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4440 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4442 dev_err(&hdev->pdev->dev,
4443 "Set rss tuple fail, status = %d\n", ret);
4447 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4448 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4449 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4450 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4451 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4452 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4453 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4454 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4455 hclge_get_rss_type(vport);
4459 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4460 struct ethtool_rxnfc *nfc)
4462 struct hclge_vport *vport = hclge_get_vport(handle);
4467 switch (nfc->flow_type) {
4468 case TCP_V4_FLOW:
4469 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4470 break;
4471 case UDP_V4_FLOW:
4472 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4473 break;
4474 case TCP_V6_FLOW:
4475 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4476 break;
4477 case UDP_V6_FLOW:
4478 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4479 break;
4480 case SCTP_V4_FLOW:
4481 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4482 break;
4483 case SCTP_V6_FLOW:
4484 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4485 break;
4486 case IPV4_FLOW:
4487 case IPV6_FLOW:
4488 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4489 break;
4490 default:
4491 return -EOPNOTSUPP;
4492 }
4494 nfc->data = 0;
4497 if (tuple_sets & HCLGE_D_PORT_BIT)
4498 nfc->data |= RXH_L4_B_2_3;
4499 if (tuple_sets & HCLGE_S_PORT_BIT)
4500 nfc->data |= RXH_L4_B_0_1;
4501 if (tuple_sets & HCLGE_D_IP_BIT)
4502 nfc->data |= RXH_IP_DST;
4503 if (tuple_sets & HCLGE_S_IP_BIT)
4504 nfc->data |= RXH_IP_SRC;
4509 static int hclge_get_tc_size(struct hnae3_handle *handle)
4511 struct hclge_vport *vport = hclge_get_vport(handle);
4512 struct hclge_dev *hdev = vport->back;
4514 return hdev->rss_size_max;
4517 int hclge_rss_init_hw(struct hclge_dev *hdev)
4519 struct hclge_vport *vport = hdev->vport;
4520 u8 *rss_indir = vport[0].rss_indirection_tbl;
4521 u16 rss_size = vport[0].alloc_rss_size;
4522 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4523 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4524 u8 *key = vport[0].rss_hash_key;
4525 u8 hfunc = vport[0].rss_algo;
4526 u16 tc_valid[HCLGE_MAX_TC_NUM];
4531 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4535 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4539 ret = hclge_set_rss_input_tuple(hdev);
4543 /* Each TC has the same queue size, and the tc_size set to hardware is
4544 * the log2 of the roundup power of two of rss_size; the actual queue
4545 * size is limited by the indirection table.
4546 */
4547 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4548 dev_err(&hdev->pdev->dev,
4549 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4554 roundup_size = roundup_pow_of_two(rss_size);
4555 roundup_size = ilog2(roundup_size);
4557 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4558 tc_valid[i] = 0;
4560 if (!(hdev->hw_tc_map & BIT(i)))
4561 continue;
4563 tc_valid[i] = 1;
4564 tc_size[i] = roundup_size;
4565 tc_offset[i] = rss_size * i;
4568 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
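/* Worked example of the TC sizing above: with rss_size = 24,
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so every enabled TC is
 * programmed with tc_size = 5 (2^5 = 32 hash buckets) while tc_offset
 * advances in steps of the real rss_size: 0, 24, 48, ... The indirection
 * table is what keeps the surplus 32 - 24 buckets from selecting
 * nonexistent queues.
 */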
4571 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4573 struct hclge_vport *vport = hdev->vport;
4576 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4577 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4578 vport[j].rss_indirection_tbl[i] =
4579 i % vport[j].alloc_rss_size;
4583 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4585 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4586 struct hclge_vport *vport = hdev->vport;
4588 if (hdev->pdev->revision >= 0x21)
4589 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4591 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4592 vport[i].rss_tuple_sets.ipv4_tcp_en =
4593 HCLGE_RSS_INPUT_TUPLE_OTHER;
4594 vport[i].rss_tuple_sets.ipv4_udp_en =
4595 HCLGE_RSS_INPUT_TUPLE_OTHER;
4596 vport[i].rss_tuple_sets.ipv4_sctp_en =
4597 HCLGE_RSS_INPUT_TUPLE_SCTP;
4598 vport[i].rss_tuple_sets.ipv4_fragment_en =
4599 HCLGE_RSS_INPUT_TUPLE_OTHER;
4600 vport[i].rss_tuple_sets.ipv6_tcp_en =
4601 HCLGE_RSS_INPUT_TUPLE_OTHER;
4602 vport[i].rss_tuple_sets.ipv6_udp_en =
4603 HCLGE_RSS_INPUT_TUPLE_OTHER;
4604 vport[i].rss_tuple_sets.ipv6_sctp_en =
4605 HCLGE_RSS_INPUT_TUPLE_SCTP;
4606 vport[i].rss_tuple_sets.ipv6_fragment_en =
4607 HCLGE_RSS_INPUT_TUPLE_OTHER;
4609 vport[i].rss_algo = rss_algo;
4611 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4612 HCLGE_RSS_KEY_SIZE);
4615 hclge_rss_indir_init_cfg(hdev);
4618 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4619 int vector_id, bool en,
4620 struct hnae3_ring_chain_node *ring_chain)
4622 struct hclge_dev *hdev = vport->back;
4623 struct hnae3_ring_chain_node *node;
4624 struct hclge_desc desc;
4625 struct hclge_ctrl_vector_chain_cmd *req =
4626 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4627 enum hclge_cmd_status status;
4628 enum hclge_opcode_type op;
4629 u16 tqp_type_and_id;
4632 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4633 hclge_cmd_setup_basic_desc(&desc, op, false);
4634 req->int_vector_id = vector_id;
4637 for (node = ring_chain; node; node = node->next) {
4638 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4639 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4640 HCLGE_INT_TYPE_S,
4641 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4642 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4643 HCLGE_TQP_ID_S, node->tqp_index);
4644 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4645 HCLGE_INT_GL_IDX_S,
4646 hnae3_get_field(node->int_gl_idx,
4647 HNAE3_RING_GL_IDX_M,
4648 HNAE3_RING_GL_IDX_S));
4649 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4650 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4651 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4652 req->vfid = vport->vport_id;
4654 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4655 if (status) {
4656 dev_err(&hdev->pdev->dev,
4657 "Map TQP fail, status is %d.\n",
4658 status);
4659 return -EIO;
4660 }
4661 i = 0;
4663 hclge_cmd_setup_basic_desc(&desc,
4664 op,
4665 false);
4666 req->int_vector_id = vector_id;
4667 }
4668 }
4670 if (i > 0) {
4671 req->int_cause_num = i;
4672 req->vfid = vport->vport_id;
4673 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4674 if (status) {
4675 dev_err(&hdev->pdev->dev,
4676 "Map TQP fail, status is %d.\n", status);
4677 return -EIO;
4678 }
4679 }
4681 return 0;
4682 }
4684 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4685 struct hnae3_ring_chain_node *ring_chain)
4687 struct hclge_vport *vport = hclge_get_vport(handle);
4688 struct hclge_dev *hdev = vport->back;
4691 vector_id = hclge_get_vector_index(hdev, vector);
4692 if (vector_id < 0) {
4693 dev_err(&hdev->pdev->dev,
4694 "failed to get vector index. vector=%d\n", vector);
4698 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4701 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4702 struct hnae3_ring_chain_node *ring_chain)
4704 struct hclge_vport *vport = hclge_get_vport(handle);
4705 struct hclge_dev *hdev = vport->back;
4708 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4711 vector_id = hclge_get_vector_index(hdev, vector);
4712 if (vector_id < 0) {
4713 dev_err(&handle->pdev->dev,
4714 "Get vector index fail. ret =%d\n", vector_id);
4718 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4720 dev_err(&handle->pdev->dev,
4721 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4727 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4728 struct hclge_promisc_param *param)
4730 struct hclge_promisc_cfg_cmd *req;
4731 struct hclge_desc desc;
4734 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4736 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4737 req->vf_id = param->vf_id;
4739 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4740 * pdev revision(0x20); newer revisions support them. Setting these two
4741 * fields will not return an error when the driver sends the command to
4742 * the firmware on revision(0x20).
4743 */
4744 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4745 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4747 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4749 dev_err(&hdev->pdev->dev,
4750 "Set promisc mode fail, status is %d.\n", ret);
4755 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4756 bool en_uc, bool en_mc, bool en_bc,
4757 int vport_id)
4762 memset(param, 0, sizeof(struct hclge_promisc_param));
4763 if (en_uc)
4764 param->enable = HCLGE_PROMISC_EN_UC;
4765 if (en_mc)
4766 param->enable |= HCLGE_PROMISC_EN_MC;
4767 if (en_bc)
4768 param->enable |= HCLGE_PROMISC_EN_BC;
4769 param->vf_id = vport_id;
4770 }
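/* Minimal usage sketch of the two helpers above (illustrative only and
 * not called anywhere; hclge_example_set_uc_promisc is a hypothetical
 * name): enable unicast promiscuous mode on a vport, mirroring what
 * hclge_set_vport_promisc_mode() below does for all three flags.
 */
static inline int hclge_example_set_uc_promisc(struct hclge_vport *vport)
{
	struct hclge_promisc_param param;

	/* en_uc = true, en_mc = false, en_bc = false on this vport */
	hclge_promisc_param_init(&param, true, false, false,
				 vport->vport_id);
	return hclge_cmd_set_promisc_mode(vport->back, &param);
}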
4772 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4773 bool en_mc_pmc, bool en_bc_pmc)
4775 struct hclge_dev *hdev = vport->back;
4776 struct hclge_promisc_param param;
4778 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4779 vport->vport_id);
4780 return hclge_cmd_set_promisc_mode(hdev, &param);
4783 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4784 bool en_mc_pmc)
4785 {
4786 struct hclge_vport *vport = hclge_get_vport(handle);
4787 bool en_bc_pmc = true;
4789 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4790 * is always bypassed. So broadcast promisc should stay disabled until
4791 * the user enables promisc mode.
4792 */
4793 if (handle->pdev->revision == 0x20)
4794 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4796 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4797 en_bc_pmc);
4798 }
4800 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4802 struct hclge_get_fd_mode_cmd *req;
4803 struct hclge_desc desc;
4806 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4808 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4810 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4812 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4816 *fd_mode = req->mode;
4821 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4822 u32 *stage1_entry_num,
4823 u32 *stage2_entry_num,
4824 u16 *stage1_counter_num,
4825 u16 *stage2_counter_num)
4827 struct hclge_get_fd_allocation_cmd *req;
4828 struct hclge_desc desc;
4831 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4833 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4835 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4837 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4842 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4843 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4844 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4845 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4850 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4852 struct hclge_set_fd_key_config_cmd *req;
4853 struct hclge_fd_key_cfg *stage;
4854 struct hclge_desc desc;
4857 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4859 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4860 stage = &hdev->fd_cfg.key_cfg[stage_num];
4861 req->stage = stage_num;
4862 req->key_select = stage->key_sel;
4863 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4864 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4865 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4866 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4867 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4868 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4870 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4872 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4877 static int hclge_init_fd_config(struct hclge_dev *hdev)
4879 #define LOW_2_WORDS 0x03
4880 struct hclge_fd_key_cfg *key_cfg;
4883 if (!hnae3_dev_fd_supported(hdev))
4886 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4890 switch (hdev->fd_cfg.fd_mode) {
4891 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4892 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4894 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4895 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4898 dev_err(&hdev->pdev->dev,
4899 "Unsupported flow director mode %u\n",
4900 hdev->fd_cfg.fd_mode);
4904 hdev->fd_cfg.proto_support =
4905 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4906 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4907 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4908 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4909 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4910 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4911 key_cfg->outer_sipv6_word_en = 0;
4912 key_cfg->outer_dipv6_word_en = 0;
4914 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4915 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4916 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4917 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4919 /* If the max 400-bit key is used, we can support tuples for ether type */
4920 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4921 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4922 key_cfg->tuple_active |=
4923 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4926 /* roce_type is used to filter roce frames
4927 * dst_vport is used to specify the rule
4929 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4931 ret = hclge_get_fd_allocation(hdev,
4932 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4933 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4934 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4935 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4939 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
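/* Summary of the init sequence above: query the FD mode, size the key
 * accordingly (the 2K-depth/400-bit-width mode keeps the full
 * MAX_KEY_LENGTH key, which additionally unlocks ETHER_FLOW and the MAC
 * tuples; the 4K/200-bit mode halves the key), then query the stage
 * entry/counter allocation and push the stage-1 key configuration to
 * hardware.
 */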
4942 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4943 int loc, u8 *key, bool is_add)
4945 struct hclge_fd_tcam_config_1_cmd *req1;
4946 struct hclge_fd_tcam_config_2_cmd *req2;
4947 struct hclge_fd_tcam_config_3_cmd *req3;
4948 struct hclge_desc desc[3];
4951 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4952 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4953 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4954 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4955 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4957 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4958 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4959 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4961 req1->stage = stage;
4962 req1->xy_sel = sel_x ? 1 : 0;
4963 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4964 req1->index = cpu_to_le32(loc);
4965 req1->entry_vld = sel_x ? is_add : 0;
4968 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4969 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4970 sizeof(req2->tcam_data));
4971 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4972 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4975 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4977 dev_err(&hdev->pdev->dev,
4978 "config tcam key fail, ret=%d\n",
4984 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4985 struct hclge_fd_ad_data *action)
4987 struct hclge_fd_ad_config_cmd *req;
4988 struct hclge_desc desc;
4992 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4994 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4995 req->index = cpu_to_le32(loc);
4998 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4999 action->write_rule_id_to_bd);
5000 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5003 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5004 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5005 action->forward_to_direct_queue);
5006 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5008 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5009 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5010 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5011 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5012 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5013 action->next_input_key);
5015 req->ad_data = cpu_to_le64(ad_data);
5016 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5018 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5023 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5024 struct hclge_fd_rule *rule)
5026 u16 tmp_x_s, tmp_y_s;
5027 u32 tmp_x_l, tmp_y_l;
5030 if (rule->unused_tuple & tuple_bit)
5033 switch (tuple_bit) {
5036 case BIT(INNER_DST_MAC):
5037 for (i = 0; i < ETH_ALEN; i++) {
5038 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5039 rule->tuples_mask.dst_mac[i]);
5040 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5041 rule->tuples_mask.dst_mac[i]);
5045 case BIT(INNER_SRC_MAC):
5046 for (i = 0; i < ETH_ALEN; i++) {
5047 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5048 rule->tuples_mask.src_mac[i]);
5049 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5050 rule->tuples_mask.src_mac[i]);
5054 case BIT(INNER_VLAN_TAG_FST):
5055 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5056 rule->tuples_mask.vlan_tag1);
5057 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5058 rule->tuples_mask.vlan_tag1);
5059 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5060 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5063 case BIT(INNER_ETH_TYPE):
5064 calc_x(tmp_x_s, rule->tuples.ether_proto,
5065 rule->tuples_mask.ether_proto);
5066 calc_y(tmp_y_s, rule->tuples.ether_proto,
5067 rule->tuples_mask.ether_proto);
5068 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5069 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5072 case BIT(INNER_IP_TOS):
5073 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5074 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5077 case BIT(INNER_IP_PROTO):
5078 calc_x(*key_x, rule->tuples.ip_proto,
5079 rule->tuples_mask.ip_proto);
5080 calc_y(*key_y, rule->tuples.ip_proto,
5081 rule->tuples_mask.ip_proto);
5084 case BIT(INNER_SRC_IP):
5085 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5086 rule->tuples_mask.src_ip[IPV4_INDEX]);
5087 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5088 rule->tuples_mask.src_ip[IPV4_INDEX]);
5089 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5090 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5093 case BIT(INNER_DST_IP):
5094 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5095 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5096 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5097 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5098 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5099 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5102 case BIT(INNER_SRC_PORT):
5103 calc_x(tmp_x_s, rule->tuples.src_port,
5104 rule->tuples_mask.src_port);
5105 calc_y(tmp_y_s, rule->tuples.src_port,
5106 rule->tuples_mask.src_port);
5107 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5108 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5111 case BIT(INNER_DST_PORT):
5112 calc_x(tmp_x_s, rule->tuples.dst_port,
5113 rule->tuples_mask.dst_port);
5114 calc_y(tmp_y_s, rule->tuples.dst_port,
5115 rule->tuples_mask.dst_port);
5116 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5117 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
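/* TCAM keys are stored as an (x, y) pair per tuple so each bit can be
 * 0, 1 or don't-care; calc_x()/calc_y() (defined elsewhere in this
 * file) derive the pair from a value and its mask. Hedged example for a
 * /24 source-IP rule, value 0xc0a80100 (192.168.1.0) with mask
 * 0xffffff00: the upper 24 key bits encode an exact match while the low
 * 8 bits encode don't-care, so 192.168.1.x matches and 192.168.2.x does
 * not. The exact bit encoding depends on the calc_x()/calc_y()
 * definitions.
 */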
5125 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5126 u8 vf_id, u8 network_port_id)
5128 u32 port_number = 0;
5130 if (port_type == HOST_PORT) {
5131 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5133 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5135 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5137 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5138 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5139 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5145 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5146 __le32 *key_x, __le32 *key_y,
5147 struct hclge_fd_rule *rule)
5149 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5150 u8 cur_pos = 0, tuple_size, shift_bits;
5153 for (i = 0; i < MAX_META_DATA; i++) {
5154 tuple_size = meta_data_key_info[i].key_length;
5155 tuple_bit = key_cfg->meta_data_active & BIT(i);
5157 switch (tuple_bit) {
5158 case BIT(ROCE_TYPE):
5159 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5160 cur_pos += tuple_size;
5162 case BIT(DST_VPORT):
5163 port_number = hclge_get_port_number(HOST_PORT, 0,
5165 hnae3_set_field(meta_data,
5166 GENMASK(cur_pos + tuple_size - 1, cur_pos),
5167 cur_pos, port_number);
5168 cur_pos += tuple_size;
5175 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5176 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5177 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5179 *key_x = cpu_to_le32(tmp_x << shift_bits);
5180 *key_y = cpu_to_le32(tmp_y << shift_bits);
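/* Worked example of the packing above: with meta_data_active =
 * BIT(ROCE_TYPE) | BIT(DST_VPORT), the ROCE_TYPE bit (NIC_PACKET) is
 * written first, followed by the destination vport number; cur_pos ends
 * up as the total number of meta data bits used, and the final shift
 * left-aligns them so the meta data sits in the MSBs of the word,
 * matching the key layout described in the next comment.
 */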
5183 /* A complete key consists of a meta data key and a tuple key.
5184 * The meta data key is stored in the MSB region, the tuple key in
5185 * the LSB region, and unused bits are filled with 0.
5186 */
5187 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5188 struct hclge_fd_rule *rule)
5190 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5191 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5192 u8 *cur_key_x, *cur_key_y;
5194 int ret, tuple_size;
5195 u8 meta_data_region;
5197 memset(key_x, 0, sizeof(key_x));
5198 memset(key_y, 0, sizeof(key_y));
5202 for (i = 0; i < MAX_TUPLE; i++) {
5206 tuple_size = tuple_key_info[i].key_length / 8;
5207 check_tuple = key_cfg->tuple_active & BIT(i);
5209 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5212 cur_key_x += tuple_size;
5213 cur_key_y += tuple_size;
5217 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5218 MAX_META_DATA_LENGTH / 8;
5220 hclge_fd_convert_meta_data(key_cfg,
5221 (__le32 *)(key_x + meta_data_region),
5222 (__le32 *)(key_y + meta_data_region),
5225 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5228 dev_err(&hdev->pdev->dev,
5229 "fd key_y config fail, loc=%u, ret=%d\n",
5230 rule->location, ret);
5234 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5237 dev_err(&hdev->pdev->dev,
5238 "fd key_x config fail, loc=%u, ret=%d\n",
5239 rule->location, ret);
5243 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5244 struct hclge_fd_rule *rule)
5246 struct hclge_fd_ad_data ad_data;
5248 ad_data.ad_id = rule->location;
5250 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5251 ad_data.drop_packet = true;
5252 ad_data.forward_to_direct_queue = false;
5253 ad_data.queue_id = 0;
5255 ad_data.drop_packet = false;
5256 ad_data.forward_to_direct_queue = true;
5257 ad_data.queue_id = rule->queue_id;
5260 ad_data.use_counter = false;
5261 ad_data.counter_id = 0;
5263 ad_data.use_next_stage = false;
5264 ad_data.next_input_key = 0;
5266 ad_data.write_rule_id_to_bd = true;
5267 ad_data.rule_id = rule->location;
5269 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5272 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5273 struct ethtool_rx_flow_spec *fs, u32 *unused)
5275 struct ethtool_tcpip4_spec *tcp_ip4_spec;
5276 struct ethtool_usrip4_spec *usr_ip4_spec;
5277 struct ethtool_tcpip6_spec *tcp_ip6_spec;
5278 struct ethtool_usrip6_spec *usr_ip6_spec;
5279 struct ethhdr *ether_spec;
5281 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5284 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5287 if ((fs->flow_type & FLOW_EXT) &&
5288 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5289 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5293 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5297 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5298 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5300 if (!tcp_ip4_spec->ip4src)
5301 *unused |= BIT(INNER_SRC_IP);
5303 if (!tcp_ip4_spec->ip4dst)
5304 *unused |= BIT(INNER_DST_IP);
5306 if (!tcp_ip4_spec->psrc)
5307 *unused |= BIT(INNER_SRC_PORT);
5309 if (!tcp_ip4_spec->pdst)
5310 *unused |= BIT(INNER_DST_PORT);
5312 if (!tcp_ip4_spec->tos)
5313 *unused |= BIT(INNER_IP_TOS);
5317 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5318 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5319 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5321 if (!usr_ip4_spec->ip4src)
5322 *unused |= BIT(INNER_SRC_IP);
5324 if (!usr_ip4_spec->ip4dst)
5325 *unused |= BIT(INNER_DST_IP);
5327 if (!usr_ip4_spec->tos)
5328 *unused |= BIT(INNER_IP_TOS);
5330 if (!usr_ip4_spec->proto)
5331 *unused |= BIT(INNER_IP_PROTO);
5333 if (usr_ip4_spec->l4_4_bytes)
5336 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5343 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5344 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5347 /* check whether src/dst ip address used */
5348 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5349 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5350 *unused |= BIT(INNER_SRC_IP);
5352 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5353 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5354 *unused |= BIT(INNER_DST_IP);
5356 if (!tcp_ip6_spec->psrc)
5357 *unused |= BIT(INNER_SRC_PORT);
5359 if (!tcp_ip6_spec->pdst)
5360 *unused |= BIT(INNER_DST_PORT);
5362 if (tcp_ip6_spec->tclass)
5366 case IPV6_USER_FLOW:
5367 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5368 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5369 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5370 BIT(INNER_DST_PORT);
5372 /* check whether src/dst ip address used */
5373 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5374 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5375 *unused |= BIT(INNER_SRC_IP);
5377 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5378 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5379 *unused |= BIT(INNER_DST_IP);
5381 if (!usr_ip6_spec->l4_proto)
5382 *unused |= BIT(INNER_IP_PROTO);
5384 if (usr_ip6_spec->tclass)
5387 if (usr_ip6_spec->l4_4_bytes)
5392 ether_spec = &fs->h_u.ether_spec;
5393 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5394 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5395 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5397 if (is_zero_ether_addr(ether_spec->h_source))
5398 *unused |= BIT(INNER_SRC_MAC);
5400 if (is_zero_ether_addr(ether_spec->h_dest))
5401 *unused |= BIT(INNER_DST_MAC);
5403 if (!ether_spec->h_proto)
5404 *unused |= BIT(INNER_ETH_TYPE);
5411 if ((fs->flow_type & FLOW_EXT)) {
5412 if (fs->h_ext.vlan_etype)
5414 if (!fs->h_ext.vlan_tci)
5415 *unused |= BIT(INNER_VLAN_TAG_FST);
5417 if (fs->m_ext.vlan_tci) {
5418 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5422 *unused |= BIT(INNER_VLAN_TAG_FST);
5425 if (fs->flow_type & FLOW_MAC_EXT) {
5426 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5429 if (is_zero_ether_addr(fs->h_ext.h_dest))
5430 *unused |= BIT(INNER_DST_MAC);
5432 *unused &= ~(BIT(INNER_DST_MAC));
5438 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5440 struct hclge_fd_rule *rule = NULL;
5441 struct hlist_node *node2;
5443 spin_lock_bh(&hdev->fd_rule_lock);
5444 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5445 if (rule->location >= location)
5449 spin_unlock_bh(&hdev->fd_rule_lock);
5451 return rule && rule->location == location;
5454 /* must be called with fd_rule_lock held */
5455 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5456 struct hclge_fd_rule *new_rule,
5460 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5461 struct hlist_node *node2;
5463 if (is_add && !new_rule)
5466 hlist_for_each_entry_safe(rule, node2,
5467 &hdev->fd_rule_list, rule_node) {
5468 if (rule->location >= location)
5473 if (rule && rule->location == location) {
5474 hlist_del(&rule->rule_node);
5476 hdev->hclge_fd_rule_num--;
5479 if (!hdev->hclge_fd_rule_num)
5480 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5481 clear_bit(location, hdev->fd_bmap);
5485 } else if (!is_add) {
5486 dev_err(&hdev->pdev->dev,
5487 "delete fail, rule %u is inexistent\n",
5492 INIT_HLIST_NODE(&new_rule->rule_node);
5495 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5497 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5499 set_bit(location, hdev->fd_bmap);
5500 hdev->hclge_fd_rule_num++;
5501 hdev->fd_active_type = new_rule->rule_type;
5506 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5507 struct ethtool_rx_flow_spec *fs,
5508 struct hclge_fd_rule *rule)
5510 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5512 switch (flow_type) {
5516 rule->tuples.src_ip[IPV4_INDEX] =
5517 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5518 rule->tuples_mask.src_ip[IPV4_INDEX] =
5519 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5521 rule->tuples.dst_ip[IPV4_INDEX] =
5522 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5523 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5524 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5526 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5527 rule->tuples_mask.src_port =
5528 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5530 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5531 rule->tuples_mask.dst_port =
5532 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5534 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5535 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5537 rule->tuples.ether_proto = ETH_P_IP;
5538 rule->tuples_mask.ether_proto = 0xFFFF;
5542 rule->tuples.src_ip[IPV4_INDEX] =
5543 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5544 rule->tuples_mask.src_ip[IPV4_INDEX] =
5545 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5547 rule->tuples.dst_ip[IPV4_INDEX] =
5548 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5549 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5550 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5552 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5553 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5555 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5556 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5558 rule->tuples.ether_proto = ETH_P_IP;
5559 rule->tuples_mask.ether_proto = 0xFFFF;
5565 be32_to_cpu_array(rule->tuples.src_ip,
5566 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5567 be32_to_cpu_array(rule->tuples_mask.src_ip,
5568 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5570 be32_to_cpu_array(rule->tuples.dst_ip,
5571 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5572 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5573 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5575 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5576 rule->tuples_mask.src_port =
5577 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5579 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5580 rule->tuples_mask.dst_port =
5581 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5583 rule->tuples.ether_proto = ETH_P_IPV6;
5584 rule->tuples_mask.ether_proto = 0xFFFF;
5587 case IPV6_USER_FLOW:
5588 be32_to_cpu_array(rule->tuples.src_ip,
5589 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5590 be32_to_cpu_array(rule->tuples_mask.src_ip,
5591 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5593 be32_to_cpu_array(rule->tuples.dst_ip,
5594 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5595 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5596 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5598 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5599 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5601 rule->tuples.ether_proto = ETH_P_IPV6;
5602 rule->tuples_mask.ether_proto = 0xFFFF;
5606 ether_addr_copy(rule->tuples.src_mac,
5607 fs->h_u.ether_spec.h_source);
5608 ether_addr_copy(rule->tuples_mask.src_mac,
5609 fs->m_u.ether_spec.h_source);
5611 ether_addr_copy(rule->tuples.dst_mac,
5612 fs->h_u.ether_spec.h_dest);
5613 ether_addr_copy(rule->tuples_mask.dst_mac,
5614 fs->m_u.ether_spec.h_dest);
5616 rule->tuples.ether_proto =
5617 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5618 rule->tuples_mask.ether_proto =
5619 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5626 switch (flow_type) {
5629 rule->tuples.ip_proto = IPPROTO_SCTP;
5630 rule->tuples_mask.ip_proto = 0xFF;
5634 rule->tuples.ip_proto = IPPROTO_TCP;
5635 rule->tuples_mask.ip_proto = 0xFF;
5639 rule->tuples.ip_proto = IPPROTO_UDP;
5640 rule->tuples_mask.ip_proto = 0xFF;
5646 if ((fs->flow_type & FLOW_EXT)) {
5647 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5648 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5651 if (fs->flow_type & FLOW_MAC_EXT) {
5652 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5653 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5659 /* must be called with fd_rule_lock held */
5660 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5661 struct hclge_fd_rule *rule)
5666 dev_err(&hdev->pdev->dev,
5667 "The flow director rule is NULL\n");
5671 /* it never fails here, so there is no need to check the return value */
5672 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5674 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5678 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5685 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5689 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5690 struct ethtool_rxnfc *cmd)
5692 struct hclge_vport *vport = hclge_get_vport(handle);
5693 struct hclge_dev *hdev = vport->back;
5694 u16 dst_vport_id = 0, q_index = 0;
5695 struct ethtool_rx_flow_spec *fs;
5696 struct hclge_fd_rule *rule;
5701 if (!hnae3_dev_fd_supported(hdev))
5705 dev_warn(&hdev->pdev->dev,
5706 "Please enable flow director first\n");
5710 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5712 ret = hclge_fd_check_spec(hdev, fs, &unused);
5714 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5718 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5719 action = HCLGE_FD_ACTION_DROP_PACKET;
5721 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5722 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5725 if (vf > hdev->num_req_vfs) {
5726 dev_err(&hdev->pdev->dev,
5727 "Error: vf id (%u) > max vf num (%u)\n",
5728 vf, hdev->num_req_vfs);
5732 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5733 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5736 dev_err(&hdev->pdev->dev,
5737 "Error: queue id (%u) > max tqp num (%u)\n",
5742 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5746 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5750 ret = hclge_fd_get_tuple(hdev, fs, rule);
5756 rule->flow_type = fs->flow_type;
5758 rule->location = fs->location;
5759 rule->unused_tuple = unused;
5760 rule->vf_id = dst_vport_id;
5761 rule->queue_id = q_index;
5762 rule->action = action;
5763 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5765 /* to avoid rule conflicts, when the user configures rules via ethtool,
5766 * we need to clear all arfs rules
5767 */
5768 hclge_clear_arfs_rules(handle);
5770 spin_lock_bh(&hdev->fd_rule_lock);
5771 ret = hclge_fd_config_rule(hdev, rule);
5773 spin_unlock_bh(&hdev->fd_rule_lock);
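/* Example: "ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80
 * action 3 loc 1" arrives here with fs->flow_type == TCP_V4_FLOW,
 * fs->location == 1 and fs->ring_cookie == 3, and is installed as an
 * HCLGE_FD_EP_ACTIVE rule steering matching packets to queue 3 of the
 * PF; "action -1" (RX_CLS_FLOW_DISC) would install a drop rule instead.
 */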
5778 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5779 struct ethtool_rxnfc *cmd)
5781 struct hclge_vport *vport = hclge_get_vport(handle);
5782 struct hclge_dev *hdev = vport->back;
5783 struct ethtool_rx_flow_spec *fs;
5786 if (!hnae3_dev_fd_supported(hdev))
5789 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5791 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5794 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5795 dev_err(&hdev->pdev->dev,
5796 "Delete fail, rule %u is inexistent\n", fs->location);
5800 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5805 spin_lock_bh(&hdev->fd_rule_lock);
5806 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5808 spin_unlock_bh(&hdev->fd_rule_lock);
5813 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5816 struct hclge_vport *vport = hclge_get_vport(handle);
5817 struct hclge_dev *hdev = vport->back;
5818 struct hclge_fd_rule *rule;
5819 struct hlist_node *node;
5822 if (!hnae3_dev_fd_supported(hdev))
5825 spin_lock_bh(&hdev->fd_rule_lock);
5826 for_each_set_bit(location, hdev->fd_bmap,
5827 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5828 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5832 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5834 hlist_del(&rule->rule_node);
5837 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5838 hdev->hclge_fd_rule_num = 0;
5839 bitmap_zero(hdev->fd_bmap,
5840 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5843 spin_unlock_bh(&hdev->fd_rule_lock);
5846 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5848 struct hclge_vport *vport = hclge_get_vport(handle);
5849 struct hclge_dev *hdev = vport->back;
5850 struct hclge_fd_rule *rule;
5851 struct hlist_node *node;
5854 /* Return ok here, because reset error handling will check this
5855 * return value. If error is returned here, the reset process will
5856 * fail.
5857 */
5858 if (!hnae3_dev_fd_supported(hdev))
5861 /* if fd is disabled, it should not be restored during reset */
5865 spin_lock_bh(&hdev->fd_rule_lock);
5866 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5867 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5869 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5872 dev_warn(&hdev->pdev->dev,
5873 "Restore rule %u failed, remove it\n",
5875 clear_bit(rule->location, hdev->fd_bmap);
5876 hlist_del(&rule->rule_node);
5878 hdev->hclge_fd_rule_num--;
5882 if (hdev->hclge_fd_rule_num)
5883 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5885 spin_unlock_bh(&hdev->fd_rule_lock);
5890 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5891 struct ethtool_rxnfc *cmd)
5893 struct hclge_vport *vport = hclge_get_vport(handle);
5894 struct hclge_dev *hdev = vport->back;
5896 if (!hnae3_dev_fd_supported(hdev))
5899 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5900 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5905 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5906 struct ethtool_rxnfc *cmd)
5908 struct hclge_vport *vport = hclge_get_vport(handle);
5909 struct hclge_fd_rule *rule = NULL;
5910 struct hclge_dev *hdev = vport->back;
5911 struct ethtool_rx_flow_spec *fs;
5912 struct hlist_node *node2;
5914 if (!hnae3_dev_fd_supported(hdev))
5917 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5919 spin_lock_bh(&hdev->fd_rule_lock);
5921 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5922 if (rule->location >= fs->location)
5926 if (!rule || fs->location != rule->location) {
5927 spin_unlock_bh(&hdev->fd_rule_lock);
5932 fs->flow_type = rule->flow_type;
5933 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5937 fs->h_u.tcp_ip4_spec.ip4src =
5938 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5939 fs->m_u.tcp_ip4_spec.ip4src =
5940 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5941 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5943 fs->h_u.tcp_ip4_spec.ip4dst =
5944 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5945 fs->m_u.tcp_ip4_spec.ip4dst =
5946 rule->unused_tuple & BIT(INNER_DST_IP) ?
5947 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5949 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5950 fs->m_u.tcp_ip4_spec.psrc =
5951 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5952 0 : cpu_to_be16(rule->tuples_mask.src_port);
5954 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5955 fs->m_u.tcp_ip4_spec.pdst =
5956 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5957 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5959 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5960 fs->m_u.tcp_ip4_spec.tos =
5961 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5962 0 : rule->tuples_mask.ip_tos;
5966 fs->h_u.usr_ip4_spec.ip4src =
5967 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5968 fs->m_u.usr_ip4_spec.ip4src =
5969 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5970 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5972 fs->h_u.usr_ip4_spec.ip4dst =
5973 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5974 fs->m_u.usr_ip4_spec.ip4dst =
5975 rule->unused_tuple & BIT(INNER_DST_IP) ?
5976 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5978 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5979 fs->m_u.usr_ip4_spec.tos =
5980 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5981 0 : rule->tuples_mask.ip_tos;
5983 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5984 fs->m_u.usr_ip4_spec.proto =
5985 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5986 0 : rule->tuples_mask.ip_proto;
5988 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5994 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5995 rule->tuples.src_ip, IPV6_SIZE);
5996 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5997 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5998 sizeof(int) * IPV6_SIZE);
5999 else
6000 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
6001 rule->tuples_mask.src_ip, IPV6_SIZE);
6003 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
6004 rule->tuples.dst_ip, IPV6_SIZE);
6005 if (rule->unused_tuple & BIT(INNER_DST_IP))
6006 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
6007 sizeof(int) * IPV6_SIZE);
6008 else
6009 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
6010 rule->tuples_mask.dst_ip, IPV6_SIZE);
6012 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
6013 fs->m_u.tcp_ip6_spec.psrc =
6014 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6015 0 : cpu_to_be16(rule->tuples_mask.src_port);
6017 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
6018 fs->m_u.tcp_ip6_spec.pdst =
6019 rule->unused_tuple & BIT(INNER_DST_PORT) ?
6020 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6023 case IPV6_USER_FLOW:
6024 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
6025 rule->tuples.src_ip, IPV6_SIZE);
6026 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6027 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6028 sizeof(int) * IPV6_SIZE);
6029 else
6030 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6031 rule->tuples_mask.src_ip, IPV6_SIZE);
6033 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6034 rule->tuples.dst_ip, IPV6_SIZE);
6035 if (rule->unused_tuple & BIT(INNER_DST_IP))
6036 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6037 sizeof(int) * IPV6_SIZE);
6038 else
6039 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6040 rule->tuples_mask.dst_ip, IPV6_SIZE);
6042 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6043 fs->m_u.usr_ip6_spec.l4_proto =
6044 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6045 0 : rule->tuples_mask.ip_proto;
6049 ether_addr_copy(fs->h_u.ether_spec.h_source,
6050 rule->tuples.src_mac);
6051 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6052 eth_zero_addr(fs->m_u.ether_spec.h_source);
6053 else
6054 ether_addr_copy(fs->m_u.ether_spec.h_source,
6055 rule->tuples_mask.src_mac);
6057 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6058 rule->tuples.dst_mac);
6059 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6060 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6061 else
6062 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6063 rule->tuples_mask.dst_mac);
6065 fs->h_u.ether_spec.h_proto =
6066 cpu_to_be16(rule->tuples.ether_proto);
6067 fs->m_u.ether_spec.h_proto =
6068 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6069 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6073 spin_unlock_bh(&hdev->fd_rule_lock);
6077 if (fs->flow_type & FLOW_EXT) {
6078 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6079 fs->m_ext.vlan_tci =
6080 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6081 cpu_to_be16(VLAN_VID_MASK) :
6082 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6085 if (fs->flow_type & FLOW_MAC_EXT) {
6086 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6087 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6088 eth_zero_addr(fs->m_ext.h_dest);
6089 else
6090 ether_addr_copy(fs->m_ext.h_dest,
6091 rule->tuples_mask.dst_mac);
6094 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6095 fs->ring_cookie = RX_CLS_FLOW_DISC;
6099 fs->ring_cookie = rule->queue_id;
6100 vf_id = rule->vf_id;
6101 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6102 fs->ring_cookie |= vf_id;
6105 spin_unlock_bh(&hdev->fd_rule_lock);
6110 static int hclge_get_all_rules(struct hnae3_handle *handle,
6111 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6113 struct hclge_vport *vport = hclge_get_vport(handle);
6114 struct hclge_dev *hdev = vport->back;
6115 struct hclge_fd_rule *rule;
6116 struct hlist_node *node2;
6119 if (!hnae3_dev_fd_supported(hdev))
6122 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6124 spin_lock_bh(&hdev->fd_rule_lock);
6125 hlist_for_each_entry_safe(rule, node2,
6126 &hdev->fd_rule_list, rule_node) {
6127 if (cnt == cmd->rule_cnt) {
6128 spin_unlock_bh(&hdev->fd_rule_lock);
6132 rule_locs[cnt] = rule->location;
6136 spin_unlock_bh(&hdev->fd_rule_lock);
6138 cmd->rule_cnt = cnt;
6143 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6144 struct hclge_fd_rule_tuples *tuples)
6146 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6147 tuples->ip_proto = fkeys->basic.ip_proto;
6148 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6150 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6151 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6152 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6154 memcpy(tuples->src_ip,
6155 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6156 sizeof(tuples->src_ip));
6157 memcpy(tuples->dst_ip,
6158 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6159 sizeof(tuples->dst_ip));
6163 /* traverse all rules, check whether an existing rule has the same tuples */
6164 static struct hclge_fd_rule *
6165 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6166 const struct hclge_fd_rule_tuples *tuples)
6168 struct hclge_fd_rule *rule = NULL;
6169 struct hlist_node *node;
6171 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6172 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6179 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6180 struct hclge_fd_rule *rule)
6182 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6183 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6184 BIT(INNER_SRC_PORT);
6187 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6188 if (tuples->ether_proto == ETH_P_IP) {
6189 if (tuples->ip_proto == IPPROTO_TCP)
6190 rule->flow_type = TCP_V4_FLOW;
6192 rule->flow_type = UDP_V4_FLOW;
6194 if (tuples->ip_proto == IPPROTO_TCP)
6195 rule->flow_type = TCP_V6_FLOW;
6197 rule->flow_type = UDP_V6_FLOW;
6199 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6200 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
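/* Example of an aRFS rule built above: a TCP/IPv4 flow keys only on
 * ether_proto, ip_proto, the source/destination IPs and the destination
 * port; MAC addresses, the VLAN tag, ToS and the source port are all
 * marked unused, and every used tuple is fully masked (0xFF), i.e.
 * matched exactly.
 */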
6203 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6204 u16 flow_id, struct flow_keys *fkeys)
6206 struct hclge_vport *vport = hclge_get_vport(handle);
6207 struct hclge_fd_rule_tuples new_tuples;
6208 struct hclge_dev *hdev = vport->back;
6209 struct hclge_fd_rule *rule;
6214 if (!hnae3_dev_fd_supported(hdev))
6217 memset(&new_tuples, 0, sizeof(new_tuples));
6218 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6220 spin_lock_bh(&hdev->fd_rule_lock);
6222 /* when there is already an fd rule added by the user,
6223 * arfs should not work
6224 */
6225 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6226 spin_unlock_bh(&hdev->fd_rule_lock);
6231 /* check whether a flow director filter exists for this flow;
6232 * if not, create a new filter for it;
6233 * if a filter exists with a different queue id, modify the filter;
6234 * if a filter exists with the same queue id, do nothing
6235 */
6236 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6238 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6239 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6240 spin_unlock_bh(&hdev->fd_rule_lock);
6245 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6247 spin_unlock_bh(&hdev->fd_rule_lock);
6252 set_bit(bit_id, hdev->fd_bmap);
6253 rule->location = bit_id;
6254 rule->flow_id = flow_id;
6255 rule->queue_id = queue_id;
6256 hclge_fd_build_arfs_rule(&new_tuples, rule);
6257 ret = hclge_fd_config_rule(hdev, rule);
6259 spin_unlock_bh(&hdev->fd_rule_lock);
6264 return rule->location;
6267 spin_unlock_bh(&hdev->fd_rule_lock);
6269 if (rule->queue_id == queue_id)
6270 return rule->location;
6272 tmp_queue_id = rule->queue_id;
6273 rule->queue_id = queue_id;
6274 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6276 rule->queue_id = tmp_queue_id;
6280 return rule->location;
6283 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6285 #ifdef CONFIG_RFS_ACCEL
6286 struct hnae3_handle *handle = &hdev->vport[0].nic;
6287 struct hclge_fd_rule *rule;
6288 struct hlist_node *node;
6289 HLIST_HEAD(del_list);
6291 spin_lock_bh(&hdev->fd_rule_lock);
6292 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6293 spin_unlock_bh(&hdev->fd_rule_lock);
6296 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6297 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6298 rule->flow_id, rule->location)) {
6299 hlist_del_init(&rule->rule_node);
6300 hlist_add_head(&rule->rule_node, &del_list);
6301 hdev->hclge_fd_rule_num--;
6302 clear_bit(rule->location, hdev->fd_bmap);
6305 spin_unlock_bh(&hdev->fd_rule_lock);
6307 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6308 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6309 rule->location, NULL, false);
6315 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6317 #ifdef CONFIG_RFS_ACCEL
6318 struct hclge_vport *vport = hclge_get_vport(handle);
6319 struct hclge_dev *hdev = vport->back;
6321 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6322 hclge_del_all_fd_entries(handle, true);
6326 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6328 struct hclge_vport *vport = hclge_get_vport(handle);
6329 struct hclge_dev *hdev = vport->back;
6331 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6332 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6335 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6337 struct hclge_vport *vport = hclge_get_vport(handle);
6338 struct hclge_dev *hdev = vport->back;
6340 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6343 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6345 struct hclge_vport *vport = hclge_get_vport(handle);
6346 struct hclge_dev *hdev = vport->back;
6348 return hdev->rst_stats.hw_reset_done_cnt;
6351 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6353 struct hclge_vport *vport = hclge_get_vport(handle);
6354 struct hclge_dev *hdev = vport->back;
6357 hdev->fd_en = enable;
6358 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6359 if (!enable)
6360 hclge_del_all_fd_entries(handle, clear);
6361 else
6362 hclge_restore_fd_entries(handle);
6363 }
6365 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6367 struct hclge_desc desc;
6368 struct hclge_config_mac_mode_cmd *req =
6369 (struct hclge_config_mac_mode_cmd *)desc.data;
6373 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6376 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6377 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6378 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6379 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6380 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6381 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6382 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6383 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6384 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6385 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6388 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6390 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6392 dev_err(&hdev->pdev->dev,
6393 "mac enable fail, ret =%d.\n", ret);
6396 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6397 u8 switch_param, u8 param_mask)
6399 struct hclge_mac_vlan_switch_cmd *req;
6400 struct hclge_desc desc;
6404 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6405 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6407 /* read current config parameter */
6408 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6410 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6411 req->func_id = cpu_to_le32(func_id);
6413 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6415 dev_err(&hdev->pdev->dev,
6416 "read mac vlan switch parameter fail, ret = %d\n", ret);
6420 /* modify and write new config parameter */
6421 hclge_cmd_reuse_desc(&desc, false);
6422 req->switch_param = (req->switch_param & param_mask) | switch_param;
6423 req->param_mask = param_mask;
6425 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6427 dev_err(&hdev->pdev->dev,
6428 "set mac vlan switch parameter fail, ret = %d\n", ret);
6432 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6435 #define HCLGE_PHY_LINK_STATUS_NUM 200
6437 struct phy_device *phydev = hdev->hw.mac.phydev;
6442 ret = phy_read_status(phydev);
6444 dev_err(&hdev->pdev->dev,
6445 "phy update link status fail, ret = %d\n", ret);
6449 if (phydev->link == link_ret)
6452 msleep(HCLGE_LINK_STATUS_MS);
6453 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6456 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6458 #define HCLGE_MAC_LINK_STATUS_NUM 100
6464 ret = hclge_get_mac_link_status(hdev);
6467 else if (ret == link_ret)
6470 msleep(HCLGE_LINK_STATUS_MS);
6471 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
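/* Polling budget, worked out: the loop above samples the MAC link state
 * up to HCLGE_MAC_LINK_STATUS_NUM (100) times, HCLGE_LINK_STATUS_MS
 * apart, so the worst-case wait is on the order of one second; the PHY
 * variant above allows HCLGE_PHY_LINK_STATUS_NUM (200) iterations, i.e.
 * roughly twice as long.
 */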
6475 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6478 #define HCLGE_LINK_STATUS_DOWN 0
6479 #define HCLGE_LINK_STATUS_UP 1
6483 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6486 hclge_phy_link_status_wait(hdev, link_ret);
6488 return hclge_mac_link_status_wait(hdev, link_ret);
6491 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6493 struct hclge_config_mac_mode_cmd *req;
6494 struct hclge_desc desc;
6498 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6499 /* 1 Read out the MAC mode config at first */
6500 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6501 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6503 dev_err(&hdev->pdev->dev,
6504 "mac loopback get fail, ret =%d.\n", ret);
6508 /* 2 Then setup the loopback flag */
6509 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6510 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6511 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6512 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6514 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6516 /* 3 Config mac work mode with loopback flag
6517 * and its original configuration parameters
6519 hclge_cmd_reuse_desc(&desc, false);
6520 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6522 dev_err(&hdev->pdev->dev,
6523 "mac loopback set fail, ret =%d.\n", ret);
6527 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6528 enum hnae3_loop loop_mode)
6530 #define HCLGE_SERDES_RETRY_MS 10
6531 #define HCLGE_SERDES_RETRY_NUM 100
6533 struct hclge_serdes_lb_cmd *req;
6534 struct hclge_desc desc;
6538 req = (struct hclge_serdes_lb_cmd *)desc.data;
6539 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6541 switch (loop_mode) {
6542 case HNAE3_LOOP_SERIAL_SERDES:
6543 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6545 case HNAE3_LOOP_PARALLEL_SERDES:
6546 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6549 dev_err(&hdev->pdev->dev,
6550 "unsupported serdes loopback mode %d\n", loop_mode);
6554 if (en) {
6555 req->enable = loop_mode_b;
6556 req->mask = loop_mode_b;
6557 } else {
6558 req->mask = loop_mode_b;
6559 }
6561 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6563 dev_err(&hdev->pdev->dev,
6564 "serdes loopback set fail, ret = %d\n", ret);
6569 msleep(HCLGE_SERDES_RETRY_MS);
6570 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6572 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6574 dev_err(&hdev->pdev->dev,
6575 "serdes loopback get, ret = %d\n", ret);
6578 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6579 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6581 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6582 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6584 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6585 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
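/* Completion-wait arithmetic: the loop above polls the firmware up to
 * HCLGE_SERDES_RETRY_NUM (100) times every HCLGE_SERDES_RETRY_MS
 * (10 ms), so the firmware has roughly one second to raise
 * HCLGE_CMD_SERDES_DONE_B before the timeout path reports an error.
 */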
6591 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6592 enum hnae3_loop loop_mode)
6596 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6600 hclge_cfg_mac_mode(hdev, en);
6602 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6604 dev_err(&hdev->pdev->dev,
6605 "serdes loopback config mac mode timeout\n");
6610 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6611 struct phy_device *phydev)
6615 if (!phydev->suspended) {
6616 ret = phy_suspend(phydev);
6621 ret = phy_resume(phydev);
6625 return phy_loopback(phydev, true);
6628 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6629 struct phy_device *phydev)
6633 ret = phy_loopback(phydev, false);
6637 return phy_suspend(phydev);
6640 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6642 struct phy_device *phydev = hdev->hw.mac.phydev;
6649 ret = hclge_enable_phy_loopback(hdev, phydev);
6651 ret = hclge_disable_phy_loopback(hdev, phydev);
6653 dev_err(&hdev->pdev->dev,
6654 "set phy loopback fail, ret = %d\n", ret);
6658 hclge_cfg_mac_mode(hdev, en);
6660 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6662 dev_err(&hdev->pdev->dev,
6663 "phy loopback config mac mode timeout\n");
6668 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6669 int stream_id, bool enable)
6671 struct hclge_desc desc;
6672 struct hclge_cfg_com_tqp_queue_cmd *req =
6673 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6676 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6677 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6678 req->stream_id = cpu_to_le16(stream_id);
6679 if (enable)
6680 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6682 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6684 dev_err(&hdev->pdev->dev,
6685 "Tqp enable fail, status =%d.\n", ret);
6689 static int hclge_set_loopback(struct hnae3_handle *handle,
6690 enum hnae3_loop loop_mode, bool en)
6692 struct hclge_vport *vport = hclge_get_vport(handle);
6693 struct hnae3_knic_private_info *kinfo;
6694 struct hclge_dev *hdev = vport->back;
6697 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6698 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6699 * the same, the packets are looped back in the SSU. If SSU loopback
6700 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6701 */
6702 if (hdev->pdev->revision >= 0x21) {
6703 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6705 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6706 HCLGE_SWITCH_ALW_LPBK_MASK);
6711 switch (loop_mode) {
6712 case HNAE3_LOOP_APP:
6713 ret = hclge_set_app_loopback(hdev, en);
6715 case HNAE3_LOOP_SERIAL_SERDES:
6716 case HNAE3_LOOP_PARALLEL_SERDES:
6717 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6719 case HNAE3_LOOP_PHY:
6720 ret = hclge_set_phy_loopback(hdev, en);
6724 dev_err(&hdev->pdev->dev,
6725 "loop_mode %d is not supported\n", loop_mode);
6732 kinfo = &vport->nic.kinfo;
6733 for (i = 0; i < kinfo->num_tqps; i++) {
6734 ret = hclge_tqp_enable(hdev, i, 0, en);
6742 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6746 ret = hclge_set_app_loopback(hdev, false);
6750 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6754 return hclge_cfg_serdes_loopback(hdev, false,
6755 HNAE3_LOOP_PARALLEL_SERDES);
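/* hclge_set_default_loopback() appears intended to run at device
 * (re)initialization as the counterpart of the ethtool path above: it
 * switches off app (MAC) loopback and both serdes loopback modes so
 * that no loopback configuration survives into normal operation.
 */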
6758 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6760 struct hclge_vport *vport = hclge_get_vport(handle);
6761 struct hnae3_knic_private_info *kinfo;
6762 struct hnae3_queue *queue;
6763 struct hclge_tqp *tqp;
6766 kinfo = &vport->nic.kinfo;
6767 for (i = 0; i < kinfo->num_tqps; i++) {
6768 queue = handle->kinfo.tqp[i];
6769 tqp = container_of(queue, struct hclge_tqp, q);
6770 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6774 static void hclge_flush_link_update(struct hclge_dev *hdev)
6776 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
6778 unsigned long last = hdev->serv_processed_cnt;
6781 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6782 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6783 last == hdev->serv_processed_cnt)
6787 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6789 struct hclge_vport *vport = hclge_get_vport(handle);
6790 struct hclge_dev *hdev = vport->back;
6793 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6795 /* Set the DOWN flag here to disable link updating */
6796 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6798 /* flush memory to make sure DOWN is seen by service task */
6799 smp_mb__before_atomic();
6800 hclge_flush_link_update(hdev);
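/* The barrier above ensures the HCLGE_STATE_DOWN store is visible before
 * the flush starts sampling state. The flush itself is a bounded
 * busy-wait: hclge_flush_link_update() spins until
 * HCLGE_STATE_LINK_UPDATING clears, the service task makes progress
 * (serv_processed_cnt changes), or HCLGE_FLUSH_LINK_TIMEOUT (100000)
 * iterations elapse, so a wedged service task cannot hang this caller.
 */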
6804 static int hclge_ae_start(struct hnae3_handle *handle)
6806 struct hclge_vport *vport = hclge_get_vport(handle);
6807 struct hclge_dev *hdev = vport->back;
6810 hclge_cfg_mac_mode(hdev, true);
6811 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6812 hdev->hw.mac.link = 0;
6814 /* reset tqp stats */
6815 hclge_reset_tqp_stats(handle);
6817 hclge_mac_start_phy(hdev);
6822 static void hclge_ae_stop(struct hnae3_handle *handle)
6824 struct hclge_vport *vport = hclge_get_vport(handle);
6825 struct hclge_dev *hdev = vport->back;
6828 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6830 hclge_clear_arfs_rules(handle);
6832 /* If it is not a PF reset, the firmware will disable the MAC,
6833 * so we only need to stop the PHY here.
6834 */
6835 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6836 hdev->reset_type != HNAE3_FUNC_RESET) {
6837 hclge_mac_stop_phy(hdev);
6838 hclge_update_link_status(hdev);
6842 for (i = 0; i < handle->kinfo.num_tqps; i++)
6843 hclge_reset_tqp(handle, i);
6845 hclge_config_mac_tnl_int(hdev, false);
6848 hclge_cfg_mac_mode(hdev, false);
6850 hclge_mac_stop_phy(hdev);
6852 /* reset tqp stats */
6853 hclge_reset_tqp_stats(handle);
6854 hclge_update_link_status(hdev);
6857 int hclge_vport_start(struct hclge_vport *vport)
6859 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6860 vport->last_active_jiffies = jiffies;
6864 void hclge_vport_stop(struct hclge_vport *vport)
6866 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6869 static int hclge_client_start(struct hnae3_handle *handle)
6871 struct hclge_vport *vport = hclge_get_vport(handle);
6873 return hclge_vport_start(vport);
6876 static void hclge_client_stop(struct hnae3_handle *handle)
6878 struct hclge_vport *vport = hclge_get_vport(handle);
6880 hclge_vport_stop(vport);
6883 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6884 u16 cmdq_resp, u8 resp_code,
6885 enum hclge_mac_vlan_tbl_opcode op)
6887 struct hclge_dev *hdev = vport->back;
6890 dev_err(&hdev->pdev->dev,
6891 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6896 if (op == HCLGE_MAC_VLAN_ADD) {
6897 if ((!resp_code) || (resp_code == 1)) {
6899 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6900 dev_err(&hdev->pdev->dev,
6901 "add mac addr failed for uc_overflow.\n");
6903 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6904 dev_err(&hdev->pdev->dev,
6905 "add mac addr failed for mc_overflow.\n");
6909 dev_err(&hdev->pdev->dev,
6910 "add mac addr failed for undefined, code=%u.\n",
6913 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6916 } else if (resp_code == 1) {
6917 dev_dbg(&hdev->pdev->dev,
6918 "remove mac addr failed for miss.\n");
6922 dev_err(&hdev->pdev->dev,
6923 "remove mac addr failed for undefined, code=%u.\n",
6926 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6929 } else if (resp_code == 1) {
6930 dev_dbg(&hdev->pdev->dev,
6931 "lookup mac addr failed for miss.\n");
6935 dev_err(&hdev->pdev->dev,
6936 "lookup mac addr failed for undefined, code=%u.\n",
6941 dev_err(&hdev->pdev->dev,
6942 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6947 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6949 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6951 unsigned int word_num;
6952 unsigned int bit_num;
6954 if (vfid > 255 || vfid < 0)
6957 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6958 word_num = vfid / 32;
6959 bit_num = vfid % 32;
6961 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6963 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6965 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6966 bit_num = vfid % 32;
6968 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6970 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
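/* Bitmap layout assumed by the math above: function ids 0..191
 * (HCLGE_VF_NUM_IN_FIRST_DESC) live in desc[1] and the remainder in
 * desc[2], 32 ids per 32-bit data word. Example: vfid 200 selects
 * desc[2], word (200 - 192) / 32 = 0, bit 200 % 32 = 8.
 */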
6976 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6978 #define HCLGE_DESC_NUMBER 3
6979 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6982 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6983 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6984 if (desc[i].data[j])
6990 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6991 const u8 *addr, bool is_mc)
6993 const unsigned char *mac_addr = addr;
6994 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6995 (mac_addr[0]) | (mac_addr[1] << 8);
6996 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6998 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7000 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7001 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7004 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7005 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
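/* Worked example of the packing above for MAC 00:11:22:33:44:55
 * (mac_addr[0] = 0x00 ... mac_addr[5] = 0x55):
 *   high_val = 0x22 << 16 | 0x33 << 24 | 0x00 | 0x11 << 8 = 0x33221100
 *   low_val  = 0x44 | 0x55 << 8 = 0x5544
 * so byte 0 of the address becomes the least significant byte of
 * mac_addr_hi32, and both fields go out little-endian via cpu_to_le*().
 */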
7008 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7009 struct hclge_mac_vlan_tbl_entry_cmd *req)
7011 struct hclge_dev *hdev = vport->back;
7012 struct hclge_desc desc;
7017 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7019 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7021 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7023 dev_err(&hdev->pdev->dev,
7024 "del mac addr failed for cmd_send, ret =%d.\n",
7028 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7029 retval = le16_to_cpu(desc.retval);
7031 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7032 HCLGE_MAC_VLAN_REMOVE);
7035 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7036 struct hclge_mac_vlan_tbl_entry_cmd *req,
7037 struct hclge_desc *desc,
7040 struct hclge_dev *hdev = vport->back;
7045 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7047 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7048 memcpy(desc[0].data,
7050 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7051 hclge_cmd_setup_basic_desc(&desc[1],
7052 HCLGE_OPC_MAC_VLAN_ADD,
7054 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7055 hclge_cmd_setup_basic_desc(&desc[2],
7056 HCLGE_OPC_MAC_VLAN_ADD,
7058 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7060 memcpy(desc[0].data,
7062 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7063 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7066 dev_err(&hdev->pdev->dev,
7067 "lookup mac addr failed for cmd_send, ret =%d.\n",
7071 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7072 retval = le16_to_cpu(desc[0].retval);
7074 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7075 HCLGE_MAC_VLAN_LKUP);
7078 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7079 struct hclge_mac_vlan_tbl_entry_cmd *req,
7080 struct hclge_desc *mc_desc)
7082 struct hclge_dev *hdev = vport->back;
7089 struct hclge_desc desc;
7091 hclge_cmd_setup_basic_desc(&desc,
7092 HCLGE_OPC_MAC_VLAN_ADD,
7094 memcpy(desc.data, req,
7095 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7096 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7097 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7098 retval = le16_to_cpu(desc.retval);
7100 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7102 HCLGE_MAC_VLAN_ADD);
7104 hclge_cmd_reuse_desc(&mc_desc[0], false);
7105 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7106 hclge_cmd_reuse_desc(&mc_desc[1], false);
7107 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7108 hclge_cmd_reuse_desc(&mc_desc[2], false);
7109 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7110 memcpy(mc_desc[0].data, req,
7111 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7112 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7113 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7114 retval = le16_to_cpu(mc_desc[0].retval);
7116 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7118 HCLGE_MAC_VLAN_ADD);
7122 dev_err(&hdev->pdev->dev,
7123 "add mac addr failed for cmd_send, ret =%d.\n",
7131 static int hclge_init_umv_space(struct hclge_dev *hdev)
7133 u16 allocated_size = 0;
7136 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7141 if (allocated_size < hdev->wanted_umv_size)
7142 dev_warn(&hdev->pdev->dev,
7143 "Alloc umv space failed, want %u, get %u\n",
7144 hdev->wanted_umv_size, allocated_size);
7146 mutex_init(&hdev->umv_mutex);
7147 hdev->max_umv_size = allocated_size;
7148 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7149 * preserve some unicast mac vlan table entries shared by the PF
7150 * and its VFs.
7151 */
7152 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7153 hdev->share_umv_size = hdev->priv_umv_size +
7154 hdev->max_umv_size % (hdev->num_req_vfs + 2);
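/* The divisor (num_req_vfs + 2) yields one private slice for the PF,
 * one per requested VF, and one extra slice that, together with the
 * division remainder, forms the shared pool. E.g. if firmware granted
 * max_umv_size = 3072 and num_req_vfs = 8 (divisor 10):
 *   priv_umv_size  = 3072 / 10 = 307 entries per function
 *   share_umv_size = 307 + 3072 % 10 = 309 entries in the shared pool
 */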
7159 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7163 if (hdev->max_umv_size > 0) {
7164 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7168 hdev->max_umv_size = 0;
7170 mutex_destroy(&hdev->umv_mutex);
7175 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7176 u16 *allocated_size, bool is_alloc)
7178 struct hclge_umv_spc_alc_cmd *req;
7179 struct hclge_desc desc;
7182 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7183 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7185 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7187 req->space_size = cpu_to_le32(space_size);
7189 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7191 dev_err(&hdev->pdev->dev,
7192 "%s umv space failed for cmd_send, ret =%d\n",
7193 is_alloc ? "allocate" : "free", ret);
7197 if (is_alloc && allocated_size)
7198 *allocated_size = le32_to_cpu(desc.data[1]);
7203 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7205 struct hclge_vport *vport;
7208 for (i = 0; i < hdev->num_alloc_vport; i++) {
7209 vport = &hdev->vport[i];
7210 vport->used_umv_num = 0;
7213 mutex_lock(&hdev->umv_mutex);
7214 hdev->share_umv_size = hdev->priv_umv_size +
7215 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7216 mutex_unlock(&hdev->umv_mutex);
7219 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7221 struct hclge_dev *hdev = vport->back;
7224 mutex_lock(&hdev->umv_mutex);
7225 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7226 hdev->share_umv_size == 0);
7227 mutex_unlock(&hdev->umv_mutex);
7232 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7234 struct hclge_dev *hdev = vport->back;
7236 mutex_lock(&hdev->umv_mutex);
7238 if (vport->used_umv_num > hdev->priv_umv_size)
7239 hdev->share_umv_size++;
7241 if (vport->used_umv_num > 0)
7242 vport->used_umv_num--;
7244 if (vport->used_umv_num >= hdev->priv_umv_size &&
7245 hdev->share_umv_size > 0)
7246 hdev->share_umv_size--;
7247 vport->used_umv_num++;
7249 mutex_unlock(&hdev->umv_mutex);
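/* Accounting model enforced above (under umv_mutex): a vport consumes
 * its private quota (priv_umv_size) first and only then draws from the
 * shared pool; frees are credited back in the reverse order. Hence
 * hclge_is_umv_space_full() reports full only when both the private
 * quota and the shared pool are exhausted.
 */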
7252 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7253 const unsigned char *addr)
7255 struct hclge_vport *vport = hclge_get_vport(handle);
7257 return hclge_add_uc_addr_common(vport, addr);
7260 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7261 const unsigned char *addr)
7263 struct hclge_dev *hdev = vport->back;
7264 struct hclge_mac_vlan_tbl_entry_cmd req;
7265 struct hclge_desc desc;
7266 u16 egress_port = 0;
7269 /* mac addr check */
7270 if (is_zero_ether_addr(addr) ||
7271 is_broadcast_ether_addr(addr) ||
7272 is_multicast_ether_addr(addr)) {
7273 dev_err(&hdev->pdev->dev,
7274 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7275 addr, is_zero_ether_addr(addr),
7276 is_broadcast_ether_addr(addr),
7277 is_multicast_ether_addr(addr));
7281 memset(&req, 0, sizeof(req));
7283 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7284 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7286 req.egress_port = cpu_to_le16(egress_port);
7288 hclge_prepare_mac_addr(&req, addr, false);
7290 /* Lookup the mac address in the mac_vlan table, and add
7291 * it if the entry is absent. Duplicate unicast entries
7292 * are not allowed in the mac vlan table.
7293 */
7294 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7295 if (ret == -ENOENT) {
7296 if (!hclge_is_umv_space_full(vport)) {
7297 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7299 hclge_update_umv_space(vport, false);
7303 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7304 hdev->priv_umv_size);
7309 /* check if we just hit the duplicate */
7311 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7312 vport->vport_id, addr);
7316 dev_err(&hdev->pdev->dev,
7317 "PF failed to add unicast entry(%pM) in the MAC table\n",
7323 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7324 const unsigned char *addr)
7326 struct hclge_vport *vport = hclge_get_vport(handle);
7328 return hclge_rm_uc_addr_common(vport, addr);
7331 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7332 const unsigned char *addr)
7334 struct hclge_dev *hdev = vport->back;
7335 struct hclge_mac_vlan_tbl_entry_cmd req;
7338 /* mac addr check */
7339 if (is_zero_ether_addr(addr) ||
7340 is_broadcast_ether_addr(addr) ||
7341 is_multicast_ether_addr(addr)) {
7342 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7347 memset(&req, 0, sizeof(req));
7348 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7349 hclge_prepare_mac_addr(&req, addr, false);
7350 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7352 hclge_update_umv_space(vport, true);
7357 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7358 const unsigned char *addr)
7360 struct hclge_vport *vport = hclge_get_vport(handle);
7362 return hclge_add_mc_addr_common(vport, addr);
7365 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7366 const unsigned char *addr)
7368 struct hclge_dev *hdev = vport->back;
7369 struct hclge_mac_vlan_tbl_entry_cmd req;
7370 struct hclge_desc desc[3];
7373 /* mac addr check */
7374 if (!is_multicast_ether_addr(addr)) {
7375 dev_err(&hdev->pdev->dev,
7376 "Add mc mac err! invalid mac:%pM.\n",
7380 memset(&req, 0, sizeof(req));
7381 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7382 hclge_prepare_mac_addr(&req, addr, true);
7383 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7385 /* This mac addr does not exist; add a new entry for it */
7386 memset(desc[0].data, 0, sizeof(desc[0].data));
7387 memset(desc[1].data, 0, sizeof(desc[0].data));
7388 memset(desc[2].data, 0, sizeof(desc[0].data));
7390 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7393 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7395 if (status == -ENOSPC)
7396 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7401 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7402 const unsigned char *addr)
7404 struct hclge_vport *vport = hclge_get_vport(handle);
7406 return hclge_rm_mc_addr_common(vport, addr);
7409 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7410 const unsigned char *addr)
7412 struct hclge_dev *hdev = vport->back;
7413 struct hclge_mac_vlan_tbl_entry_cmd req;
7414 enum hclge_cmd_status status;
7415 struct hclge_desc desc[3];
7417 /* mac addr check */
7418 if (!is_multicast_ether_addr(addr)) {
7419 dev_dbg(&hdev->pdev->dev,
7420 "Remove mc mac err! invalid mac:%pM.\n",
7425 memset(&req, 0, sizeof(req));
7426 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7427 hclge_prepare_mac_addr(&req, addr, true);
7428 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7430 /* This mac addr exists; remove this handle's VFID from its entry */
7431 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7435 if (hclge_is_all_function_id_zero(desc))
7436 /* All the vfids are zero, so this entry needs to be deleted */
7437 status = hclge_remove_mac_vlan_tbl(vport, &req);
7439 /* Not all the vfids are zero; just update the vfid bitmap */
7440 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7443 /* Maybe this mac address is in the mta table, but it cannot be
7444 * deleted here because an mta entry represents an address
7445 * range rather than a specific address. The delete action on
7446 * all entries will take effect in update_mta_status, called by
7447 * hns3_nic_set_rx_mode.
7448 */
7455 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7456 enum HCLGE_MAC_ADDR_TYPE mac_type)
7458 struct hclge_vport_mac_addr_cfg *mac_cfg;
7459 struct list_head *list;
7461 if (!vport->vport_id)
7464 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7468 mac_cfg->hd_tbl_status = true;
7469 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7471 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7472 &vport->uc_mac_list : &vport->mc_mac_list;
7474 list_add_tail(&mac_cfg->node, list);
7477 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7479 enum HCLGE_MAC_ADDR_TYPE mac_type)
7481 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7482 struct list_head *list;
7483 bool uc_flag, mc_flag;
7485 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7486 &vport->uc_mac_list : &vport->mc_mac_list;
7488 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7489 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7491 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7492 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7493 if (uc_flag && mac_cfg->hd_tbl_status)
7494 hclge_rm_uc_addr_common(vport, mac_addr);
7496 if (mc_flag && mac_cfg->hd_tbl_status)
7497 hclge_rm_mc_addr_common(vport, mac_addr);
7499 list_del(&mac_cfg->node);
7506 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7507 enum HCLGE_MAC_ADDR_TYPE mac_type)
7509 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7510 struct list_head *list;
7512 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7513 &vport->uc_mac_list : &vport->mc_mac_list;
7515 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7516 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7517 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7519 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7520 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7522 mac_cfg->hd_tbl_status = false;
7524 list_del(&mac_cfg->node);
7530 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7532 struct hclge_vport_mac_addr_cfg *mac, *tmp;
7533 struct hclge_vport *vport;
7536 for (i = 0; i < hdev->num_alloc_vport; i++) {
7537 vport = &hdev->vport[i];
7538 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7539 list_del(&mac->node);
7543 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7544 list_del(&mac->node);
7550 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7551 u16 cmdq_resp, u8 resp_code)
7553 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7554 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
7555 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7556 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7561 dev_err(&hdev->pdev->dev,
7562 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7567 switch (resp_code) {
7568 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7569 case HCLGE_ETHERTYPE_ALREADY_ADD:
7572 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7573 dev_err(&hdev->pdev->dev,
7574 "add mac ethertype failed for manager table overflow.\n");
7575 return_status = -EIO;
7577 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7578 dev_err(&hdev->pdev->dev,
7579 "add mac ethertype failed for key conflict.\n");
7580 return_status = -EIO;
7583 dev_err(&hdev->pdev->dev,
7584 "add mac ethertype failed for undefined, code=%u.\n",
7586 return_status = -EIO;
7589 return return_status;
7592 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7595 struct hclge_mac_vlan_tbl_entry_cmd req;
7596 struct hclge_dev *hdev = vport->back;
7597 struct hclge_desc desc;
7598 u16 egress_port = 0;
7601 if (is_zero_ether_addr(mac_addr))
7604 memset(&req, 0, sizeof(req));
7605 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7606 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7607 req.egress_port = cpu_to_le16(egress_port);
7608 hclge_prepare_mac_addr(&req, mac_addr, false);
7610 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7613 vf_idx += HCLGE_VF_VPORT_START_NUM;
7614 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7616 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7622 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7625 struct hclge_vport *vport = hclge_get_vport(handle);
7626 struct hclge_dev *hdev = vport->back;
7628 vport = hclge_get_vf_vport(hdev, vf);
7632 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7633 dev_info(&hdev->pdev->dev,
7634 "Specified MAC(=%pM) is same as before, no change committed!\n",
7639 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7640 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7645 ether_addr_copy(vport->vf_info.mac, mac_addr);
7646 dev_info(&hdev->pdev->dev,
7647 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7650 return hclge_inform_reset_assert_to_vf(vport);
7653 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7654 const struct hclge_mac_mgr_tbl_entry_cmd *req)
7656 struct hclge_desc desc;
7661 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7662 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7664 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7666 dev_err(&hdev->pdev->dev,
7667 "add mac ethertype failed for cmd_send, ret =%d.\n",
7672 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7673 retval = le16_to_cpu(desc.retval);
7675 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7678 static int init_mgr_tbl(struct hclge_dev *hdev)
7683 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7684 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7686 dev_err(&hdev->pdev->dev,
7687 "add mac ethertype failed, ret =%d.\n",
7696 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7698 struct hclge_vport *vport = hclge_get_vport(handle);
7699 struct hclge_dev *hdev = vport->back;
7701 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7704 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7707 const unsigned char *new_addr = (const unsigned char *)p;
7708 struct hclge_vport *vport = hclge_get_vport(handle);
7709 struct hclge_dev *hdev = vport->back;
7712 /* mac addr check */
7713 if (is_zero_ether_addr(new_addr) ||
7714 is_broadcast_ether_addr(new_addr) ||
7715 is_multicast_ether_addr(new_addr)) {
7716 dev_err(&hdev->pdev->dev,
7717 "Change uc mac err! invalid mac:%pM.\n",
7722 if ((!is_first || is_kdump_kernel()) &&
7723 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7724 dev_warn(&hdev->pdev->dev,
7725 "remove old uc mac address fail.\n");
7727 ret = hclge_add_uc_addr(handle, new_addr);
7729 dev_err(&hdev->pdev->dev,
7730 "add uc mac address fail, ret =%d.\n",
7733 if (!is_first &&
7734 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7735 dev_err(&hdev->pdev->dev,
7736 "restore uc mac address fail.\n");
7741 ret = hclge_pause_addr_cfg(hdev, new_addr);
7743 dev_err(&hdev->pdev->dev,
7744 "configure mac pause address fail, ret =%d.\n",
7749 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7754 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7757 struct hclge_vport *vport = hclge_get_vport(handle);
7758 struct hclge_dev *hdev = vport->back;
7760 if (!hdev->hw.mac.phydev)
7763 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7766 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7767 u8 fe_type, bool filter_en, u8 vf_id)
7769 struct hclge_vlan_filter_ctrl_cmd *req;
7770 struct hclge_desc desc;
7773 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7775 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7776 req->vlan_type = vlan_type;
7777 req->vlan_fe = filter_en ? fe_type : 0;
7780 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7782 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7788 #define HCLGE_FILTER_TYPE_VF 0
7789 #define HCLGE_FILTER_TYPE_PORT 1
7790 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7791 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7792 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7793 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7794 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7795 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7796 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7797 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7798 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7800 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7802 struct hclge_vport *vport = hclge_get_vport(handle);
7803 struct hclge_dev *hdev = vport->back;
7805 if (hdev->pdev->revision >= 0x21) {
7806 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7807 HCLGE_FILTER_FE_EGRESS, enable, 0);
7808 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7809 HCLGE_FILTER_FE_INGRESS, enable, 0);
7811 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7812 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7816 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7818 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7821 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7822 bool is_kill, u16 vlan,
7825 struct hclge_vport *vport = &hdev->vport[vfid];
7826 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7827 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7828 struct hclge_desc desc[2];
7833 /* if the vf vlan table is full, firmware will close the vf vlan
7834 * filter; it is neither possible nor necessary to add new vlan ids.
7835 * If spoof check is enabled and the vf vlan table is full, no new
7836 * vlan should be added, since tx packets with these ids are dropped.
7837 */
7838 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7839 if (vport->vf_info.spoofchk && vlan) {
7840 dev_err(&hdev->pdev->dev,
7841 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7847 hclge_cmd_setup_basic_desc(&desc[0],
7848 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7849 hclge_cmd_setup_basic_desc(&desc[1],
7850 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7852 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7854 vf_byte_off = vfid / 8;
7855 vf_byte_val = 1 << (vfid % 8);
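/* e.g. vfid 10 maps to vf_bitmap byte 10 / 8 = 1 with bit value
 * 1 << (10 % 8) = 0x04; byte offsets beyond HCLGE_MAX_VF_BYTES spill
 * into the second descriptor below.
 */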
7857 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7858 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7860 req0->vlan_id = cpu_to_le16(vlan);
7861 req0->vlan_cfg = is_kill;
7863 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7864 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7866 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7868 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7870 dev_err(&hdev->pdev->dev,
7871 "Send vf vlan command fail, ret =%d.\n",
7877 #define HCLGE_VF_VLAN_NO_ENTRY 2
7878 if (!req0->resp_code || req0->resp_code == 1)
7881 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7882 set_bit(vfid, hdev->vf_vlan_full);
7883 dev_warn(&hdev->pdev->dev,
7884 "vf vlan table is full, vf vlan filter is disabled\n");
7888 dev_err(&hdev->pdev->dev,
7889 "Add vf vlan filter fail, ret =%u.\n",
7892 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7893 if (!req0->resp_code)
7896 /* vf vlan filter is disabled when the vf vlan table is full,
7897 * so no new vlan id can have been added to the vf vlan table.
7898 * Just return 0 without warning, to avoid massive verbose
7899 * print logs at unload time.
7900 */
7901 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7904 dev_err(&hdev->pdev->dev,
7905 "Kill vf vlan filter fail, ret =%u.\n",
7912 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7913 u16 vlan_id, bool is_kill)
7915 struct hclge_vlan_filter_pf_cfg_cmd *req;
7916 struct hclge_desc desc;
7917 u8 vlan_offset_byte_val;
7918 u8 vlan_offset_byte;
7922 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7924 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7925 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7926 HCLGE_VLAN_BYTE_SIZE;
7927 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
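/* e.g. vlan_id 100, assuming HCLGE_VLAN_ID_OFFSET_STEP is 160 (as the
 * vlan_offset_160 name suggests) and HCLGE_VLAN_BYTE_SIZE is 8: offset
 * block 100 / 160 = 0, bitmap byte (100 % 160) / 8 = 12, bit value
 * 1 << (100 % 8) = 0x10.
 */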
7929 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7930 req->vlan_offset = vlan_offset_160;
7931 req->vlan_cfg = is_kill;
7932 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7934 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7936 dev_err(&hdev->pdev->dev,
7937 "port vlan command, send fail, ret =%d.\n", ret);
7941 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7942 u16 vport_id, u16 vlan_id,
7945 u16 vport_idx, vport_num = 0;
7948 if (is_kill && !vlan_id)
7951 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7954 dev_err(&hdev->pdev->dev,
7955 "Set %u vport vlan filter config fail, ret =%d.\n",
7960 /* vlan 0 may be added twice when 8021q module is enabled */
7961 if (!is_kill && !vlan_id &&
7962 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7965 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7966 dev_err(&hdev->pdev->dev,
7967 "Add port vlan failed, vport %u is already in vlan %u\n",
7973 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7974 dev_err(&hdev->pdev->dev,
7975 "Delete port vlan failed, vport %u is not in vlan %u\n",
7980 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7983 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7984 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
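/* The per-vlan vport bitmap above effectively reference-counts the
 * port-wide filter entry: the port filter is programmed only when the
 * first vport joins a vlan (vport_num == 1 after adding) and torn down
 * only when the last vport leaves it (vport_num == 0 after killing),
 * while the per-VF filter is updated on every call.
 */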
7990 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7992 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7993 struct hclge_vport_vtag_tx_cfg_cmd *req;
7994 struct hclge_dev *hdev = vport->back;
7995 struct hclge_desc desc;
7999 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8001 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8002 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8003 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8004 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8005 vcfg->accept_tag1 ? 1 : 0);
8006 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8007 vcfg->accept_untag1 ? 1 : 0);
8008 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8009 vcfg->accept_tag2 ? 1 : 0);
8010 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8011 vcfg->accept_untag2 ? 1 : 0);
8012 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8013 vcfg->insert_tag1_en ? 1 : 0);
8014 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8015 vcfg->insert_tag2_en ? 1 : 0);
8016 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8018 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8019 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8020 HCLGE_VF_NUM_PER_BYTE;
8021 req->vf_bitmap[bmap_index] =
8022 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8024 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8026 dev_err(&hdev->pdev->dev,
8027 "Send port txvlan cfg command fail, ret =%d\n",
8033 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8035 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8036 struct hclge_vport_vtag_rx_cfg_cmd *req;
8037 struct hclge_dev *hdev = vport->back;
8038 struct hclge_desc desc;
8042 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8044 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8045 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8046 vcfg->strip_tag1_en ? 1 : 0);
8047 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8048 vcfg->strip_tag2_en ? 1 : 0);
8049 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8050 vcfg->vlan1_vlan_prionly ? 1 : 0);
8051 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8052 vcfg->vlan2_vlan_prionly ? 1 : 0);
8054 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8055 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8056 HCLGE_VF_NUM_PER_BYTE;
8057 req->vf_bitmap[bmap_index] =
8058 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8060 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8062 dev_err(&hdev->pdev->dev,
8063 "Send port rxvlan cfg command fail, ret =%d\n",
8069 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8070 u16 port_base_vlan_state,
8075 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8076 vport->txvlan_cfg.accept_tag1 = true;
8077 vport->txvlan_cfg.insert_tag1_en = false;
8078 vport->txvlan_cfg.default_tag1 = 0;
8080 vport->txvlan_cfg.accept_tag1 = false;
8081 vport->txvlan_cfg.insert_tag1_en = true;
8082 vport->txvlan_cfg.default_tag1 = vlan_tag;
8085 vport->txvlan_cfg.accept_untag1 = true;
8087 /* accept_tag2 and accept_untag2 are not supported on
8088 * pdev revision 0x20; newer revisions support them, but
8089 * these two fields cannot be configured by the user.
8090 */
8091 vport->txvlan_cfg.accept_tag2 = true;
8092 vport->txvlan_cfg.accept_untag2 = true;
8093 vport->txvlan_cfg.insert_tag2_en = false;
8094 vport->txvlan_cfg.default_tag2 = 0;
8096 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8097 vport->rxvlan_cfg.strip_tag1_en = false;
8098 vport->rxvlan_cfg.strip_tag2_en =
8099 vport->rxvlan_cfg.rx_vlan_offload_en;
8101 vport->rxvlan_cfg.strip_tag1_en =
8102 vport->rxvlan_cfg.rx_vlan_offload_en;
8103 vport->rxvlan_cfg.strip_tag2_en = true;
8105 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8106 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8108 ret = hclge_set_vlan_tx_offload_cfg(vport);
8112 return hclge_set_vlan_rx_offload_cfg(vport);
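/* Net effect of the tag1/tag2 selection above: with port based vlan
 * disabled, user vlans ride in tag1, so TX inserts nothing and tag1
 * stripping stays off (tag2 stripping follows rx_vlan_offload_en);
 * with port based vlan enabled, the hardware inserts default_tag1 on
 * TX and the stripping roles of tag1 and tag2 swap accordingly.
 */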
8115 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8117 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8118 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8119 struct hclge_desc desc;
8122 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8123 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8124 rx_req->ot_fst_vlan_type =
8125 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8126 rx_req->ot_sec_vlan_type =
8127 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8128 rx_req->in_fst_vlan_type =
8129 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8130 rx_req->in_sec_vlan_type =
8131 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8133 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8135 dev_err(&hdev->pdev->dev,
8136 "Send rxvlan protocol type command fail, ret =%d\n",
8141 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8143 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8144 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8145 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8147 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8149 dev_err(&hdev->pdev->dev,
8150 "Send txvlan protocol type command fail, ret =%d\n",
8156 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8158 #define HCLGE_DEF_VLAN_TYPE 0x8100
8160 struct hnae3_handle *handle = &hdev->vport[0].nic;
8161 struct hclge_vport *vport;
8165 if (hdev->pdev->revision >= 0x21) {
8166 /* for revision 0x21, vf vlan filter is per function */
8167 for (i = 0; i < hdev->num_alloc_vport; i++) {
8168 vport = &hdev->vport[i];
8169 ret = hclge_set_vlan_filter_ctrl(hdev,
8170 HCLGE_FILTER_TYPE_VF,
8171 HCLGE_FILTER_FE_EGRESS,
8178 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8179 HCLGE_FILTER_FE_INGRESS, true,
8184 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8185 HCLGE_FILTER_FE_EGRESS_V1_B,
8191 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8193 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8194 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8195 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8196 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8197 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8198 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8200 ret = hclge_set_vlan_protocol_type(hdev);
8204 for (i = 0; i < hdev->num_alloc_vport; i++) {
8207 vport = &hdev->vport[i];
8208 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8210 ret = hclge_vlan_offload_cfg(vport,
8211 vport->port_base_vlan_cfg.state,
8217 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8220 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8223 struct hclge_vport_vlan_cfg *vlan;
8225 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8229 vlan->hd_tbl_status = writen_to_tbl;
8230 vlan->vlan_id = vlan_id;
8232 list_add_tail(&vlan->node, &vport->vlan_list);
8235 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8237 struct hclge_vport_vlan_cfg *vlan, *tmp;
8238 struct hclge_dev *hdev = vport->back;
8241 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8242 if (!vlan->hd_tbl_status) {
8243 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8245 vlan->vlan_id, false);
8247 dev_err(&hdev->pdev->dev,
8248 "restore vport vlan list failed, ret=%d\n",
8253 vlan->hd_tbl_status = true;
8259 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8262 struct hclge_vport_vlan_cfg *vlan, *tmp;
8263 struct hclge_dev *hdev = vport->back;
8265 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8266 if (vlan->vlan_id == vlan_id) {
8267 if (is_write_tbl && vlan->hd_tbl_status)
8268 hclge_set_vlan_filter_hw(hdev,
8274 list_del(&vlan->node);
8281 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8283 struct hclge_vport_vlan_cfg *vlan, *tmp;
8284 struct hclge_dev *hdev = vport->back;
8286 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8287 if (vlan->hd_tbl_status)
8288 hclge_set_vlan_filter_hw(hdev,
8294 vlan->hd_tbl_status = false;
8296 list_del(&vlan->node);
8302 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8304 struct hclge_vport_vlan_cfg *vlan, *tmp;
8305 struct hclge_vport *vport;
8308 for (i = 0; i < hdev->num_alloc_vport; i++) {
8309 vport = &hdev->vport[i];
8310 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8311 list_del(&vlan->node);
8317 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8319 struct hclge_vport *vport = hclge_get_vport(handle);
8320 struct hclge_vport_vlan_cfg *vlan, *tmp;
8321 struct hclge_dev *hdev = vport->back;
8326 for (i = 0; i < hdev->num_alloc_vport; i++) {
8327 vport = &hdev->vport[i];
8328 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8329 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8330 state = vport->port_base_vlan_cfg.state;
8332 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8333 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8334 vport->vport_id, vlan_id,
8339 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8342 if (!vlan->hd_tbl_status)
8344 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8346 vlan->vlan_id, false);
8353 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8355 struct hclge_vport *vport = hclge_get_vport(handle);
8357 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8358 vport->rxvlan_cfg.strip_tag1_en = false;
8359 vport->rxvlan_cfg.strip_tag2_en = enable;
8361 vport->rxvlan_cfg.strip_tag1_en = enable;
8362 vport->rxvlan_cfg.strip_tag2_en = true;
8364 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8365 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8366 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8368 return hclge_set_vlan_rx_offload_cfg(vport);
8371 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8372 u16 port_base_vlan_state,
8373 struct hclge_vlan_info *new_info,
8374 struct hclge_vlan_info *old_info)
8376 struct hclge_dev *hdev = vport->back;
8379 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8380 hclge_rm_vport_all_vlan_table(vport, false);
8381 return hclge_set_vlan_filter_hw(hdev,
8382 htons(new_info->vlan_proto),
8388 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8389 vport->vport_id, old_info->vlan_tag,
8394 return hclge_add_vport_all_vlan_table(vport);
8397 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8398 struct hclge_vlan_info *vlan_info)
8400 struct hnae3_handle *nic = &vport->nic;
8401 struct hclge_vlan_info *old_vlan_info;
8402 struct hclge_dev *hdev = vport->back;
8405 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8407 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8411 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8412 /* add new VLAN tag */
8413 ret = hclge_set_vlan_filter_hw(hdev,
8414 htons(vlan_info->vlan_proto),
8416 vlan_info->vlan_tag,
8421 /* remove old VLAN tag */
8422 ret = hclge_set_vlan_filter_hw(hdev,
8423 htons(old_vlan_info->vlan_proto),
8425 old_vlan_info->vlan_tag,
8433 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8438 /* update state only when disabling/enabling port based VLAN */
8439 vport->port_base_vlan_cfg.state = state;
8440 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8441 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8443 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8446 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8447 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8448 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8453 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8454 enum hnae3_port_base_vlan_state state,
8457 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8459 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8461 return HNAE3_PORT_BASE_VLAN_ENABLE;
8464 return HNAE3_PORT_BASE_VLAN_DISABLE;
8465 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8466 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8468 return HNAE3_PORT_BASE_VLAN_MODIFY;
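/* Reading the branches above together gives this decision table
 * (current state x requested vlan):
 *   DISABLE + vlan == 0 -> NOCHANGE   (nothing to program)
 *   DISABLE + vlan != 0 -> ENABLE     (start port based vlan)
 *   ENABLE  + vlan == 0 -> DISABLE    (clear port based vlan)
 *   ENABLE  + same vlan -> NOCHANGE
 *   ENABLE  + new vlan  -> MODIFY     (replace the old tag)
 */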
8472 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8473 u16 vlan, u8 qos, __be16 proto)
8475 struct hclge_vport *vport = hclge_get_vport(handle);
8476 struct hclge_dev *hdev = vport->back;
8477 struct hclge_vlan_info vlan_info;
8481 if (hdev->pdev->revision == 0x20)
8484 vport = hclge_get_vf_vport(hdev, vfid);
8488 /* qos is a 3-bit value, so it cannot be bigger than 7 */
8489 if (vlan > VLAN_N_VID - 1 || qos > 7)
8491 if (proto != htons(ETH_P_8021Q))
8492 return -EPROTONOSUPPORT;
8494 state = hclge_get_port_base_vlan_state(vport,
8495 vport->port_base_vlan_cfg.state,
8497 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8500 vlan_info.vlan_tag = vlan;
8501 vlan_info.qos = qos;
8502 vlan_info.vlan_proto = ntohs(proto);
8504 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8505 return hclge_update_port_base_vlan_cfg(vport, state,
8508 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8509 vport->vport_id, state,
8516 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8517 u16 vlan_id, bool is_kill)
8519 struct hclge_vport *vport = hclge_get_vport(handle);
8520 struct hclge_dev *hdev = vport->back;
8521 bool writen_to_tbl = false;
8524 /* When the device is resetting, firmware is unable to handle the
8525 * mailbox. Just record the vlan id, and remove it after the
8526 * reset finishes.
8527 */
8528 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8529 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8533 /* when port based vlan is enabled, we use the port based vlan as the
8534 * vlan filter entry. In this case, we don't update the vlan filter
8535 * table when the user adds a new vlan or removes an existing one; we
8536 * just update the vport vlan list. The vlan ids in the list are not
8537 * written to the vlan filter table until port based vlan is disabled.
8538 */
8539 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8540 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8542 writen_to_tbl = true;
8547 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8549 hclge_add_vport_vlan_table(vport, vlan_id,
8551 } else if (is_kill) {
8552 /* when removing the hw vlan filter failed, record the vlan id,
8553 * and try to remove it from hw later, to stay consistent
8554 * with the vlan list.
8555 */
8556 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8561 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8563 #define HCLGE_MAX_SYNC_COUNT 60
8565 int i, ret, sync_cnt = 0;
8568 /* start from vport 1 since the PF is always alive */
8569 for (i = 0; i < hdev->num_alloc_vport; i++) {
8570 struct hclge_vport *vport = &hdev->vport[i];
8572 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8574 while (vlan_id != VLAN_N_VID) {
8575 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8576 vport->vport_id, vlan_id,
8578 if (ret && ret != -EINVAL)
8581 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8582 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8585 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8588 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
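/* The sync loop above replays vlan deletions that failed earlier (e.g.
 * recorded while a reset was in flight): it walks each vport's
 * vlan_del_fail_bmap, retries the hardware delete, clears the bit and
 * drops the vport-list entry on success, and bails out after
 * HCLGE_MAX_SYNC_COUNT (60) deletions per pass so the periodic service
 * task is never monopolized.
 */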
8594 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8596 struct hclge_config_max_frm_size_cmd *req;
8597 struct hclge_desc desc;
8599 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8601 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8602 req->max_frm_size = cpu_to_le16(new_mps);
8603 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8605 return hclge_cmd_send(&hdev->hw, &desc, 1);
8608 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8610 struct hclge_vport *vport = hclge_get_vport(handle);
8612 return hclge_set_vport_mtu(vport, new_mtu);
8615 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8617 struct hclge_dev *hdev = vport->back;
8618 int i, max_frm_size, ret;
8620 /* HW supports 2 layers of VLAN tags */
8621 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8622 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8623 max_frm_size > HCLGE_MAC_MAX_FRAME)
8626 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
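/* e.g. new_mtu = 1500 gives max_frm_size = 1500 + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + 2 * VLAN_HLEN (2 * 4) = 1526 bytes, which is then
 * raised to HCLGE_MAC_DEFAULT_FRAME if that constant is larger.
 */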
8627 mutex_lock(&hdev->vport_lock);
8628 /* VF's mps must fit within hdev->mps */
8629 if (vport->vport_id && max_frm_size > hdev->mps) {
8630 mutex_unlock(&hdev->vport_lock);
8632 } else if (vport->vport_id) {
8633 vport->mps = max_frm_size;
8634 mutex_unlock(&hdev->vport_lock);
8638 /* PF's mps must be no less than any VF's mps */
8639 for (i = 1; i < hdev->num_alloc_vport; i++)
8640 if (max_frm_size < hdev->vport[i].mps) {
8641 mutex_unlock(&hdev->vport_lock);
8645 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8647 ret = hclge_set_mac_mtu(hdev, max_frm_size);
8649 dev_err(&hdev->pdev->dev,
8650 "Change mtu fail, ret =%d\n", ret);
8654 hdev->mps = max_frm_size;
8655 vport->mps = max_frm_size;
8657 ret = hclge_buffer_alloc(hdev);
8659 dev_err(&hdev->pdev->dev,
8660 "Allocate buffer fail, ret =%d\n", ret);
8663 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8664 mutex_unlock(&hdev->vport_lock);
8668 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8671 struct hclge_reset_tqp_queue_cmd *req;
8672 struct hclge_desc desc;
8675 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8677 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8678 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8680 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8682 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8684 dev_err(&hdev->pdev->dev,
8685 "Send tqp reset cmd error, status =%d\n", ret);
8692 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8694 struct hclge_reset_tqp_queue_cmd *req;
8695 struct hclge_desc desc;
8698 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8700 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8701 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8703 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8705 dev_err(&hdev->pdev->dev,
8706 "Get reset status error, status =%d\n", ret);
8710 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8713 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8715 struct hnae3_queue *queue;
8716 struct hclge_tqp *tqp;
8718 queue = handle->kinfo.tqp[queue_id];
8719 tqp = container_of(queue, struct hclge_tqp, q);
8724 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8726 struct hclge_vport *vport = hclge_get_vport(handle);
8727 struct hclge_dev *hdev = vport->back;
8728 int reset_try_times = 0;
8733 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8735 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8737 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8741 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8743 dev_err(&hdev->pdev->dev,
8744 "Send reset tqp cmd fail, ret = %d\n", ret);
8748 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8749 reset_status = hclge_get_reset_status(hdev, queue_gid);
8753 /* Wait for tqp hw reset */
8754 usleep_range(1000, 1200);
8757 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8758 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8762 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8764 dev_err(&hdev->pdev->dev,
8765 "Deassert the soft reset fail, ret = %d\n", ret);
8770 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8772 struct hclge_dev *hdev = vport->back;
8773 int reset_try_times = 0;
8778 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8780 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8782 dev_warn(&hdev->pdev->dev,
8783 "Send reset tqp cmd fail, ret = %d\n", ret);
8787 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8788 reset_status = hclge_get_reset_status(hdev, queue_gid);
8792 /* Wait for tqp hw reset */
8793 usleep_range(1000, 1200);
8796 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8797 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8801 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8803 dev_warn(&hdev->pdev->dev,
8804 "Deassert the soft reset fail, ret = %d\n", ret);
8807 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8809 struct hclge_vport *vport = hclge_get_vport(handle);
8810 struct hclge_dev *hdev = vport->back;
8812 return hdev->fw_version;
8815 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8817 struct phy_device *phydev = hdev->hw.mac.phydev;
8822 phy_set_asym_pause(phydev, rx_en, tx_en);
8825 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8829 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8832 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8834 dev_err(&hdev->pdev->dev,
8835 "configure pauseparam error, ret = %d.\n", ret);
8840 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8842 struct phy_device *phydev = hdev->hw.mac.phydev;
8843 u16 remote_advertising = 0;
8844 u16 local_advertising;
8845 u32 rx_pause, tx_pause;
8848 if (!phydev->link || !phydev->autoneg)
8851 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8854 remote_advertising = LPA_PAUSE_CAP;
8856 if (phydev->asym_pause)
8857 remote_advertising |= LPA_PAUSE_ASYM;
8859 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8860 remote_advertising);
8861 tx_pause = flowctl & FLOW_CTRL_TX;
8862 rx_pause = flowctl & FLOW_CTRL_RX;
8864 if (phydev->duplex == HCLGE_MAC_HALF) {
8869 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
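/* The resolution above follows the usual 802.3 pause autonegotiation
 * recipe: the local advertisement comes from phydev->advertising, the
 * partner's from phydev->pause / phydev->asym_pause, and
 * mii_resolve_flowctrl_fdx() combines them into FLOW_CTRL_TX/RX flags;
 * half duplex disables pause in both directions before the MAC is
 * programmed via hclge_cfg_pauseparam().
 */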
8872 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8873 u32 *rx_en, u32 *tx_en)
8875 struct hclge_vport *vport = hclge_get_vport(handle);
8876 struct hclge_dev *hdev = vport->back;
8877 struct phy_device *phydev = hdev->hw.mac.phydev;
8879 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8881 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8887 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8890 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8893 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8902 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8903 u32 rx_en, u32 tx_en)
8906 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8907 else if (rx_en && !tx_en)
8908 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8909 else if (!rx_en && tx_en)
8910 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8912 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8914 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8917 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8918 u32 rx_en, u32 tx_en)
8920 struct hclge_vport *vport = hclge_get_vport(handle);
8921 struct hclge_dev *hdev = vport->back;
8922 struct phy_device *phydev = hdev->hw.mac.phydev;
8926 fc_autoneg = hclge_get_autoneg(handle);
8927 if (auto_neg != fc_autoneg) {
8928 dev_info(&hdev->pdev->dev,
8929 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8934 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8935 dev_info(&hdev->pdev->dev,
8936 "Priority flow control enabled. Cannot set link flow control.\n");
8940 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8942 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8945 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8948 return phy_start_aneg(phydev);
8953 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8954 u8 *auto_neg, u32 *speed, u8 *duplex)
8956 struct hclge_vport *vport = hclge_get_vport(handle);
8957 struct hclge_dev *hdev = vport->back;
8960 *speed = hdev->hw.mac.speed;
8962 *duplex = hdev->hw.mac.duplex;
8964 *auto_neg = hdev->hw.mac.autoneg;
8967 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8970 struct hclge_vport *vport = hclge_get_vport(handle);
8971 struct hclge_dev *hdev = vport->back;
8974 *media_type = hdev->hw.mac.media_type;
8977 *module_type = hdev->hw.mac.module_type;
8980 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8981 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8983 struct hclge_vport *vport = hclge_get_vport(handle);
8984 struct hclge_dev *hdev = vport->back;
8985 struct phy_device *phydev = hdev->hw.mac.phydev;
8986 int mdix_ctrl, mdix, is_resolved;
8987 unsigned int retval;
8990 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8991 *tp_mdix = ETH_TP_MDI_INVALID;
8995 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8997 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8998 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8999 HCLGE_PHY_MDIX_CTRL_S);
9001 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9002 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9003 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9005 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9007 switch (mdix_ctrl) {
9009 *tp_mdix_ctrl = ETH_TP_MDI;
9012 *tp_mdix_ctrl = ETH_TP_MDI_X;
9015 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9018 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9023 *tp_mdix = ETH_TP_MDI_INVALID;
9025 *tp_mdix = ETH_TP_MDI_X;
9027 *tp_mdix = ETH_TP_MDI;
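/* The readout above uses the PHY's paged-register scheme: select the
 * MDI/MDIX page through HCLGE_PHY_PAGE_REG, read the control register
 * for the configured mode (fixed MDI, fixed MDI-X, or auto) and the
 * status register for the resolved state, then switch back to the
 * copper page so normal phylib accesses keep working.
 */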
static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");

	dev_info(dev, "PF info end.\n");
}
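
/* Client init can race with a concurrent reset: the reset counter is
 * sampled before init_instance() and compared afterwards. If a reset
 * started or completed in between, the half-initialized client is torn
 * down again and -EBUSY is returned.
 */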
static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->nic.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.reset_cnt;
	int ret;

	ret = client->ops->init_instance(&vport->nic);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_nic_err;
	}

	/* Enable nic hw error interrupts */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto init_nic_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->vport->nic))
		hclge_info_show(hdev);

	return ret;

init_nic_err:
	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	client->ops->uninit_instance(&vport->nic, 0);

	return ret;
}
static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->roce.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_roce_err;
	}

	/* Enable roce ras interrupts */
	ret = hclge_config_rocee_ras_interrupt(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable roce ras interrupts\n", ret);
		goto init_roce_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;

init_roce_err:
	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

	return ret;
}
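
/* Dispatch client registration per vport: a KNIC client also triggers
 * RoCE client init (if a RoCE client is already registered), while a
 * RoCE client is only set up when the hardware supports RoCE.
 */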
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = hclge_init_nic_client_instance(ae_dev, vport);
			if (ret)
				goto clear_nic;

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			ret = hclge_init_roce_client_instance(ae_dev, vport);
			if (ret)
				goto clear_roce;

			break;
		default:
			return -EINVAL;
		}
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (hdev->nic_client && client->ops->uninit_instance) {
			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
				msleep(HCLGE_WAIT_RESET_DONE);

			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
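
/* Bring up the PCI function: a 64-bit DMA mask is preferred with a
 * fall back to 32-bit addressing when the platform cannot provide it;
 * BAR2 holds the command/configuration register space mapped below.
 */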
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
}
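
/* FLR preparation is retried a bounded number of times: on failure the
 * reset semaphore is dropped and re-taken after a delay so a pending
 * reset handled elsewhere can make progress in the meantime.
 */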
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_RETRY_WAIT_MS	500
#define HCLGE_FLR_RETRY_CNT	5

	struct hclge_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

retry:
	down(&hdev->reset_sem);
	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	hdev->reset_type = HNAE3_FLR_RESET;
	ret = hclge_reset_prepare(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
			ret);
		if (hdev->reset_pending ||
		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
			dev_err(&hdev->pdev->dev,
				"reset_pending:0x%lx, retry_cnt:%d\n",
				hdev->reset_pending, retry_cnt);
			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
			up(&hdev->reset_sem);
			msleep(HCLGE_FLR_RETRY_WAIT_MS);
			goto retry;
		}
	}

	/* disable misc vector before FLR done */
	hclge_enable_vector(&hdev->misc_vector, false);
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
	hdev->rst_stats.flr_rst_cnt++;
}
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	int ret;

	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_reset_rebuild(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vf(%u) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}
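
/* Main init path for the PF: bring up PCI and the firmware command
 * queue first, then query capabilities and configure MSI-X, queues,
 * vports, MAC, VLAN, scheduler, RSS and flow director in order; each
 * step unwinds through the error labels at the bottom on failure.
 */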
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto out;
	}

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;

	/* HW supports 2-layer VLAN */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	spin_lock_init(&hdev->fd_rule_lock);
	sema_init(&hdev->reset_sem, 1);

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto out;
	}

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
		goto err_pci_uninit;
	}

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"Misc IRQ(vector0) init error, ret = %d.\n",
			ret);
		goto err_msi_uninit;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mdio config fail ret=%d\n", ret);
			goto err_msi_irq_uninit;
		}
	}

	ret = hclge_init_umv_space(hdev);
	if (ret) {
		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	hclge_rss_init_cfg(hdev);
	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);

	/* Setup affinity after service timer setup because add_timer_on
	 * is called in affinity notify.
	 */
	hclge_misc_affinity_setup(hdev);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors that already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* request delayed reset for the error recovery because an immediate
	 * global reset on a PF would affect the pending initialization of
	 * other PFs
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	hclge_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
}
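
/* VF spoof check is enforced by two independent knobs: the switch
 * anti-spoof parameter for source MAC addresses and the NIC ingress
 * VLAN filter for VLAN tags; both are toggled together below.
 */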
static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_config_switch_param(hdev, vf, enable,
					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
}
static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					  HCLGE_FILTER_FE_NIC_INGRESS_B,
					  enable, vf);
}
static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
{
	int ret;

	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set vf %d mac spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);
		return ret;
	}

	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set vf %d vlan spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);

	return ret;
}
static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
				 bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_spoofchk = enable ? 1 : 0;
	int ret;

	if (hdev->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.spoofchk == new_spoofchk)
		return 0;

	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
			 vf);
	else if (enable && hclge_is_umv_space_full(vport))
		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
			 vf);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
	if (ret)
		return ret;

	vport->vf_info.spoofchk = new_spoofchk;
	return 0;
}
static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	int i;

	if (hdev->pdev->revision == 0x20)
		return 0;

	/* resume the vf spoof check state after reset */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
					       vport->vf_info.spoofchk);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_trusted = enable ? 1 : 0;
	bool en_bc_pmc;
	int ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.trusted == new_trusted)
		return 0;

	/* Disable promisc mode for VF if it is not trusted any more. */
	if (!enable && vport->vf_info.promisc_enable) {
		en_bc_pmc = hdev->pdev->revision != 0x20;
		ret = hclge_set_vport_promisc_mode(vport, false, false,
						   en_bc_pmc);
		if (ret)
			return ret;
		vport->vf_info.promisc_enable = 0;
		hclge_inform_vf_promisc_info(vport);
	}

	vport->vf_info.trusted = new_trusted;

	return 0;
}
static void hclge_reset_vf_rate(struct hclge_dev *hdev)
{
	int ret;
	int vf;

	/* reset vf rate to default value */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		struct hclge_vport *vport = &hdev->vport[vf];

		vport->vf_info.max_tx_rate = 0;
		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"vf%d failed to reset to default, ret=%d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}
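
/* Only a maximum TX rate is supported: min_tx_rate must be 0 and
 * max_tx_rate must not exceed the MAC's maximum speed. A max rate of
 * 0 means unlimited, which is also the firmware default after reset.
 */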
static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
				     int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate != 0 ||
	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
		dev_err(&hdev->pdev->dev,
			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
		return -EINVAL;
	}

	return 0;
}
static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
			     int min_tx_rate, int max_tx_rate, bool force)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
	if (ret)
		return ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
		return 0;

	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
	if (ret)
		return ret;

	vport->vf_info.max_tx_rate = max_tx_rate;

	return 0;
}
static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* resume the vf max_tx_rate after reset */
	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
		vport = hclge_get_vf_vport(hdev, vf);
		if (!vport)
			return -EINVAL;

		/* zero means max rate, after reset, firmware already set it to
		 * max rate, so just continue.
		 */
		if (!vport->vf_info.max_tx_rate)
			continue;

		ret = hclge_set_vf_rate(handle, vf, 0,
					vport->vf_info.max_tx_rate, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%d failed to resume tx_rate:%u, ret=%d\n",
				vf, vport->vf_info.max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}
static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	hclge_reset_umv_space(hdev);

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	/* Log and clear the hw errors that already occurred */
	hclge_handle_all_hns_hw_errors(ae_dev);

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);
	ret = hclge_reset_vport_spoofchk(hdev);
	if (ret)
		return ret;

	ret = hclge_resume_vf_rate(hdev);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_reset_vf_rate(hdev);
	hclge_misc_affinity_teardown(hdev);
	hclge_state_uninit(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_uninit_umv_space(hdev);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_mac_table(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->rss_size_max,
		     vport->alloc_tqps / kinfo->num_tc);
}
static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->rss_size_max;
}
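
/* Changing the channel count reshapes the RSS TC mode: the per-TC size
 * programmed to hardware is log2 of rss_size rounded up to a power of
 * two (e.g. rss_size 6 -> roundup 8 -> tc_size 3), and each enabled TC
 * gets an offset of rss_size * tc_index into the queue range.
 */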
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
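
/* Register values come back packed in command descriptors: the first
 * descriptor loses HCLGE_32_BIT_DESC_NODATA_LEN data words to the
 * response head, later descriptors are consumed in full, hence the
 * DIV_ROUND_UP over (regs_num + nodata words) per-descriptor entries.
 */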
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3
#define BD_LIST_MAX_NUM		30
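
/* The ethtool register dump is laid out in REG_NUM_PER_LINE-word lines;
 * each block is padded with SEPARATOR_VALUE words up to a line boundary,
 * which keeps the per-block boundaries recoverable from the raw dump.
 */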
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	/* prepare 4 commands to query DFX BD number */
	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, 4);
}
static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list,
				    u32 type_num)
{
#define HCLGE_DFX_REG_BD_NUM	4

	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}
static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	desc = desc_src;
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}
static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg_num + separator_num;
}
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int data_len_per_desc, data_len, bd_num, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	data_len_per_desc = sizeof_field(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		data_len = data_len_per_desc * bd_num;
		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
	}

	return ret;
}
static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len, i;
	int bd_num_list[BD_LIST_MAX_NUM];
	struct hclge_desc *desc_src;
	u32 *reg = data;
	int ret;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		return ret;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src) {
		dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			break;
		}

		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	kfree(desc_src);
	return ret;
}
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num, separator_num;
	int data_num_sum;
	u32 *reg = data;

	/* fetching per-PF registers values from PF PCIe register space */
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum = reg_num + separator_num;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum += reg_num + separator_num;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

	return data_num_sum;
}
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int regs_lines_32_bit, regs_lines_64_bit;
	int dfx_regs_len;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}
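
/* The dump written to user space follows the same order as the length
 * calculation above: direct PF register reads, then the 32-bit and
 * 64-bit firmware-queried ranges, then the DFX blocks, each padded to
 * a separator line boundary.
 */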
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, reg_num, separator_num, ret;
	u32 *reg = data;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_32_bit;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_64_bit * 2;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.flr_prepare = hclge_flr_prepare,
	.flr_done = hclge_flr_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.del_all_fd_entries = hclge_del_all_fd_entries,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.restore_fd_rules = hclge_restore_fd_entries,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_run_cmd = hclge_dbg_run_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.restore_vlan_table = hclge_restore_vlan_table,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
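
/* Module entry: the driver-private workqueue is created before the algo
 * is registered with the hnae3 framework, since registration matches
 * existing ae_devs and may already schedule service tasks on it.
 */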
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}
static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);