// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
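/* HCLGE_STATS_READ() dereferences a u64 counter at a byte offset inside a
 * stats struct; the two macros compose, e.g.:
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */
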
#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500
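/* The sync times above are assumed to be delays in milliseconds between
 * reset-handshake polls, with HCLGE_PF_RESET_SYNC_CNT bounding the number
 * of polls (inferred from the naming; the polling loops live later in
 * this file).
 */
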
/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

#define HCLGE_VF_VPORT_START_NUM	1
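/* vport 0 is the PF itself; VF vports are numbered from
 * HCLGE_VF_VPORT_START_NUM onwards.
 */
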
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
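/* The entry above matches 01:80:c2:00:00:0e, the IEEE 802.1AB (LLDP)
 * nearest-bridge multicast address; the manager table is assumed to trap
 * such frames for the management function (inferred from the table name).
 */
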
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
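/* This is the well-known 40-byte Toeplitz sample key that many NIC drivers
 * ship as their default RSS hash key.
 */
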
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	int i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);
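
	/* The formula below computes 1 + ceil((reg_num - 3) / 4), which
	 * matches a layout of three u64 stats in the first descriptor
	 * (next to the command head) and four in each subsequent one;
	 * e.g. reg_num = 32 gives 1 + 7 + 1 = 9 descriptors.
	 */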
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has both a TX and a RX queue */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return data;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
			hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
					HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
			hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
					HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* the NIC's MSI-X vector number always equals the RoCE's */
		hdev->num_nic_msi = hdev->num_roce_msi;

		/* PF should have NIC vectors and RoCE vectors,
		 * NIC vectors are queued before RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
			hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
					HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		hdev->num_nic_msi = hdev->num_msi;
	}

	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to supporting all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
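	/* The 48-bit MAC address is delivered split across two config words:
	 * param[2] holds the low 32 bits and param[3] the high 16 bits, which
	 * are shifted up by 32 (written as << 31 << 1 above, presumably to
	 * sidestep shift-width warnings).
	 */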

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure used to return the queried parameters
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length sent to hardware is in units of 4 bytes */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal number of queue pairs: one per vport */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently non-contiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the initial affinity based on the PCI func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);
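	/* Note: the max value above is packed with the MIN mask/shift; this
	 * is harmless as long as both 16-bit fields share the same bit
	 * layout, which appears to be the assumption here.
	 */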

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure a 1:1 mapping between irq and queue by default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
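	/* The "- 1" above is assumed to reserve one vector for the misc
	 * interrupt, so each remaining NIC vector can own one queue per TC.
	 */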

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is allocated in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);
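	/* shared_std is therefore the larger of the DCB/non-DCB minimum and
	 * one MPS per TC plus one spare MPS, rounded up to the buffer unit.
	 */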

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
				HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
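		/* each private buffer holds its high waterline plus
		 * hdev->dv_buf_size of headroom (dv appears to be the
		 * delay-value compensation reported by the firmware in
		 * hclge_query_pf_resource()).
		 */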
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}

static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				      struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP	0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
		      COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* Only the first descriptor has the NEXT flag set */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

2170 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2171 struct hclge_pkt_buf_alloc *buf_alloc)
2173 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2174 struct hclge_rx_com_thrd *req;
2175 struct hclge_desc desc[2];
2176 struct hclge_tc_thrd *tc;
2180 for (i = 0; i < 2; i++) {
2181 hclge_cmd_setup_basic_desc(&desc[i],
2182 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2183 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2185 /* The first descriptor sets the NEXT bit to 1 */
2187 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2189 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2191 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2192 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2194 req->com_thrd[j].high =
2195 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2196 req->com_thrd[j].high |=
2197 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2198 req->com_thrd[j].low =
2199 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2200 req->com_thrd[j].low |=
2201 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2205 /* Send 2 descriptors at one time */
2206 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2208 dev_err(&hdev->pdev->dev,
2209 "common threshold config cmd failed %d\n", ret);
2213 static int hclge_common_wl_config(struct hclge_dev *hdev,
2214 struct hclge_pkt_buf_alloc *buf_alloc)
2216 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2217 struct hclge_rx_com_wl *req;
2218 struct hclge_desc desc;
2221 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2223 req = (struct hclge_rx_com_wl *)desc.data;
2224 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2225 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2227 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2228 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2230 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2232 dev_err(&hdev->pdev->dev,
2233 "common waterline config cmd failed %d\n", ret);
2238 int hclge_buffer_alloc(struct hclge_dev *hdev)
2240 struct hclge_pkt_buf_alloc *pkt_buf;
2243 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2247 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2249 dev_err(&hdev->pdev->dev,
2250 "could not calc tx buffer size for all TCs %d\n", ret);
2254 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2256 dev_err(&hdev->pdev->dev,
2257 "could not alloc tx buffers %d\n", ret);
2261 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2263 dev_err(&hdev->pdev->dev,
2264 "could not calc rx priv buffer size for all TCs %d\n",
2269 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2271 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2276 if (hnae3_dev_dcb_supported(hdev)) {
2277 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2279 dev_err(&hdev->pdev->dev,
2280 "could not configure rx private waterline %d\n",
2285 ret = hclge_common_thrd_config(hdev, pkt_buf);
2287 dev_err(&hdev->pdev->dev,
2288 "could not configure common threshold %d\n",
2294 ret = hclge_common_wl_config(hdev, pkt_buf);
2296 dev_err(&hdev->pdev->dev,
2297 "could not configure common waterline %d\n", ret);
2304 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2306 struct hnae3_handle *roce = &vport->roce;
2307 struct hnae3_handle *nic = &vport->nic;
2309 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2311 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2312 vport->back->num_msi_left == 0)
2315 roce->rinfo.base_vector = vport->back->roce_base_vector;
2317 roce->rinfo.netdev = nic->kinfo.netdev;
2318 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2320 roce->pdev = nic->pdev;
2321 roce->ae_algo = nic->ae_algo;
2322 roce->numa_node_mask = nic->numa_node_mask;
2327 static int hclge_init_msi(struct hclge_dev *hdev)
2329 struct pci_dev *pdev = hdev->pdev;
2333 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2335 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2338 "failed(%d) to allocate MSI/MSI-X vectors\n",
2342 if (vectors < hdev->num_msi)
2343 dev_warn(&hdev->pdev->dev,
2344 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2345 hdev->num_msi, vectors);
2347 hdev->num_msi = vectors;
2348 hdev->num_msi_left = vectors;
2350 hdev->base_msi_vector = pdev->irq;
2351 hdev->roce_base_vector = hdev->base_msi_vector +
2352 hdev->roce_base_msix_offset;
2354 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2355 sizeof(u16), GFP_KERNEL);
2356 if (!hdev->vector_status) {
2357 pci_free_irq_vectors(pdev);
2361 for (i = 0; i < hdev->num_msi; i++)
2362 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2364 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2365 sizeof(int), GFP_KERNEL);
2366 if (!hdev->vector_irq) {
2367 pci_free_irq_vectors(pdev);
2374 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2376 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2377 duplex = HCLGE_MAC_FULL;
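/* Example behaviour of the check above: half duplex is only valid at
 * 10M/100M, so any other speed coerces the setting to full duplex:
 *
 *	hclge_check_speed_dup(duplex, HCLGE_MAC_SPEED_100M) keeps duplex,
 *	hclge_check_speed_dup(duplex, HCLGE_MAC_SPEED_25G) returns
 *	HCLGE_MAC_FULL regardless of the duplex passed in.
 */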
2382 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2385 struct hclge_config_mac_speed_dup_cmd *req;
2386 struct hclge_desc desc;
2389 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2391 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2394 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2397 case HCLGE_MAC_SPEED_10M:
2398 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2399 HCLGE_CFG_SPEED_S, 6);
2401 case HCLGE_MAC_SPEED_100M:
2402 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403 HCLGE_CFG_SPEED_S, 7);
2405 case HCLGE_MAC_SPEED_1G:
2406 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407 HCLGE_CFG_SPEED_S, 0);
2409 case HCLGE_MAC_SPEED_10G:
2410 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411 HCLGE_CFG_SPEED_S, 1);
2413 case HCLGE_MAC_SPEED_25G:
2414 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415 HCLGE_CFG_SPEED_S, 2);
2417 case HCLGE_MAC_SPEED_40G:
2418 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419 HCLGE_CFG_SPEED_S, 3);
2421 case HCLGE_MAC_SPEED_50G:
2422 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423 HCLGE_CFG_SPEED_S, 4);
2425 case HCLGE_MAC_SPEED_100G:
2426 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2427 HCLGE_CFG_SPEED_S, 5);
2430 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2434 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2437 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2439 dev_err(&hdev->pdev->dev,
2440 "mac speed/duplex config cmd failed %d.\n", ret);
2447 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2451 duplex = hclge_check_speed_dup(duplex, speed);
2452 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2455 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2459 hdev->hw.mac.speed = speed;
2460 hdev->hw.mac.duplex = duplex;
2465 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2468 struct hclge_vport *vport = hclge_get_vport(handle);
2469 struct hclge_dev *hdev = vport->back;
2471 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2474 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2476 struct hclge_config_auto_neg_cmd *req;
2477 struct hclge_desc desc;
2481 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2483 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2485 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2486 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2488 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2490 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2496 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2498 struct hclge_vport *vport = hclge_get_vport(handle);
2499 struct hclge_dev *hdev = vport->back;
2501 if (!hdev->hw.mac.support_autoneg) {
2503 dev_err(&hdev->pdev->dev,
2504 "autoneg is not supported by current port\n");
2511 return hclge_set_autoneg_en(hdev, enable);
2514 static int hclge_get_autoneg(struct hnae3_handle *handle)
2516 struct hclge_vport *vport = hclge_get_vport(handle);
2517 struct hclge_dev *hdev = vport->back;
2518 struct phy_device *phydev = hdev->hw.mac.phydev;
2521 return phydev->autoneg;
2523 return hdev->hw.mac.autoneg;
2526 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2528 struct hclge_vport *vport = hclge_get_vport(handle);
2529 struct hclge_dev *hdev = vport->back;
2532 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2534 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2537 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2540 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2542 struct hclge_vport *vport = hclge_get_vport(handle);
2543 struct hclge_dev *hdev = vport->back;
2545 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2546 return hclge_set_autoneg_en(hdev, !halt);
2551 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2553 struct hclge_config_fec_cmd *req;
2554 struct hclge_desc desc;
2557 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2559 req = (struct hclge_config_fec_cmd *)desc.data;
2560 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2561 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2562 if (fec_mode & BIT(HNAE3_FEC_RS))
2563 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2564 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2565 if (fec_mode & BIT(HNAE3_FEC_BASER))
2566 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2567 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2569 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2571 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2576 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2578 struct hclge_vport *vport = hclge_get_vport(handle);
2579 struct hclge_dev *hdev = vport->back;
2580 struct hclge_mac *mac = &hdev->hw.mac;
2583 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2584 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2588 ret = hclge_set_fec_hw(hdev, fec_mode);
2592 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2596 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2599 struct hclge_vport *vport = hclge_get_vport(handle);
2600 struct hclge_dev *hdev = vport->back;
2601 struct hclge_mac *mac = &hdev->hw.mac;
2604 *fec_ability = mac->fec_ability;
2606 *fec_mode = mac->fec_mode;
2609 static int hclge_mac_init(struct hclge_dev *hdev)
2611 struct hclge_mac *mac = &hdev->hw.mac;
2614 hdev->support_sfp_query = true;
2615 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2616 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2617 hdev->hw.mac.duplex);
2621 if (hdev->hw.mac.support_autoneg) {
2622 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2629 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2630 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2635 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2637 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2641 ret = hclge_set_default_loopback(hdev);
2645 ret = hclge_buffer_alloc(hdev);
2647 dev_err(&hdev->pdev->dev,
2648 "allocate buffer fail, ret=%d\n", ret);
2653 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2655 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2656 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2657 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2658 hclge_wq, &hdev->service_task, 0);
2661 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2663 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2664 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2665 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2666 hclge_wq, &hdev->service_task, 0);
2669 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2671 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2672 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2673 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2674 hclge_wq, &hdev->service_task,
2678 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2680 struct hclge_link_status_cmd *req;
2681 struct hclge_desc desc;
2685 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2686 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2688 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2693 req = (struct hclge_link_status_cmd *)desc.data;
2694 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2696 return !!link_status;
2699 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2701 unsigned int mac_state;
2704 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2707 mac_state = hclge_get_mac_link_status(hdev);
2709 if (hdev->hw.mac.phydev) {
2710 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2711 link_stat = mac_state &
2712 hdev->hw.mac.phydev->link;
2717 link_stat = mac_state;
2723 static void hclge_update_link_status(struct hclge_dev *hdev)
2725 struct hnae3_client *rclient = hdev->roce_client;
2726 struct hnae3_client *client = hdev->nic_client;
2727 struct hnae3_handle *rhandle;
2728 struct hnae3_handle *handle;
2735 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2738 state = hclge_get_mac_phy_link(hdev);
2739 if (state != hdev->hw.mac.link) {
2740 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2741 handle = &hdev->vport[i].nic;
2742 client->ops->link_status_change(handle, state);
2743 hclge_config_mac_tnl_int(hdev, state);
2744 rhandle = &hdev->vport[i].roce;
2745 if (rclient && rclient->ops->link_status_change)
2746 rclient->ops->link_status_change(rhandle,
2749 hdev->hw.mac.link = state;
2752 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2755 static void hclge_update_port_capability(struct hclge_mac *mac)
2757 /* update fec ability by speed */
2758 hclge_convert_setting_fec(mac);
2760 /* firmware cannot identify backplane type, the media type
2761 * read from configuration can help handle it
2763 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2764 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2765 mac->module_type = HNAE3_MODULE_TYPE_KR;
2766 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2767 mac->module_type = HNAE3_MODULE_TYPE_TP;
2769 if (mac->support_autoneg) {
2770 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2771 linkmode_copy(mac->advertising, mac->supported);
2773 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2775 linkmode_zero(mac->advertising);
2779 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2781 struct hclge_sfp_info_cmd *resp;
2782 struct hclge_desc desc;
2785 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2786 resp = (struct hclge_sfp_info_cmd *)desc.data;
2787 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2788 if (ret == -EOPNOTSUPP) {
2789 dev_warn(&hdev->pdev->dev,
2790 "IMP do not support get SFP speed %d\n", ret);
2793 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2797 *speed = le32_to_cpu(resp->speed);
2802 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2804 struct hclge_sfp_info_cmd *resp;
2805 struct hclge_desc desc;
2808 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2809 resp = (struct hclge_sfp_info_cmd *)desc.data;
2811 resp->query_type = QUERY_ACTIVE_SPEED;
2813 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2814 if (ret == -EOPNOTSUPP) {
2815 dev_warn(&hdev->pdev->dev,
2816 "IMP does not support get SFP info %d\n", ret);
2819 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2823 /* In some cases, the mac speed fetched from IMP may be 0; it shouldn't be
2824 * set to mac->speed.
2826 if (!le32_to_cpu(resp->speed))
2829 mac->speed = le32_to_cpu(resp->speed);
2830 /* if resp->speed_ability is 0, it means the firmware is an old
2831 * version, so do not update these params
2833 if (resp->speed_ability) {
2834 mac->module_type = le32_to_cpu(resp->module_type);
2835 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2836 mac->autoneg = resp->autoneg;
2837 mac->support_autoneg = resp->autoneg_ability;
2838 mac->speed_type = QUERY_ACTIVE_SPEED;
2839 if (!resp->active_fec)
2842 mac->fec_mode = BIT(resp->active_fec);
2844 mac->speed_type = QUERY_SFP_SPEED;
2850 static int hclge_update_port_info(struct hclge_dev *hdev)
2852 struct hclge_mac *mac = &hdev->hw.mac;
2853 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2856 /* get the port info from SFP cmd if not copper port */
2857 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2860 /* if IMP does not support getting SFP/qSFP info, return directly */
2861 if (!hdev->support_sfp_query)
2864 if (hdev->pdev->revision >= 0x21)
2865 ret = hclge_get_sfp_info(hdev, mac);
2867 ret = hclge_get_sfp_speed(hdev, &speed);
2869 if (ret == -EOPNOTSUPP) {
2870 hdev->support_sfp_query = false;
2876 if (hdev->pdev->revision >= 0x21) {
2877 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2878 hclge_update_port_capability(mac);
2881 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2884 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2885 return 0; /* do nothing if no SFP */
2887 /* must configure full duplex for SFP */
2888 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2892 static int hclge_get_status(struct hnae3_handle *handle)
2894 struct hclge_vport *vport = hclge_get_vport(handle);
2895 struct hclge_dev *hdev = vport->back;
2897 hclge_update_link_status(hdev);
2899 return hdev->hw.mac.link;
2902 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2904 if (!pci_num_vf(hdev->pdev)) {
2905 dev_err(&hdev->pdev->dev,
2906 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2910 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2911 dev_err(&hdev->pdev->dev,
2912 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2913 vf, pci_num_vf(hdev->pdev));
2917 /* VFs start from 1 in vport */
2918 vf += HCLGE_VF_VPORT_START_NUM;
2919 return &hdev->vport[vf];
2922 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2923 struct ifla_vf_info *ivf)
2925 struct hclge_vport *vport = hclge_get_vport(handle);
2926 struct hclge_dev *hdev = vport->back;
2928 vport = hclge_get_vf_vport(hdev, vf);
2933 ivf->linkstate = vport->vf_info.link_state;
2934 ivf->spoofchk = vport->vf_info.spoofchk;
2935 ivf->trusted = vport->vf_info.trusted;
2936 ivf->min_tx_rate = 0;
2937 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2938 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2939 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2940 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2941 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2946 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2949 struct hclge_vport *vport = hclge_get_vport(handle);
2950 struct hclge_dev *hdev = vport->back;
2952 vport = hclge_get_vf_vport(hdev, vf);
2956 vport->vf_info.link_state = link_state;
2961 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2963 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2965 /* fetch the events from their corresponding regs */
2966 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2967 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2968 msix_src_reg = hclge_read_dev(&hdev->hw,
2969 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2971 /* Assumption: if by any chance reset and mailbox events are reported
2972 * together, then we will only process the reset event in this pass and
2973 * will defer the processing of the mailbox events. Since we would not
2974 * have cleared the RX CMDQ event this time, we would receive another
2975 * interrupt from H/W just for the mailbox.
2977 * check for vector0 reset event sources
2979 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2980 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2981 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2982 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2983 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2984 hdev->rst_stats.imp_rst_cnt++;
2985 return HCLGE_VECTOR0_EVENT_RST;
2988 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2989 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2990 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2991 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2992 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2993 hdev->rst_stats.global_rst_cnt++;
2994 return HCLGE_VECTOR0_EVENT_RST;
2997 /* check for vector0 msix event source */
2998 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2999 *clearval = msix_src_reg;
3000 return HCLGE_VECTOR0_EVENT_ERR;
3003 /* check for vector0 mailbox(=CMDQ RX) event source */
3004 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3005 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3006 *clearval = cmdq_src_reg;
3007 return HCLGE_VECTOR0_EVENT_MBX;
3010 /* print other vector0 event source */
3011 dev_info(&hdev->pdev->dev,
3012 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3013 cmdq_src_reg, msix_src_reg);
3014 *clearval = msix_src_reg;
3016 return HCLGE_VECTOR0_EVENT_OTHER;
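/* Event priority implied by the ordering of the checks above, highest
 * first:
 *
 *	IMP reset > global reset > MSI-X error > mailbox > other
 *
 * e.g. if a reset and a mailbox interrupt are latched together, only
 * HCLGE_VECTOR0_EVENT_RST is returned; the uncleared CMDQ RX source
 * re-raises the interrupt later, so the mailbox event is not lost.
 */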
3019 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3022 switch (event_type) {
3023 case HCLGE_VECTOR0_EVENT_RST:
3024 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3026 case HCLGE_VECTOR0_EVENT_MBX:
3027 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3034 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3036 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3037 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3038 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3039 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3040 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3043 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3045 writel(enable ? 1 : 0, vector->addr);
3048 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3050 struct hclge_dev *hdev = data;
3054 hclge_enable_vector(&hdev->misc_vector, false);
3055 event_cause = hclge_check_event_cause(hdev, &clearval);
3057 /* vector 0 interrupt is shared with reset and mailbox source events. */
3058 switch (event_cause) {
3059 case HCLGE_VECTOR0_EVENT_ERR:
3060 /* we do not know what type of reset is required now. This could
3061 * only be decided after we fetch the type of errors which
3062 * caused this event. Therefore, we will do the following for now:
3063 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3064 * have deferred the type of reset to be used.
3065 * 2. Schedule the reset service task.
3066 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3067 * will fetch the correct type of reset. This would be done
3068 * by first decoding the types of errors.
3070 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3072 case HCLGE_VECTOR0_EVENT_RST:
3073 hclge_reset_task_schedule(hdev);
3075 case HCLGE_VECTOR0_EVENT_MBX:
3076 /* If we are here then,
3077 * 1. Either we are not handling any mbx task and we are not
3080 * 2. We could be handling a mbx task but nothing more is
3082 * In both cases, we should schedule mbx task as there are more
3083 * mbx messages reported by this interrupt.
3085 hclge_mbx_task_schedule(hdev);
3088 dev_warn(&hdev->pdev->dev,
3089 "received unknown or unhandled event of vector0\n");
3093 hclge_clear_event_cause(hdev, event_cause, clearval);
3095 /* Enable interrupt if it is not caused by reset. And when
3096 * clearval equals 0, it means the interrupt status may be
3097 * cleared by hardware before the driver reads the status register.
3098 * For this case, the vector0 interrupt also should be enabled.
3101 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3102 hclge_enable_vector(&hdev->misc_vector, true);
3108 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3110 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3111 dev_warn(&hdev->pdev->dev,
3112 "vector(vector_id %d) has been freed.\n", vector_id);
3116 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3117 hdev->num_msi_left += 1;
3118 hdev->num_msi_used -= 1;
3121 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3123 struct hclge_misc_vector *vector = &hdev->misc_vector;
3125 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3127 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3128 hdev->vector_status[0] = 0;
3130 hdev->num_msi_left -= 1;
3131 hdev->num_msi_used += 1;
3134 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3135 const cpumask_t *mask)
3137 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3140 cpumask_copy(&hdev->affinity_mask, mask);
3143 static void hclge_irq_affinity_release(struct kref *ref)
3147 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3149 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3150 &hdev->affinity_mask);
3152 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3153 hdev->affinity_notify.release = hclge_irq_affinity_release;
3154 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3155 &hdev->affinity_notify);
3158 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3160 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3161 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3164 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3168 hclge_get_misc_vector(hdev);
3170 /* this would be explicitly freed in the end */
3171 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3172 HCLGE_NAME, pci_name(hdev->pdev));
3173 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3174 0, hdev->misc_vector.name, hdev);
3176 hclge_free_vector(hdev, 0);
3177 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3178 hdev->misc_vector.vector_irq);
3184 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3186 free_irq(hdev->misc_vector.vector_irq, hdev);
3187 hclge_free_vector(hdev, 0);
3190 int hclge_notify_client(struct hclge_dev *hdev,
3191 enum hnae3_reset_notify_type type)
3193 struct hnae3_client *client = hdev->nic_client;
3196 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3199 if (!client->ops->reset_notify)
3202 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3203 struct hnae3_handle *handle = &hdev->vport[i].nic;
3206 ret = client->ops->reset_notify(handle, type);
3208 dev_err(&hdev->pdev->dev,
3209 "notify nic client failed %d(%d)\n", type, ret);
3217 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3218 enum hnae3_reset_notify_type type)
3220 struct hnae3_client *client = hdev->roce_client;
3224 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3227 if (!client->ops->reset_notify)
3230 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3231 struct hnae3_handle *handle = &hdev->vport[i].roce;
3233 ret = client->ops->reset_notify(handle, type);
3235 dev_err(&hdev->pdev->dev,
3236 "notify roce client failed %d(%d)",
3245 static int hclge_reset_wait(struct hclge_dev *hdev)
3247 #define HCLGE_RESET_WATI_MS 100
3248 #define HCLGE_RESET_WAIT_CNT 350
3250 u32 val, reg, reg_bit;
3253 switch (hdev->reset_type) {
3254 case HNAE3_IMP_RESET:
3255 reg = HCLGE_GLOBAL_RESET_REG;
3256 reg_bit = HCLGE_IMP_RESET_BIT;
3258 case HNAE3_GLOBAL_RESET:
3259 reg = HCLGE_GLOBAL_RESET_REG;
3260 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3262 case HNAE3_FUNC_RESET:
3263 reg = HCLGE_FUN_RST_ING;
3264 reg_bit = HCLGE_FUN_RST_ING_B;
3267 dev_err(&hdev->pdev->dev,
3268 "Wait for unsupported reset type: %d\n",
3273 val = hclge_read_dev(&hdev->hw, reg);
3274 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3275 msleep(HCLGE_RESET_WATI_MS);
3276 val = hclge_read_dev(&hdev->hw, reg);
3280 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3281 dev_warn(&hdev->pdev->dev,
3282 "Wait for reset timeout: %d\n", hdev->reset_type);
3289 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3291 struct hclge_vf_rst_cmd *req;
3292 struct hclge_desc desc;
3294 req = (struct hclge_vf_rst_cmd *)desc.data;
3295 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3296 req->dest_vfid = func_id;
3301 return hclge_cmd_send(&hdev->hw, &desc, 1);
3304 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3308 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3309 struct hclge_vport *vport = &hdev->vport[i];
3312 /* Send cmd to set/clear VF's FUNC_RST_ING */
3313 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3315 dev_err(&hdev->pdev->dev,
3316 "set vf(%u) rst failed %d!\n",
3317 vport->vport_id, ret);
3321 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3324 /* Inform VF to process the reset.
3325 * hclge_inform_reset_assert_to_vf may fail if the VF
3326 * driver is not loaded.
3328 ret = hclge_inform_reset_assert_to_vf(vport);
3330 dev_warn(&hdev->pdev->dev,
3331 "inform reset to vf(%u) failed %d!\n",
3332 vport->vport_id, ret);
3338 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3340 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3341 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3342 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3345 hclge_mbx_handler(hdev);
3347 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3350 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3352 struct hclge_pf_rst_sync_cmd *req;
3353 struct hclge_desc desc;
3357 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3358 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3361 /* vf needs to down the netdev by mbx during PF or FLR reset */
3362 hclge_mailbox_service_task(hdev);
3364 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3365 /* for compatibility with old firmware, wait
3366 * 100 ms for VF to stop IO
3368 if (ret == -EOPNOTSUPP) {
3369 msleep(HCLGE_RESET_SYNC_TIME);
3372 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3375 } else if (req->all_vf_ready) {
3378 msleep(HCLGE_PF_RESET_SYNC_TIME);
3379 hclge_cmd_reuse_desc(&desc, true);
3380 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3382 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
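/* Worked sync budget for the polling loop above, using the constants
 * defined near the top of the file:
 *
 *	HCLGE_PF_RESET_SYNC_TIME * HCLGE_PF_RESET_SYNC_CNT
 *		= 20 ms * 1500 = 30 s
 *
 * i.e. the PF waits up to ~30 seconds for all VFs to report ready
 * before warning that the sync timed out.
 */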
3385 void hclge_report_hw_error(struct hclge_dev *hdev,
3386 enum hnae3_hw_error_type type)
3388 struct hnae3_client *client = hdev->nic_client;
3391 if (!client || !client->ops->process_hw_error ||
3392 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3395 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3396 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3399 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3403 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3404 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3405 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3406 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3407 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3410 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3411 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3412 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3413 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3417 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3419 struct hclge_desc desc;
3420 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3423 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3424 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3425 req->fun_reset_vfid = func_id;
3427 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3429 dev_err(&hdev->pdev->dev,
3430 "send function reset cmd fail, status =%d\n", ret);
3435 static void hclge_do_reset(struct hclge_dev *hdev)
3437 struct hnae3_handle *handle = &hdev->vport[0].nic;
3438 struct pci_dev *pdev = hdev->pdev;
3441 if (hclge_get_hw_reset_stat(handle)) {
3442 dev_info(&pdev->dev, "Hardware reset not finish\n");
3443 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3444 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3445 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3449 switch (hdev->reset_type) {
3450 case HNAE3_GLOBAL_RESET:
3451 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3452 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3453 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3454 dev_info(&pdev->dev, "Global Reset requested\n");
3456 case HNAE3_FUNC_RESET:
3457 dev_info(&pdev->dev, "PF Reset requested\n");
3458 /* schedule again to check later */
3459 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3460 hclge_reset_task_schedule(hdev);
3463 dev_warn(&pdev->dev,
3464 "Unsupported reset type: %d\n", hdev->reset_type);
3469 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3470 unsigned long *addr)
3472 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3473 struct hclge_dev *hdev = ae_dev->priv;
3475 /* first, resolve any unknown reset type to the known type(s) */
3476 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3477 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3478 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3479 /* we will intentionally ignore any errors from this function
3480 * as we will end up in *some* reset request in any case
3482 if (hclge_handle_hw_msix_error(hdev, addr))
3483 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3486 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3487 /* We deferred the clearing of the error event which caused
3488 * the interrupt since it was not possible to do that in
3489 * interrupt context (and this is the reason we introduced the
3490 * new UNKNOWN reset type). Now that the errors have been
3491 * handled and cleared in hardware, we can safely enable
3492 * interrupts. This is an exception to the norm.
3494 hclge_enable_vector(&hdev->misc_vector, true);
3497 /* return the highest priority reset level amongst all */
3498 if (test_bit(HNAE3_IMP_RESET, addr)) {
3499 rst_level = HNAE3_IMP_RESET;
3500 clear_bit(HNAE3_IMP_RESET, addr);
3501 clear_bit(HNAE3_GLOBAL_RESET, addr);
3502 clear_bit(HNAE3_FUNC_RESET, addr);
3503 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3504 rst_level = HNAE3_GLOBAL_RESET;
3505 clear_bit(HNAE3_GLOBAL_RESET, addr);
3506 clear_bit(HNAE3_FUNC_RESET, addr);
3507 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3508 rst_level = HNAE3_FUNC_RESET;
3509 clear_bit(HNAE3_FUNC_RESET, addr);
3510 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3511 rst_level = HNAE3_FLR_RESET;
3512 clear_bit(HNAE3_FLR_RESET, addr);
3515 if (hdev->reset_type != HNAE3_NONE_RESET &&
3516 rst_level < hdev->reset_type)
3517 return HNAE3_NONE_RESET;
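/* Example of the priority resolution above: if both HNAE3_IMP_RESET and
 * HNAE3_FUNC_RESET are pending in *addr, the function returns
 * HNAE3_IMP_RESET and clears the lower-level bits as well, because the
 * IMP reset already covers everything a function reset would do.
 */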
3522 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3526 switch (hdev->reset_type) {
3527 case HNAE3_IMP_RESET:
3528 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3530 case HNAE3_GLOBAL_RESET:
3531 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3540 /* For revision 0x20, the reset interrupt source
3541 * can only be cleared after the hardware reset is done
3543 if (hdev->pdev->revision == 0x20)
3544 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3547 hclge_enable_vector(&hdev->misc_vector, true);
3550 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3554 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3556 reg_val |= HCLGE_NIC_SW_RST_RDY;
3558 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3560 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3563 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3567 ret = hclge_set_all_vf_rst(hdev, true);
3571 hclge_func_reset_sync_vf(hdev);
3576 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3581 switch (hdev->reset_type) {
3582 case HNAE3_FUNC_RESET:
3583 ret = hclge_func_reset_notify_vf(hdev);
3587 ret = hclge_func_reset_cmd(hdev, 0);
3589 dev_err(&hdev->pdev->dev,
3590 "asserting function reset fail %d!\n", ret);
3594 /* After performing a PF reset, it is not necessary to do the
3595 * mailbox handling or send any command to firmware, because
3596 * any mailbox handling or command to firmware is only valid
3597 * after hclge_cmd_init is called.
3599 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3600 hdev->rst_stats.pf_rst_cnt++;
3602 case HNAE3_FLR_RESET:
3603 ret = hclge_func_reset_notify_vf(hdev);
3607 case HNAE3_IMP_RESET:
3608 hclge_handle_imp_error(hdev);
3609 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3610 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3611 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3617 /* inform hardware that preparatory work is done */
3618 msleep(HCLGE_RESET_SYNC_TIME);
3619 hclge_reset_handshake(hdev, true);
3620 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3625 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3627 #define MAX_RESET_FAIL_CNT 5
3629 if (hdev->reset_pending) {
3630 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3631 hdev->reset_pending);
3633 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3634 HCLGE_RESET_INT_M) {
3635 dev_info(&hdev->pdev->dev,
3636 "reset failed because new reset interrupt\n");
3637 hclge_clear_reset_cause(hdev);
3639 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3640 hdev->rst_stats.reset_fail_cnt++;
3641 set_bit(hdev->reset_type, &hdev->reset_pending);
3642 dev_info(&hdev->pdev->dev,
3643 "re-schedule reset task(%u)\n",
3644 hdev->rst_stats.reset_fail_cnt);
3648 hclge_clear_reset_cause(hdev);
3650 /* recover the handshake status when reset fails */
3651 hclge_reset_handshake(hdev, true);
3653 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3655 hclge_dbg_dump_rst_info(hdev);
3657 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3662 static int hclge_set_rst_done(struct hclge_dev *hdev)
3664 struct hclge_pf_rst_done_cmd *req;
3665 struct hclge_desc desc;
3668 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3669 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3670 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3672 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3673 /* To be compatible with the old firmware, which does not support
3674 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3677 if (ret == -EOPNOTSUPP) {
3678 dev_warn(&hdev->pdev->dev,
3679 "current firmware does not support command(0x%x)!\n",
3680 HCLGE_OPC_PF_RST_DONE);
3683 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3690 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3694 switch (hdev->reset_type) {
3695 case HNAE3_FUNC_RESET:
3697 case HNAE3_FLR_RESET:
3698 ret = hclge_set_all_vf_rst(hdev, false);
3700 case HNAE3_GLOBAL_RESET:
3702 case HNAE3_IMP_RESET:
3703 ret = hclge_set_rst_done(hdev);
3709 /* clear up the handshake status after re-initialization is done */
3710 hclge_reset_handshake(hdev, false);
3715 static int hclge_reset_stack(struct hclge_dev *hdev)
3719 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3723 ret = hclge_reset_ae_dev(hdev->ae_dev);
3727 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3731 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3734 static int hclge_reset_prepare(struct hclge_dev *hdev)
3736 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3739 /* Initialize ae_dev reset status as well, in case the enet layer wants
3740 * to know if the device is undergoing reset
3742 ae_dev->reset_type = hdev->reset_type;
3743 hdev->rst_stats.reset_cnt++;
3744 /* perform reset of the stack & ae device for a client */
3745 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3750 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3755 return hclge_reset_prepare_wait(hdev);
3758 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3760 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3761 enum hnae3_reset_type reset_level;
3764 hdev->rst_stats.hw_reset_done_cnt++;
3766 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3771 ret = hclge_reset_stack(hdev);
3776 hclge_clear_reset_cause(hdev);
3778 ret = hclge_reset_prepare_up(hdev);
3783 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3784 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3788 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3792 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3797 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3801 hdev->last_reset_time = jiffies;
3802 hdev->rst_stats.reset_fail_cnt = 0;
3803 hdev->rst_stats.reset_done_cnt++;
3804 ae_dev->reset_type = HNAE3_NONE_RESET;
3805 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3807 /* if default_reset_request has a higher level reset request,
3808 * it should be handled as soon as possible, since some errors
3809 * need this kind of reset to be fixed.
3811 reset_level = hclge_get_reset_level(ae_dev,
3812 &hdev->default_reset_request);
3813 if (reset_level != HNAE3_NONE_RESET)
3814 set_bit(reset_level, &hdev->reset_request);
3819 static void hclge_reset(struct hclge_dev *hdev)
3821 if (hclge_reset_prepare(hdev))
3824 if (hclge_reset_wait(hdev))
3827 if (hclge_reset_rebuild(hdev))
3833 if (hclge_reset_err_handle(hdev))
3834 hclge_reset_task_schedule(hdev);
3837 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3839 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3840 struct hclge_dev *hdev = ae_dev->priv;
3842 /* We might end up getting called broadly because of the 2 cases below:
3843 * 1. Recoverable error was conveyed through APEI and the only way to
3844 * bring normalcy is to reset.
3845 * 2. A new reset request from the stack due to timeout
3847 * For the first case, the error event might not have an ae handle
3848 * available. Check if this is a new reset request and we are not here
3849 * just because the last reset attempt did not succeed and the watchdog
3850 * hit us again. We will know this if the last reset request did not
3851 * occur very recently (watchdog timer = 5*HZ, check after a
3852 * sufficiently large time, say 4*5*HZ). In case of a new request we
3853 * reset the "reset level" to PF reset. And if it is a repeat reset
3854 * request of the most recent one then we want to throttle the reset
3855 * request. Therefore, we will not allow it again before 3*HZ times.
3858 handle = &hdev->vport[0].nic;
3860 if (time_before(jiffies, (hdev->last_reset_time +
3861 HCLGE_RESET_INTERVAL))) {
3862 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3864 } else if (hdev->default_reset_request) {
3866 hclge_get_reset_level(ae_dev,
3867 &hdev->default_reset_request);
3868 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3869 hdev->reset_level = HNAE3_FUNC_RESET;
3872 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3875 /* request reset & schedule reset task */
3876 set_bit(hdev->reset_level, &hdev->reset_request);
3877 hclge_reset_task_schedule(hdev);
3879 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3880 hdev->reset_level++;
3883 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3884 enum hnae3_reset_type rst_type)
3886 struct hclge_dev *hdev = ae_dev->priv;
3888 set_bit(rst_type, &hdev->default_reset_request);
3891 static void hclge_reset_timer(struct timer_list *t)
3893 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3895 /* if default_reset_request has no value, it means that this reset
3896 * request has already been handled, so just return here
3898 if (!hdev->default_reset_request)
3901 dev_info(&hdev->pdev->dev,
3902 "triggering reset in reset timer\n");
3903 hclge_reset_event(hdev->pdev, NULL);
3906 static void hclge_reset_subtask(struct hclge_dev *hdev)
3908 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3910 /* check if there is any ongoing reset in the hardware. This status can
3911 * be checked from reset_pending. If there is, then we need to wait for
3912 * the hardware to complete the reset.
3913 * a. If we are able to figure out in reasonable time that the hardware
3914 * has fully reset, then we can proceed with driver, client
3916 * b. else, we can come back later to check this status so re-sched
3919 hdev->last_reset_time = jiffies;
3920 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3921 if (hdev->reset_type != HNAE3_NONE_RESET)
3924 /* check if we got any *new* reset requests to be honored */
3925 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3926 if (hdev->reset_type != HNAE3_NONE_RESET)
3927 hclge_do_reset(hdev);
3929 hdev->reset_type = HNAE3_NONE_RESET;
3932 static void hclge_reset_service_task(struct hclge_dev *hdev)
3934 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3937 down(&hdev->reset_sem);
3938 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3940 hclge_reset_subtask(hdev);
3942 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3943 up(&hdev->reset_sem);
3946 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3950 /* start from vport 1 since the PF is always alive */
3951 for (i = 1; i < hdev->num_alloc_vport; i++) {
3952 struct hclge_vport *vport = &hdev->vport[i];
3954 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3955 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3957 /* If the vf is not alive, set mps to the default value */
3958 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3959 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3963 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3965 unsigned long delta = round_jiffies_relative(HZ);
3967 /* Always handle the link updating to make sure link state is
3968 * updated when it is triggered by mbx.
3970 hclge_update_link_status(hdev);
3972 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3973 delta = jiffies - hdev->last_serv_processed;
3975 if (delta < round_jiffies_relative(HZ)) {
3976 delta = round_jiffies_relative(HZ) - delta;
3981 hdev->serv_processed_cnt++;
3982 hclge_update_vport_alive(hdev);
3984 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3985 hdev->last_serv_processed = jiffies;
3989 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3990 hclge_update_stats_for_all(hdev);
3992 hclge_update_port_info(hdev);
3993 hclge_sync_vlan_filter(hdev);
3995 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3996 hclge_rfs_filter_expire(hdev);
3998 hdev->last_serv_processed = jiffies;
4001 hclge_task_schedule(hdev, delta);
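/* Worked example of the delta computation above: with HZ = 100, if the
 * last full pass finished 40 jiffies (400 ms) ago, the early-return
 * path re-arms the task with
 *
 *	delta = round_jiffies_relative(HZ) - 40	(about 600 ms)
 *
 * keeping the heavy periodic work at roughly one pass per second
 * instead of running it on every service-task wakeup.
 */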
4004 static void hclge_service_task(struct work_struct *work)
4006 struct hclge_dev *hdev =
4007 container_of(work, struct hclge_dev, service_task.work);
4009 hclge_reset_service_task(hdev);
4010 hclge_mailbox_service_task(hdev);
4011 hclge_periodic_service_task(hdev);
4013 /* Handle reset and mbx again in case the periodic task delays the
4014 * handling by calling hclge_task_schedule() in
4015 * hclge_periodic_service_task().
4017 hclge_reset_service_task(hdev);
4018 hclge_mailbox_service_task(hdev);
4021 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4023 /* VF handle has no client */
4024 if (!handle->client)
4025 return container_of(handle, struct hclge_vport, nic);
4026 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4027 return container_of(handle, struct hclge_vport, roce);
4029 return container_of(handle, struct hclge_vport, nic);
4032 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4033 struct hnae3_vector_info *vector_info)
4035 struct hclge_vport *vport = hclge_get_vport(handle);
4036 struct hnae3_vector_info *vector = vector_info;
4037 struct hclge_dev *hdev = vport->back;
4041 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4042 vector_num = min(hdev->num_msi_left, vector_num);
4044 for (j = 0; j < vector_num; j++) {
4045 for (i = 1; i < hdev->num_msi; i++) {
4046 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4047 vector->vector = pci_irq_vector(hdev->pdev, i);
4048 vector->io_addr = hdev->hw.io_base +
4049 HCLGE_VECTOR_REG_BASE +
4050 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4052 HCLGE_VECTOR_VF_OFFSET;
4053 hdev->vector_status[i] = vport->vport_id;
4054 hdev->vector_irq[i] = vector->vector;
4063 hdev->num_msi_left -= alloc;
4064 hdev->num_msi_used += alloc;
4069 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4073 for (i = 0; i < hdev->num_msi; i++)
4074 if (vector == hdev->vector_irq[i])
4080 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4082 struct hclge_vport *vport = hclge_get_vport(handle);
4083 struct hclge_dev *hdev = vport->back;
4086 vector_id = hclge_get_vector_index(hdev, vector);
4087 if (vector_id < 0) {
4088 dev_err(&hdev->pdev->dev,
4089 "Get vector index fail. vector = %d\n", vector);
4093 hclge_free_vector(hdev, vector_id);
4098 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4100 return HCLGE_RSS_KEY_SIZE;
4103 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4105 return HCLGE_RSS_IND_TBL_SIZE;
4108 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4109 const u8 hfunc, const u8 *key)
4111 struct hclge_rss_config_cmd *req;
4112 unsigned int key_offset = 0;
4113 struct hclge_desc desc;
4118 key_counts = HCLGE_RSS_KEY_SIZE;
4119 req = (struct hclge_rss_config_cmd *)desc.data;
4121 while (key_counts) {
4122 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4125 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4126 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4128 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4129 memcpy(req->hash_key,
4130 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4132 key_counts -= key_size;
4134 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4136 dev_err(&hdev->pdev->dev,
4137 "Configure RSS config fail, status = %d\n",
4145 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4147 struct hclge_rss_indirection_table_cmd *req;
4148 struct hclge_desc desc;
4152 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4154 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4155 hclge_cmd_setup_basic_desc
4156 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4158 req->start_table_index =
4159 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4160 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4162 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4163 req->rss_result[j] =
4164 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4166 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4168 dev_err(&hdev->pdev->dev,
4169 "Configure rss indir table fail,status = %d\n",
4177 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4178 u16 *tc_size, u16 *tc_offset)
4180 struct hclge_rss_tc_mode_cmd *req;
4181 struct hclge_desc desc;
4185 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4186 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4188 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4191 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4192 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4193 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4194 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4195 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4197 req->rss_tc_mode[i] = cpu_to_le16(mode);
4200 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4202 dev_err(&hdev->pdev->dev,
4203 "Configure rss tc mode fail, status = %d\n", ret);
4208 static void hclge_get_rss_type(struct hclge_vport *vport)
4210 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4211 vport->rss_tuple_sets.ipv4_udp_en ||
4212 vport->rss_tuple_sets.ipv4_sctp_en ||
4213 vport->rss_tuple_sets.ipv6_tcp_en ||
4214 vport->rss_tuple_sets.ipv6_udp_en ||
4215 vport->rss_tuple_sets.ipv6_sctp_en)
4216 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4217 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4218 vport->rss_tuple_sets.ipv6_fragment_en)
4219 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4221 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4224 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4226 struct hclge_rss_input_tuple_cmd *req;
4227 struct hclge_desc desc;
4230 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4232 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4234 /* Get the tuple cfg from the PF */
4235 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4236 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4237 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4238 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4239 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4240 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4241 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4242 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4243 hclge_get_rss_type(&hdev->vport[0]);
4244 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4246 dev_err(&hdev->pdev->dev,
4247 "Configure rss input fail, status = %d\n", ret);
4251 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4254 struct hclge_vport *vport = hclge_get_vport(handle);
4257 /* Get hash algorithm */
4259 switch (vport->rss_algo) {
4260 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4261 *hfunc = ETH_RSS_HASH_TOP;
4263 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4264 *hfunc = ETH_RSS_HASH_XOR;
4267 *hfunc = ETH_RSS_HASH_UNKNOWN;
4272 /* Get the RSS Key required by the user */
4274 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4276 /* Get indirect table */
4278 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4279 indir[i] = vport->rss_indirection_tbl[i];
4284 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4285 const u8 *key, const u8 hfunc)
4287 struct hclge_vport *vport = hclge_get_vport(handle);
4288 struct hclge_dev *hdev = vport->back;
4292 /* Set the RSS Hash Key if specified by the user */
4295 case ETH_RSS_HASH_TOP:
4296 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4298 case ETH_RSS_HASH_XOR:
4299 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4301 case ETH_RSS_HASH_NO_CHANGE:
4302 hash_algo = vport->rss_algo;
4308 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4312 /* Update the shadow RSS key with the user specified key */
4313 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4314 vport->rss_algo = hash_algo;
4317 /* Update the shadow RSS table with user specified qids */
4318 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4319 vport->rss_indirection_tbl[i] = indir[i];
4321 /* Update the hardware */
4322 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4325 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4327 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4329 if (nfc->data & RXH_L4_B_2_3)
4330 hash_sets |= HCLGE_D_PORT_BIT;
4332 hash_sets &= ~HCLGE_D_PORT_BIT;
4334 if (nfc->data & RXH_IP_SRC)
4335 hash_sets |= HCLGE_S_IP_BIT;
4337 hash_sets &= ~HCLGE_S_IP_BIT;
4339 if (nfc->data & RXH_IP_DST)
4340 hash_sets |= HCLGE_D_IP_BIT;
4342 hash_sets &= ~HCLGE_D_IP_BIT;
4344 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4345 hash_sets |= HCLGE_V_TAG_BIT;
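/* Example translation performed above for an ethtool request such as
 * "ethtool -N <dev> rx-flow-hash tcp4 sdfn", i.e. nfc->data =
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3:
 *
 *	hash_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT |
 *		    HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT;
 *
 * SCTP flows additionally get HCLGE_V_TAG_BIT for the verification tag.
 */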
4350 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4351 struct ethtool_rxnfc *nfc)
4353 struct hclge_vport *vport = hclge_get_vport(handle);
4354 struct hclge_dev *hdev = vport->back;
4355 struct hclge_rss_input_tuple_cmd *req;
4356 struct hclge_desc desc;
4360 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4361 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4364 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4365 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4367 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4368 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4369 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4370 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4371 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4372 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4373 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4374 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4376 tuple_sets = hclge_get_rss_hash_bits(nfc);
4377 switch (nfc->flow_type) {
4379 req->ipv4_tcp_en = tuple_sets;
4382 req->ipv6_tcp_en = tuple_sets;
4385 req->ipv4_udp_en = tuple_sets;
4388 req->ipv6_udp_en = tuple_sets;
4391 req->ipv4_sctp_en = tuple_sets;
4394 if ((nfc->data & RXH_L4_B_0_1) ||
4395 (nfc->data & RXH_L4_B_2_3))
4398 req->ipv6_sctp_en = tuple_sets;
4401 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4404 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4410 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4412 dev_err(&hdev->pdev->dev,
4413 "Set rss tuple fail, status = %d\n", ret);
4417 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4418 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4419 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4420 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4421 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4422 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4423 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4424 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4425 hclge_get_rss_type(vport);
4429 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4430 struct ethtool_rxnfc *nfc)
4432 struct hclge_vport *vport = hclge_get_vport(handle);
4437 switch (nfc->flow_type) {
4439 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4442 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4445 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4448 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4451 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4454 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4458 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4467 if (tuple_sets & HCLGE_D_PORT_BIT)
4468 nfc->data |= RXH_L4_B_2_3;
4469 if (tuple_sets & HCLGE_S_PORT_BIT)
4470 nfc->data |= RXH_L4_B_0_1;
4471 if (tuple_sets & HCLGE_D_IP_BIT)
4472 nfc->data |= RXH_IP_DST;
4473 if (tuple_sets & HCLGE_S_IP_BIT)
4474 nfc->data |= RXH_IP_SRC;
4479 static int hclge_get_tc_size(struct hnae3_handle *handle)
4481 struct hclge_vport *vport = hclge_get_vport(handle);
4482 struct hclge_dev *hdev = vport->back;
4484 return hdev->rss_size_max;
4487 int hclge_rss_init_hw(struct hclge_dev *hdev)
4489 struct hclge_vport *vport = hdev->vport;
4490 u8 *rss_indir = vport[0].rss_indirection_tbl;
4491 u16 rss_size = vport[0].alloc_rss_size;
4492 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4493 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4494 u8 *key = vport[0].rss_hash_key;
4495 u8 hfunc = vport[0].rss_algo;
4496 u16 tc_valid[HCLGE_MAX_TC_NUM];
4501 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4505 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4509 ret = hclge_set_rss_input_tuple(hdev);
4513 /* Each TC has the same queue size, and the tc_size set to hardware is
4514 * the log2 of the roundup power of two of rss_size; the actual queue
4515 * size is limited by the indirection table.
4517 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4518 dev_err(&hdev->pdev->dev,
4519 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4524 roundup_size = roundup_pow_of_two(rss_size);
4525 roundup_size = ilog2(roundup_size);
4527 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4530 if (!(hdev->hw_tc_map & BIT(i)))
4534 tc_size[i] = roundup_size;
4535 tc_offset[i] = rss_size * i;
4538 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
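/* Worked example of the tc_size computation above: with rss_size = 24,
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so every enabled TC is
 * programmed with tc_size = 5 (a 32-entry window), while tc_offset
 * advances in steps of the real rss_size (0, 24, 48, ...); the
 * indirection table keeps lookups within the 24 actual queues.
 */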
4541 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4543 struct hclge_vport *vport = hdev->vport;
4546 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4547 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4548 vport[j].rss_indirection_tbl[i] =
4549 i % vport[j].alloc_rss_size;
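/* For example, with alloc_rss_size = 16 the default indirection table
 * becomes 0, 1, ..., 15, 0, 1, ... repeated until all
 * HCLGE_RSS_IND_TBL_SIZE entries are filled, spreading flows evenly
 * across the allocated queues.
 */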
4553 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4555 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4556 struct hclge_vport *vport = hdev->vport;
4558 if (hdev->pdev->revision >= 0x21)
4559 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4561 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4562 vport[i].rss_tuple_sets.ipv4_tcp_en =
4563 HCLGE_RSS_INPUT_TUPLE_OTHER;
4564 vport[i].rss_tuple_sets.ipv4_udp_en =
4565 HCLGE_RSS_INPUT_TUPLE_OTHER;
4566 vport[i].rss_tuple_sets.ipv4_sctp_en =
4567 HCLGE_RSS_INPUT_TUPLE_SCTP;
4568 vport[i].rss_tuple_sets.ipv4_fragment_en =
4569 HCLGE_RSS_INPUT_TUPLE_OTHER;
4570 vport[i].rss_tuple_sets.ipv6_tcp_en =
4571 HCLGE_RSS_INPUT_TUPLE_OTHER;
4572 vport[i].rss_tuple_sets.ipv6_udp_en =
4573 HCLGE_RSS_INPUT_TUPLE_OTHER;
4574 vport[i].rss_tuple_sets.ipv6_sctp_en =
4575 HCLGE_RSS_INPUT_TUPLE_SCTP;
4576 vport[i].rss_tuple_sets.ipv6_fragment_en =
4577 HCLGE_RSS_INPUT_TUPLE_OTHER;
4579 vport[i].rss_algo = rss_algo;
4581 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4582 HCLGE_RSS_KEY_SIZE);
4585 hclge_rss_indir_init_cfg(hdev);
4588 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4589 int vector_id, bool en,
4590 struct hnae3_ring_chain_node *ring_chain)
4592 struct hclge_dev *hdev = vport->back;
4593 struct hnae3_ring_chain_node *node;
4594 struct hclge_desc desc;
4595 struct hclge_ctrl_vector_chain_cmd *req =
4596 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4597 enum hclge_cmd_status status;
4598 enum hclge_opcode_type op;
4599 u16 tqp_type_and_id;
4602 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4603 hclge_cmd_setup_basic_desc(&desc, op, false);
4604 req->int_vector_id = vector_id;
4607 for (node = ring_chain; node; node = node->next) {
4608 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4609 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4611 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4612 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4613 HCLGE_TQP_ID_S, node->tqp_index);
4614 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4616 hnae3_get_field(node->int_gl_idx,
4617 HNAE3_RING_GL_IDX_M,
4618 HNAE3_RING_GL_IDX_S));
4619 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4620 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4621 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4622 req->vfid = vport->vport_id;
4624 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4626 dev_err(&hdev->pdev->dev,
4627 "Map TQP fail, status is %d.\n",
4633 hclge_cmd_setup_basic_desc(&desc,
4636 req->int_vector_id = vector_id;
4641 req->int_cause_num = i;
4642 req->vfid = vport->vport_id;
4643 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4645 dev_err(&hdev->pdev->dev,
4646 "Map TQP fail, status is %d.\n", status);
4654 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4655 struct hnae3_ring_chain_node *ring_chain)
4657 struct hclge_vport *vport = hclge_get_vport(handle);
4658 struct hclge_dev *hdev = vport->back;
4661 vector_id = hclge_get_vector_index(hdev, vector);
4662 if (vector_id < 0) {
4663 dev_err(&hdev->pdev->dev,
4664 "failed to get vector index. vector=%d\n", vector);
4668 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4671 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4672 struct hnae3_ring_chain_node *ring_chain)
4674 struct hclge_vport *vport = hclge_get_vport(handle);
4675 struct hclge_dev *hdev = vport->back;
4678 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4681 vector_id = hclge_get_vector_index(hdev, vector);
4682 if (vector_id < 0) {
4683 dev_err(&handle->pdev->dev,
4684 "Get vector index fail. ret =%d\n", vector_id);
4688 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4690 dev_err(&handle->pdev->dev,
4691 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4697 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4698 struct hclge_promisc_param *param)
4700 struct hclge_promisc_cfg_cmd *req;
4701 struct hclge_desc desc;
4704 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4706 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4707 req->vf_id = param->vf_id;
4709 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4710 * pdev revision 0x20; newer revisions support them. Setting
4711 * these two fields does not cause the firmware to return an error
4712 * on revision 0x20.
4714 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4715 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4717 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4719 dev_err(&hdev->pdev->dev,
4720 "Set promisc mode fail, status is %d.\n", ret);
4725 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4726 bool en_uc, bool en_mc, bool en_bc,
4732 memset(param, 0, sizeof(struct hclge_promisc_param));
4734 param->enable = HCLGE_PROMISC_EN_UC;
4736 param->enable |= HCLGE_PROMISC_EN_MC;
4738 param->enable |= HCLGE_PROMISC_EN_BC;
4739 param->vf_id = vport_id;
4742 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4743 bool en_mc_pmc, bool en_bc_pmc)
4745 struct hclge_dev *hdev = vport->back;
4746 struct hclge_promisc_param param;
4748 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4750 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4753 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4756 struct hclge_vport *vport = hclge_get_vport(handle);
4757 bool en_bc_pmc = true;
4759 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4760 * is always bypassed. So broadcast promisc should be disabled until
4761 * the user enables promisc mode
4763 if (handle->pdev->revision == 0x20)
4764 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4766 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4770 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4772 struct hclge_get_fd_mode_cmd *req;
4773 struct hclge_desc desc;
4776 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4778 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4780 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4782 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4786 *fd_mode = req->mode;
4791 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4792 u32 *stage1_entry_num,
4793 u32 *stage2_entry_num,
4794 u16 *stage1_counter_num,
4795 u16 *stage2_counter_num)
4797 struct hclge_get_fd_allocation_cmd *req;
4798 struct hclge_desc desc;
4801 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4803 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4805 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4807 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4812 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4813 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4814 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4815 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4820 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4822 struct hclge_set_fd_key_config_cmd *req;
4823 struct hclge_fd_key_cfg *stage;
4824 struct hclge_desc desc;
4827 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4829 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4830 stage = &hdev->fd_cfg.key_cfg[stage_num];
4831 req->stage = stage_num;
4832 req->key_select = stage->key_sel;
4833 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4834 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4835 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4836 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4837 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4838 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4840 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4842 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4847 static int hclge_init_fd_config(struct hclge_dev *hdev)
4849 #define LOW_2_WORDS 0x03
4850 struct hclge_fd_key_cfg *key_cfg;
4853 if (!hnae3_dev_fd_supported(hdev))
4856 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4860 switch (hdev->fd_cfg.fd_mode) {
4861 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4862 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4864 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4865 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4868 dev_err(&hdev->pdev->dev,
4869 "Unsupported flow director mode %u\n",
4870 hdev->fd_cfg.fd_mode);
4874 hdev->fd_cfg.proto_support =
4875 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4876 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4877 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4878 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4879 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4880 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4881 key_cfg->outer_sipv6_word_en = 0;
4882 key_cfg->outer_dipv6_word_en = 0;
4884 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4885 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4886 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4887 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4889 /* If the max 400-bit key is used, tuples for ether type can also be supported */
4890 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4891 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4892 key_cfg->tuple_active |=
4893 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4896 /* roce_type is used to filter roce frames
4897 * dst_vport is used to specify the rule
4899 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4901 ret = hclge_get_fd_allocation(hdev,
4902 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4903 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4904 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4905 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4909 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
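/* Note the trade-off configured above: the 4K-deep FD mode halves
 * max_key_length, leaving no room for the MAC address tuples, so
 * ETHER_FLOW and INNER_SRC/DST_MAC are only advertised when the full
 * 400-bit key is available.
 */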
4912 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4913 int loc, u8 *key, bool is_add)
4915 struct hclge_fd_tcam_config_1_cmd *req1;
4916 struct hclge_fd_tcam_config_2_cmd *req2;
4917 struct hclge_fd_tcam_config_3_cmd *req3;
4918 struct hclge_desc desc[3];
4921 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4922 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4923 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4924 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4925 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4927 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4928 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4929 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4931 req1->stage = stage;
4932 req1->xy_sel = sel_x ? 1 : 0;
4933 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4934 req1->index = cpu_to_le32(loc);
4935 req1->entry_vld = sel_x ? is_add : 0;
4938 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4939 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4940 sizeof(req2->tcam_data));
4941 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4942 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4945 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4947 dev_err(&hdev->pdev->dev,
4948 "config tcam key fail, ret=%d\n",
4954 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4955 struct hclge_fd_ad_data *action)
4957 struct hclge_fd_ad_config_cmd *req;
4958 struct hclge_desc desc;
4962 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4964 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4965 req->index = cpu_to_le32(loc);
4968 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4969 action->write_rule_id_to_bd);
4970 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4973 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4974 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4975 action->forward_to_direct_queue);
4976 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4978 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4979 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4980 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4981 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4982 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4983 action->next_input_key);
4985 req->ad_data = cpu_to_le64(ad_data);
4986 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4988 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4993 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4994 struct hclge_fd_rule *rule)
4996 u16 tmp_x_s, tmp_y_s;
4997 u32 tmp_x_l, tmp_y_l;
5000 if (rule->unused_tuple & tuple_bit)
5003 switch (tuple_bit) {
5006 case BIT(INNER_DST_MAC):
5007 for (i = 0; i < ETH_ALEN; i++) {
5008 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5009 rule->tuples_mask.dst_mac[i]);
5010 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5011 rule->tuples_mask.dst_mac[i]);
5015 case BIT(INNER_SRC_MAC):
5016 for (i = 0; i < ETH_ALEN; i++) {
5017 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5018 rule->tuples_mask.src_mac[i]);
5019 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5020 rule->tuples_mask.src_mac[i]);
5024 case BIT(INNER_VLAN_TAG_FST):
5025 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5026 rule->tuples_mask.vlan_tag1);
5027 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5028 rule->tuples_mask.vlan_tag1);
5029 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5030 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5033 case BIT(INNER_ETH_TYPE):
5034 calc_x(tmp_x_s, rule->tuples.ether_proto,
5035 rule->tuples_mask.ether_proto);
5036 calc_y(tmp_y_s, rule->tuples.ether_proto,
5037 rule->tuples_mask.ether_proto);
5038 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5039 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5042 case BIT(INNER_IP_TOS):
5043 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5044 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5047 case BIT(INNER_IP_PROTO):
5048 calc_x(*key_x, rule->tuples.ip_proto,
5049 rule->tuples_mask.ip_proto);
5050 calc_y(*key_y, rule->tuples.ip_proto,
5051 rule->tuples_mask.ip_proto);
5054 case BIT(INNER_SRC_IP):
5055 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5056 rule->tuples_mask.src_ip[IPV4_INDEX]);
5057 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5058 rule->tuples_mask.src_ip[IPV4_INDEX]);
5059 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5060 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5063 case BIT(INNER_DST_IP):
5064 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5065 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5066 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5067 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5068 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5069 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5072 case BIT(INNER_SRC_PORT):
5073 calc_x(tmp_x_s, rule->tuples.src_port,
5074 rule->tuples_mask.src_port);
5075 calc_y(tmp_y_s, rule->tuples.src_port,
5076 rule->tuples_mask.src_port);
5077 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5078 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5081 case BIT(INNER_DST_PORT):
5082 calc_x(tmp_x_s, rule->tuples.dst_port,
5083 rule->tuples_mask.dst_port);
5084 calc_y(tmp_y_s, rule->tuples.dst_port,
5085 rule->tuples_mask.dst_port);
5086 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5087 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
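/* Note on the key_x/key_y pairs built above: a TCAM entry is typically
 * stored as two bit-vectors so that each (x, y) bit pair can encode
 * match-0, match-1 or don't-care. calc_x()/calc_y() (defined earlier in
 * this driver) derive that pair from a tuple value and its mask, which
 * is why tuples masked out via unused_tuple are simply skipped here.
 */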
5095 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5096 u8 vf_id, u8 network_port_id)
5098 u32 port_number = 0;
5100 if (port_type == HOST_PORT) {
5101 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5103 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5105 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5107 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5108 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5109 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5115 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5116 __le32 *key_x, __le32 *key_y,
5117 struct hclge_fd_rule *rule)
5119 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5120 u8 cur_pos = 0, tuple_size, shift_bits;
5123 for (i = 0; i < MAX_META_DATA; i++) {
5124 tuple_size = meta_data_key_info[i].key_length;
5125 tuple_bit = key_cfg->meta_data_active & BIT(i);
5127 switch (tuple_bit) {
5128 case BIT(ROCE_TYPE):
5129 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5130 cur_pos += tuple_size;
5132 case BIT(DST_VPORT):
5133 port_number = hclge_get_port_number(HOST_PORT, 0,
5135 hnae3_set_field(meta_data,
5136 GENMASK(cur_pos + tuple_size, cur_pos),
5137 cur_pos, port_number);
5138 cur_pos += tuple_size;
5145 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5146 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5147 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5149 *key_x = cpu_to_le32(tmp_x << shift_bits);
5150 *key_y = cpu_to_le32(tmp_y << shift_bits);
5153 /* A complete key is the combination of the meta data key and the tuple key.
5154 * The meta data key is stored in the MSB region, the tuple key is stored in
5155 * the LSB region, and unused bits are filled with 0.
5157 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5158 struct hclge_fd_rule *rule)
5160 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5161 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5162 u8 *cur_key_x, *cur_key_y;
5164 int ret, tuple_size;
5165 u8 meta_data_region;
5167 memset(key_x, 0, sizeof(key_x));
5168 memset(key_y, 0, sizeof(key_y));
5172 for (i = 0; i < MAX_TUPLE; i++) {
5176 tuple_size = tuple_key_info[i].key_length / 8;
5177 check_tuple = key_cfg->tuple_active & BIT(i);
5179 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5182 cur_key_x += tuple_size;
5183 cur_key_y += tuple_size;
5187 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5188 MAX_META_DATA_LENGTH / 8;
5190 hclge_fd_convert_meta_data(key_cfg,
5191 (__le32 *)(key_x + meta_data_region),
5192 (__le32 *)(key_y + meta_data_region),
5195 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5198 dev_err(&hdev->pdev->dev,
5199 "fd key_y config fail, loc=%u, ret=%d\n",
5200 rule->location, ret);
5204 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5207 dev_err(&hdev->pdev->dev,
5208 "fd key_x config fail, loc=%u, ret=%d\n",
5209 rule->location, ret);
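/* Resulting key layout (widths illustrative, per the comment above):
 *
 *   MSB                                      LSB
 *   +---------------+--------+--------------+
 *   | meta data key | zeroes | tuple key    |
 *   +---------------+--------+--------------+
 *
 * meta_data_region places the meta data at the top of the key buffer,
 * while the tuple bytes accumulate from offset 0 via cur_key_x/cur_key_y;
 * the initial memset() provides the zero fill in between.
 */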
5213 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5214 struct hclge_fd_rule *rule)
5216 struct hclge_fd_ad_data ad_data;
5218 ad_data.ad_id = rule->location;
5220 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5221 ad_data.drop_packet = true;
5222 ad_data.forward_to_direct_queue = false;
5223 ad_data.queue_id = 0;
5225 ad_data.drop_packet = false;
5226 ad_data.forward_to_direct_queue = true;
5227 ad_data.queue_id = rule->queue_id;
5230 ad_data.use_counter = false;
5231 ad_data.counter_id = 0;
5233 ad_data.use_next_stage = false;
5234 ad_data.next_input_key = 0;
5236 ad_data.write_rule_id_to_bd = true;
5237 ad_data.rule_id = rule->location;
5239 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5242 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5243 struct ethtool_rx_flow_spec *fs, u32 *unused)
5245 struct ethtool_tcpip4_spec *tcp_ip4_spec;
5246 struct ethtool_usrip4_spec *usr_ip4_spec;
5247 struct ethtool_tcpip6_spec *tcp_ip6_spec;
5248 struct ethtool_usrip6_spec *usr_ip6_spec;
5249 struct ethhdr *ether_spec;
5251 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5254 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5257 if ((fs->flow_type & FLOW_EXT) &&
5258 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5259 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5263 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5267 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5268 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5270 if (!tcp_ip4_spec->ip4src)
5271 *unused |= BIT(INNER_SRC_IP);
5273 if (!tcp_ip4_spec->ip4dst)
5274 *unused |= BIT(INNER_DST_IP);
5276 if (!tcp_ip4_spec->psrc)
5277 *unused |= BIT(INNER_SRC_PORT);
5279 if (!tcp_ip4_spec->pdst)
5280 *unused |= BIT(INNER_DST_PORT);
5282 if (!tcp_ip4_spec->tos)
5283 *unused |= BIT(INNER_IP_TOS);
5287 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5288 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5289 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5291 if (!usr_ip4_spec->ip4src)
5292 *unused |= BIT(INNER_SRC_IP);
5294 if (!usr_ip4_spec->ip4dst)
5295 *unused |= BIT(INNER_DST_IP);
5297 if (!usr_ip4_spec->tos)
5298 *unused |= BIT(INNER_IP_TOS);
5300 if (!usr_ip4_spec->proto)
5301 *unused |= BIT(INNER_IP_PROTO);
5303 if (usr_ip4_spec->l4_4_bytes)
5306 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5313 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5314 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5317 /* check whether the src/dst ip address is used */
5318 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5319 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5320 *unused |= BIT(INNER_SRC_IP);
5322 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5323 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5324 *unused |= BIT(INNER_DST_IP);
5326 if (!tcp_ip6_spec->psrc)
5327 *unused |= BIT(INNER_SRC_PORT);
5329 if (!tcp_ip6_spec->pdst)
5330 *unused |= BIT(INNER_DST_PORT);
5332 if (tcp_ip6_spec->tclass)
5336 case IPV6_USER_FLOW:
5337 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5338 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5339 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5340 BIT(INNER_DST_PORT);
5342 /* check whether the src/dst ip address is used */
5343 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5344 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5345 *unused |= BIT(INNER_SRC_IP);
5347 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5348 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5349 *unused |= BIT(INNER_DST_IP);
5351 if (!usr_ip6_spec->l4_proto)
5352 *unused |= BIT(INNER_IP_PROTO);
5354 if (usr_ip6_spec->tclass)
5357 if (usr_ip6_spec->l4_4_bytes)
5362 ether_spec = &fs->h_u.ether_spec;
5363 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5364 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5365 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5367 if (is_zero_ether_addr(ether_spec->h_source))
5368 *unused |= BIT(INNER_SRC_MAC);
5370 if (is_zero_ether_addr(ether_spec->h_dest))
5371 *unused |= BIT(INNER_DST_MAC);
5373 if (!ether_spec->h_proto)
5374 *unused |= BIT(INNER_ETH_TYPE);
5381 if ((fs->flow_type & FLOW_EXT)) {
5382 if (fs->h_ext.vlan_etype)
5384 if (!fs->h_ext.vlan_tci)
5385 *unused |= BIT(INNER_VLAN_TAG_FST);
5387 if (fs->m_ext.vlan_tci) {
5388 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5392 *unused |= BIT(INNER_VLAN_TAG_FST);
5395 if (fs->flow_type & FLOW_MAC_EXT) {
5396 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5399 if (is_zero_ether_addr(fs->h_ext.h_dest))
5400 *unused |= BIT(INNER_DST_MAC);
5402 *unused &= ~(BIT(INNER_DST_MAC));
5408 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5410 struct hclge_fd_rule *rule = NULL;
5411 struct hlist_node *node2;
5413 spin_lock_bh(&hdev->fd_rule_lock);
5414 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5415 if (rule->location >= location)
5419 spin_unlock_bh(&hdev->fd_rule_lock);
5421 return rule && rule->location == location;
5424 /* must be called with fd_rule_lock held */
5425 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5426 struct hclge_fd_rule *new_rule,
5430 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5431 struct hlist_node *node2;
5433 if (is_add && !new_rule)
5436 hlist_for_each_entry_safe(rule, node2,
5437 &hdev->fd_rule_list, rule_node) {
5438 if (rule->location >= location)
5443 if (rule && rule->location == location) {
5444 hlist_del(&rule->rule_node);
5446 hdev->hclge_fd_rule_num--;
5449 if (!hdev->hclge_fd_rule_num)
5450 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5451 clear_bit(location, hdev->fd_bmap);
5455 } else if (!is_add) {
5456 dev_err(&hdev->pdev->dev,
5457 "delete fail, rule %u is inexistent\n",
5462 INIT_HLIST_NODE(&new_rule->rule_node);
5465 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5467 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5469 set_bit(location, hdev->fd_bmap);
5470 hdev->hclge_fd_rule_num++;
5471 hdev->fd_active_type = new_rule->rule_type;
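/* The rule list is kept sorted by location: the walk above stops at the
 * first node whose location is >= the target, so a new rule lands either
 * behind that node's predecessor (hlist_add_behind) or at the list head,
 * and lookups can terminate early. E.g. inserting location 5 into the
 * list 1 -> 4 -> 9 places it between 4 and 9.
 */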
5476 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5477 struct ethtool_rx_flow_spec *fs,
5478 struct hclge_fd_rule *rule)
5480 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5482 switch (flow_type) {
5486 rule->tuples.src_ip[IPV4_INDEX] =
5487 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5488 rule->tuples_mask.src_ip[IPV4_INDEX] =
5489 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5491 rule->tuples.dst_ip[IPV4_INDEX] =
5492 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5493 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5494 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5496 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5497 rule->tuples_mask.src_port =
5498 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5500 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5501 rule->tuples_mask.dst_port =
5502 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5504 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5505 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5507 rule->tuples.ether_proto = ETH_P_IP;
5508 rule->tuples_mask.ether_proto = 0xFFFF;
5512 rule->tuples.src_ip[IPV4_INDEX] =
5513 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5514 rule->tuples_mask.src_ip[IPV4_INDEX] =
5515 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5517 rule->tuples.dst_ip[IPV4_INDEX] =
5518 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5519 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5520 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5522 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5523 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5525 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5526 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5528 rule->tuples.ether_proto = ETH_P_IP;
5529 rule->tuples_mask.ether_proto = 0xFFFF;
5535 be32_to_cpu_array(rule->tuples.src_ip,
5536 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5537 be32_to_cpu_array(rule->tuples_mask.src_ip,
5538 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5540 be32_to_cpu_array(rule->tuples.dst_ip,
5541 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5542 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5543 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5545 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5546 rule->tuples_mask.src_port =
5547 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5549 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5550 rule->tuples_mask.dst_port =
5551 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5553 rule->tuples.ether_proto = ETH_P_IPV6;
5554 rule->tuples_mask.ether_proto = 0xFFFF;
5557 case IPV6_USER_FLOW:
5558 be32_to_cpu_array(rule->tuples.src_ip,
5559 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5560 be32_to_cpu_array(rule->tuples_mask.src_ip,
5561 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5563 be32_to_cpu_array(rule->tuples.dst_ip,
5564 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5565 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5566 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5568 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5569 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5571 rule->tuples.ether_proto = ETH_P_IPV6;
5572 rule->tuples_mask.ether_proto = 0xFFFF;
5576 ether_addr_copy(rule->tuples.src_mac,
5577 fs->h_u.ether_spec.h_source);
5578 ether_addr_copy(rule->tuples_mask.src_mac,
5579 fs->m_u.ether_spec.h_source);
5581 ether_addr_copy(rule->tuples.dst_mac,
5582 fs->h_u.ether_spec.h_dest);
5583 ether_addr_copy(rule->tuples_mask.dst_mac,
5584 fs->m_u.ether_spec.h_dest);
5586 rule->tuples.ether_proto =
5587 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5588 rule->tuples_mask.ether_proto =
5589 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5596 switch (flow_type) {
5599 rule->tuples.ip_proto = IPPROTO_SCTP;
5600 rule->tuples_mask.ip_proto = 0xFF;
5604 rule->tuples.ip_proto = IPPROTO_TCP;
5605 rule->tuples_mask.ip_proto = 0xFF;
5609 rule->tuples.ip_proto = IPPROTO_UDP;
5610 rule->tuples_mask.ip_proto = 0xFF;
5616 if ((fs->flow_type & FLOW_EXT)) {
5617 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5618 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5621 if (fs->flow_type & FLOW_MAC_EXT) {
5622 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5623 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5629 /* must be called with fd_rule_lock held */
5630 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5631 struct hclge_fd_rule *rule)
5636 dev_err(&hdev->pdev->dev,
5637 "The flow director rule is NULL\n");
5641 /* it will never fail here, so there is no need to check the return value */
5642 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5644 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5648 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5655 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5659 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5660 struct ethtool_rxnfc *cmd)
5662 struct hclge_vport *vport = hclge_get_vport(handle);
5663 struct hclge_dev *hdev = vport->back;
5664 u16 dst_vport_id = 0, q_index = 0;
5665 struct ethtool_rx_flow_spec *fs;
5666 struct hclge_fd_rule *rule;
5671 if (!hnae3_dev_fd_supported(hdev))
5675 dev_warn(&hdev->pdev->dev,
5676 "Please enable flow director first\n");
5680 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5682 ret = hclge_fd_check_spec(hdev, fs, &unused);
5684 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5688 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5689 action = HCLGE_FD_ACTION_DROP_PACKET;
5691 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5692 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5695 if (vf > hdev->num_req_vfs) {
5696 dev_err(&hdev->pdev->dev,
5697 "Error: vf id (%u) > max vf num (%u)\n",
5698 vf, hdev->num_req_vfs);
5702 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5703 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5706 dev_err(&hdev->pdev->dev,
5707 "Error: queue id (%u) > max tqp num (%u)\n",
5712 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5716 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5720 ret = hclge_fd_get_tuple(hdev, fs, rule);
5726 rule->flow_type = fs->flow_type;
5728 rule->location = fs->location;
5729 rule->unused_tuple = unused;
5730 rule->vf_id = dst_vport_id;
5731 rule->queue_id = q_index;
5732 rule->action = action;
5733 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5735 /* to avoid rule conflicts, when the user configures a rule via ethtool,
5736 * we need to clear all arfs rules
5738 hclge_clear_arfs_rules(handle);
5740 spin_lock_bh(&hdev->fd_rule_lock);
5741 ret = hclge_fd_config_rule(hdev, rule);
5743 spin_unlock_bh(&hdev->fd_rule_lock);
5748 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5749 struct ethtool_rxnfc *cmd)
5751 struct hclge_vport *vport = hclge_get_vport(handle);
5752 struct hclge_dev *hdev = vport->back;
5753 struct ethtool_rx_flow_spec *fs;
5756 if (!hnae3_dev_fd_supported(hdev))
5759 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5761 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5764 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5765 dev_err(&hdev->pdev->dev,
5766 "Delete fail, rule %u is inexistent\n", fs->location);
5770 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5775 spin_lock_bh(&hdev->fd_rule_lock);
5776 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5778 spin_unlock_bh(&hdev->fd_rule_lock);
5783 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5786 struct hclge_vport *vport = hclge_get_vport(handle);
5787 struct hclge_dev *hdev = vport->back;
5788 struct hclge_fd_rule *rule;
5789 struct hlist_node *node;
5792 if (!hnae3_dev_fd_supported(hdev))
5795 spin_lock_bh(&hdev->fd_rule_lock);
5796 for_each_set_bit(location, hdev->fd_bmap,
5797 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5798 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5802 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5804 hlist_del(&rule->rule_node);
5807 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5808 hdev->hclge_fd_rule_num = 0;
5809 bitmap_zero(hdev->fd_bmap,
5810 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5813 spin_unlock_bh(&hdev->fd_rule_lock);
5816 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5818 struct hclge_vport *vport = hclge_get_vport(handle);
5819 struct hclge_dev *hdev = vport->back;
5820 struct hclge_fd_rule *rule;
5821 struct hlist_node *node;
5824 /* Return ok here, because reset error handling will check this
5825 * return value. If error is returned here, the reset process will
5828 if (!hnae3_dev_fd_supported(hdev))
5831 /* if fd is disabled, it should not be restored during reset */
5835 spin_lock_bh(&hdev->fd_rule_lock);
5836 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5837 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5839 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5842 dev_warn(&hdev->pdev->dev,
5843 "Restore rule %u failed, remove it\n",
5845 clear_bit(rule->location, hdev->fd_bmap);
5846 hlist_del(&rule->rule_node);
5848 hdev->hclge_fd_rule_num--;
5852 if (hdev->hclge_fd_rule_num)
5853 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5855 spin_unlock_bh(&hdev->fd_rule_lock);
5860 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5861 struct ethtool_rxnfc *cmd)
5863 struct hclge_vport *vport = hclge_get_vport(handle);
5864 struct hclge_dev *hdev = vport->back;
5866 if (!hnae3_dev_fd_supported(hdev))
5869 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5870 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5875 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5876 struct ethtool_rxnfc *cmd)
5878 struct hclge_vport *vport = hclge_get_vport(handle);
5879 struct hclge_fd_rule *rule = NULL;
5880 struct hclge_dev *hdev = vport->back;
5881 struct ethtool_rx_flow_spec *fs;
5882 struct hlist_node *node2;
5884 if (!hnae3_dev_fd_supported(hdev))
5887 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5889 spin_lock_bh(&hdev->fd_rule_lock);
5891 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5892 if (rule->location >= fs->location)
5896 if (!rule || fs->location != rule->location) {
5897 spin_unlock_bh(&hdev->fd_rule_lock);
5902 fs->flow_type = rule->flow_type;
5903 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5907 fs->h_u.tcp_ip4_spec.ip4src =
5908 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5909 fs->m_u.tcp_ip4_spec.ip4src =
5910 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5911 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5913 fs->h_u.tcp_ip4_spec.ip4dst =
5914 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5915 fs->m_u.tcp_ip4_spec.ip4dst =
5916 rule->unused_tuple & BIT(INNER_DST_IP) ?
5917 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5919 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5920 fs->m_u.tcp_ip4_spec.psrc =
5921 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5922 0 : cpu_to_be16(rule->tuples_mask.src_port);
5924 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5925 fs->m_u.tcp_ip4_spec.pdst =
5926 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5927 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5929 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5930 fs->m_u.tcp_ip4_spec.tos =
5931 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5932 0 : rule->tuples_mask.ip_tos;
5936 fs->h_u.usr_ip4_spec.ip4src =
5937 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5938 fs->m_u.usr_ip4_spec.ip4src =
5939 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5940 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5942 fs->h_u.usr_ip4_spec.ip4dst =
5943 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5944 fs->m_u.usr_ip4_spec.ip4dst =
5945 rule->unused_tuple & BIT(INNER_DST_IP) ?
5946 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5948 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5949 fs->m_u.usr_ip4_spec.tos =
5950 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5951 0 : rule->tuples_mask.ip_tos;
5953 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5954 fs->m_u.usr_ip4_spec.proto =
5955 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5956 0 : rule->tuples_mask.ip_proto;
5958 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5964 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5965 rule->tuples.src_ip, IPV6_SIZE);
5966 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5967 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5968 sizeof(int) * IPV6_SIZE);
5970 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5971 rule->tuples_mask.src_ip, IPV6_SIZE);
5973 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5974 rule->tuples.dst_ip, IPV6_SIZE);
5975 if (rule->unused_tuple & BIT(INNER_DST_IP))
5976 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5977 sizeof(int) * IPV6_SIZE);
5979 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5980 rule->tuples_mask.dst_ip, IPV6_SIZE);
5982 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5983 fs->m_u.tcp_ip6_spec.psrc =
5984 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5985 0 : cpu_to_be16(rule->tuples_mask.src_port);
5987 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5988 fs->m_u.tcp_ip6_spec.pdst =
5989 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5990 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5993 case IPV6_USER_FLOW:
5994 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5995 rule->tuples.src_ip, IPV6_SIZE);
5996 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5997 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5998 sizeof(int) * IPV6_SIZE);
6000 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6001 rule->tuples_mask.src_ip, IPV6_SIZE);
6003 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6004 rule->tuples.dst_ip, IPV6_SIZE);
6005 if (rule->unused_tuple & BIT(INNER_DST_IP))
6006 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6007 sizeof(int) * IPV6_SIZE);
6009 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6010 rule->tuples_mask.dst_ip, IPV6_SIZE);
6012 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6013 fs->m_u.usr_ip6_spec.l4_proto =
6014 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6015 0 : rule->tuples_mask.ip_proto;
6019 ether_addr_copy(fs->h_u.ether_spec.h_source,
6020 rule->tuples.src_mac);
6021 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6022 eth_zero_addr(fs->m_u.ether_spec.h_source);
6024 ether_addr_copy(fs->m_u.ether_spec.h_source,
6025 rule->tuples_mask.src_mac);
6027 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6028 rule->tuples.dst_mac);
6029 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6030 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6032 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6033 rule->tuples_mask.dst_mac);
6035 fs->h_u.ether_spec.h_proto =
6036 cpu_to_be16(rule->tuples.ether_proto);
6037 fs->m_u.ether_spec.h_proto =
6038 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6039 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6043 spin_unlock_bh(&hdev->fd_rule_lock);
6047 if (fs->flow_type & FLOW_EXT) {
6048 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6049 fs->m_ext.vlan_tci =
6050 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6051 cpu_to_be16(VLAN_VID_MASK) :
6052 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6055 if (fs->flow_type & FLOW_MAC_EXT) {
6056 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6057 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6058 eth_zero_addr(fs->m_ext.h_dest);
6060 ether_addr_copy(fs->m_ext.h_dest,
6061 rule->tuples_mask.dst_mac);
6064 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6065 fs->ring_cookie = RX_CLS_FLOW_DISC;
6069 fs->ring_cookie = rule->queue_id;
6070 vf_id = rule->vf_id;
6071 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6072 fs->ring_cookie |= vf_id;
6075 spin_unlock_bh(&hdev->fd_rule_lock);
6080 static int hclge_get_all_rules(struct hnae3_handle *handle,
6081 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6083 struct hclge_vport *vport = hclge_get_vport(handle);
6084 struct hclge_dev *hdev = vport->back;
6085 struct hclge_fd_rule *rule;
6086 struct hlist_node *node2;
6089 if (!hnae3_dev_fd_supported(hdev))
6092 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6094 spin_lock_bh(&hdev->fd_rule_lock);
6095 hlist_for_each_entry_safe(rule, node2,
6096 &hdev->fd_rule_list, rule_node) {
6097 if (cnt == cmd->rule_cnt) {
6098 spin_unlock_bh(&hdev->fd_rule_lock);
6102 rule_locs[cnt] = rule->location;
6106 spin_unlock_bh(&hdev->fd_rule_lock);
6108 cmd->rule_cnt = cnt;
6113 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6114 struct hclge_fd_rule_tuples *tuples)
6116 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6117 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6119 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6120 tuples->ip_proto = fkeys->basic.ip_proto;
6121 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6123 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6124 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6125 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6129 for (i = 0; i < IPV6_SIZE; i++) {
6130 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6131 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
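/* Note: an IPv4 address is stored in the last u32 of the src_ip/dst_ip
 * arrays (index 3 here, presumably matching IPV4_INDEX used elsewhere in
 * this file), so IPv4 and IPv6 rules share a single tuple layout.
 */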
6136 /* traverse all rules, check whether an existing rule has the same tuples */
6137 static struct hclge_fd_rule *
6138 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6139 const struct hclge_fd_rule_tuples *tuples)
6141 struct hclge_fd_rule *rule = NULL;
6142 struct hlist_node *node;
6144 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6145 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6152 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6153 struct hclge_fd_rule *rule)
6155 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6156 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6157 BIT(INNER_SRC_PORT);
6160 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6161 if (tuples->ether_proto == ETH_P_IP) {
6162 if (tuples->ip_proto == IPPROTO_TCP)
6163 rule->flow_type = TCP_V4_FLOW;
6165 rule->flow_type = UDP_V4_FLOW;
6167 if (tuples->ip_proto == IPPROTO_TCP)
6168 rule->flow_type = TCP_V6_FLOW;
6170 rule->flow_type = UDP_V6_FLOW;
6172 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6173 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6176 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6177 u16 flow_id, struct flow_keys *fkeys)
6179 struct hclge_vport *vport = hclge_get_vport(handle);
6180 struct hclge_fd_rule_tuples new_tuples;
6181 struct hclge_dev *hdev = vport->back;
6182 struct hclge_fd_rule *rule;
6187 if (!hnae3_dev_fd_supported(hdev))
6190 memset(&new_tuples, 0, sizeof(new_tuples));
6191 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6193 spin_lock_bh(&hdev->fd_rule_lock);
6195 /* when there is already an fd rule added by the user,
6196 * arfs should not work
6198 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6199 spin_unlock_bh(&hdev->fd_rule_lock);
6204 /* check whether a flow director filter exists for this flow;
6205 * if not, create a new filter for it;
6206 * if a filter exists with a different queue id, modify the filter;
6207 * if a filter exists with the same queue id, do nothing
6209 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6211 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6212 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6213 spin_unlock_bh(&hdev->fd_rule_lock);
6218 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6220 spin_unlock_bh(&hdev->fd_rule_lock);
6225 set_bit(bit_id, hdev->fd_bmap);
6226 rule->location = bit_id;
6227 rule->flow_id = flow_id;
6228 rule->queue_id = queue_id;
6229 hclge_fd_build_arfs_rule(&new_tuples, rule);
6230 ret = hclge_fd_config_rule(hdev, rule);
6232 spin_unlock_bh(&hdev->fd_rule_lock);
6237 return rule->location;
6240 spin_unlock_bh(&hdev->fd_rule_lock);
6242 if (rule->queue_id == queue_id)
6243 return rule->location;
6245 tmp_queue_id = rule->queue_id;
6246 rule->queue_id = queue_id;
6247 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6249 rule->queue_id = tmp_queue_id;
6253 return rule->location;
6256 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6258 #ifdef CONFIG_RFS_ACCEL
6259 struct hnae3_handle *handle = &hdev->vport[0].nic;
6260 struct hclge_fd_rule *rule;
6261 struct hlist_node *node;
6262 HLIST_HEAD(del_list);
6264 spin_lock_bh(&hdev->fd_rule_lock);
6265 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6266 spin_unlock_bh(&hdev->fd_rule_lock);
6269 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6270 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6271 rule->flow_id, rule->location)) {
6272 hlist_del_init(&rule->rule_node);
6273 hlist_add_head(&rule->rule_node, &del_list);
6274 hdev->hclge_fd_rule_num--;
6275 clear_bit(rule->location, hdev->fd_bmap);
6278 spin_unlock_bh(&hdev->fd_rule_lock);
6280 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6281 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6282 rule->location, NULL, false);
6288 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6290 #ifdef CONFIG_RFS_ACCEL
6291 struct hclge_vport *vport = hclge_get_vport(handle);
6292 struct hclge_dev *hdev = vport->back;
6294 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6295 hclge_del_all_fd_entries(handle, true);
6299 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6301 struct hclge_vport *vport = hclge_get_vport(handle);
6302 struct hclge_dev *hdev = vport->back;
6304 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6305 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6308 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6310 struct hclge_vport *vport = hclge_get_vport(handle);
6311 struct hclge_dev *hdev = vport->back;
6313 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6316 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6318 struct hclge_vport *vport = hclge_get_vport(handle);
6319 struct hclge_dev *hdev = vport->back;
6321 return hdev->rst_stats.hw_reset_done_cnt;
6324 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6326 struct hclge_vport *vport = hclge_get_vport(handle);
6327 struct hclge_dev *hdev = vport->back;
6330 hdev->fd_en = enable;
6331 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6333 hclge_del_all_fd_entries(handle, clear);
6335 hclge_restore_fd_entries(handle);
6338 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6340 struct hclge_desc desc;
6341 struct hclge_config_mac_mode_cmd *req =
6342 (struct hclge_config_mac_mode_cmd *)desc.data;
6346 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6349 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6350 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6351 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6352 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6353 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6354 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6355 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6356 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6357 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6358 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6361 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6363 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6365 dev_err(&hdev->pdev->dev,
6366 "mac enable fail, ret =%d.\n", ret);
6369 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6370 u8 switch_param, u8 param_mask)
6372 struct hclge_mac_vlan_switch_cmd *req;
6373 struct hclge_desc desc;
6377 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6378 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6380 /* read current config parameter */
6381 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6383 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6384 req->func_id = cpu_to_le32(func_id);
6386 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6388 dev_err(&hdev->pdev->dev,
6389 "read mac vlan switch parameter fail, ret = %d\n", ret);
6393 /* modify and write new config parameter */
6394 hclge_cmd_reuse_desc(&desc, false);
6395 req->switch_param = (req->switch_param & param_mask) | switch_param;
6396 req->param_mask = param_mask;
6398 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6400 dev_err(&hdev->pdev->dev,
6401 "set mac vlan switch parameter fail, ret = %d\n", ret);
6405 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6408 #define HCLGE_PHY_LINK_STATUS_NUM 200
6410 struct phy_device *phydev = hdev->hw.mac.phydev;
6415 ret = phy_read_status(phydev);
6417 dev_err(&hdev->pdev->dev,
6418 "phy update link status fail, ret = %d\n", ret);
6422 if (phydev->link == link_ret)
6425 msleep(HCLGE_LINK_STATUS_MS);
6426 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6429 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6431 #define HCLGE_MAC_LINK_STATUS_NUM 100
6437 ret = hclge_get_mac_link_status(hdev);
6440 else if (ret == link_ret)
6443 msleep(HCLGE_LINK_STATUS_MS);
6444 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
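/* Both waits above poll at HCLGE_LINK_STATUS_MS (10 ms) granularity, so
 * the worst-case budget is roughly 200 * 10 ms = 2 s for the PHY wait
 * and 100 * 10 ms = 1 s for the MAC wait before giving up.
 */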
6448 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6451 #define HCLGE_LINK_STATUS_DOWN 0
6452 #define HCLGE_LINK_STATUS_UP 1
6456 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6459 hclge_phy_link_status_wait(hdev, link_ret);
6461 return hclge_mac_link_status_wait(hdev, link_ret);
6464 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6466 struct hclge_config_mac_mode_cmd *req;
6467 struct hclge_desc desc;
6471 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6472 /* 1 Read out the MAC mode config first */
6473 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6474 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6476 dev_err(&hdev->pdev->dev,
6477 "mac loopback get fail, ret =%d.\n", ret);
6481 /* 2 Then setup the loopback flag */
6482 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6483 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6484 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6485 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6487 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6489 /* 3 Config mac work mode with loopback flag
6490 * and its original configuration parameters
6492 hclge_cmd_reuse_desc(&desc, false);
6493 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6495 dev_err(&hdev->pdev->dev,
6496 "mac loopback set fail, ret =%d.\n", ret);
6500 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6501 enum hnae3_loop loop_mode)
6503 #define HCLGE_SERDES_RETRY_MS 10
6504 #define HCLGE_SERDES_RETRY_NUM 100
6506 struct hclge_serdes_lb_cmd *req;
6507 struct hclge_desc desc;
6511 req = (struct hclge_serdes_lb_cmd *)desc.data;
6512 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6514 switch (loop_mode) {
6515 case HNAE3_LOOP_SERIAL_SERDES:
6516 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6518 case HNAE3_LOOP_PARALLEL_SERDES:
6519 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6522 dev_err(&hdev->pdev->dev,
6523 "unsupported serdes loopback mode %d\n", loop_mode);
6528 req->enable = loop_mode_b;
6529 req->mask = loop_mode_b;
6531 req->mask = loop_mode_b;
6534 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6536 dev_err(&hdev->pdev->dev,
6537 "serdes loopback set fail, ret = %d\n", ret);
6542 msleep(HCLGE_SERDES_RETRY_MS);
6543 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6545 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6547 dev_err(&hdev->pdev->dev,
6548 "serdes loopback get, ret = %d\n", ret);
6551 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6552 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6554 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6555 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6557 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6558 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6564 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6565 enum hnae3_loop loop_mode)
6569 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6573 hclge_cfg_mac_mode(hdev, en);
6575 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6577 dev_err(&hdev->pdev->dev,
6578 "serdes loopback config mac mode timeout\n");
6583 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6584 struct phy_device *phydev)
6588 if (!phydev->suspended) {
6589 ret = phy_suspend(phydev);
6594 ret = phy_resume(phydev);
6598 return phy_loopback(phydev, true);
6601 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6602 struct phy_device *phydev)
6606 ret = phy_loopback(phydev, false);
6610 return phy_suspend(phydev);
6613 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6615 struct phy_device *phydev = hdev->hw.mac.phydev;
6622 ret = hclge_enable_phy_loopback(hdev, phydev);
6624 ret = hclge_disable_phy_loopback(hdev, phydev);
6626 dev_err(&hdev->pdev->dev,
6627 "set phy loopback fail, ret = %d\n", ret);
6631 hclge_cfg_mac_mode(hdev, en);
6633 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6635 dev_err(&hdev->pdev->dev,
6636 "phy loopback config mac mode timeout\n");
6641 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6642 int stream_id, bool enable)
6644 struct hclge_desc desc;
6645 struct hclge_cfg_com_tqp_queue_cmd *req =
6646 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6649 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6650 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6651 req->stream_id = cpu_to_le16(stream_id);
6653 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6655 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6657 dev_err(&hdev->pdev->dev,
6658 "Tqp enable fail, status =%d.\n", ret);
6662 static int hclge_set_loopback(struct hnae3_handle *handle,
6663 enum hnae3_loop loop_mode, bool en)
6665 struct hclge_vport *vport = hclge_get_vport(handle);
6666 struct hnae3_knic_private_info *kinfo;
6667 struct hclge_dev *hdev = vport->back;
6670 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6671 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6672 * the same, the packets are looped back in the SSU. If SSU loopback
6673 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6675 if (hdev->pdev->revision >= 0x21) {
6676 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6678 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6679 HCLGE_SWITCH_ALW_LPBK_MASK);
6684 switch (loop_mode) {
6685 case HNAE3_LOOP_APP:
6686 ret = hclge_set_app_loopback(hdev, en);
6688 case HNAE3_LOOP_SERIAL_SERDES:
6689 case HNAE3_LOOP_PARALLEL_SERDES:
6690 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6692 case HNAE3_LOOP_PHY:
6693 ret = hclge_set_phy_loopback(hdev, en);
6697 dev_err(&hdev->pdev->dev,
6698 "loop_mode %d is not supported\n", loop_mode);
6705 kinfo = &vport->nic.kinfo;
6706 for (i = 0; i < kinfo->num_tqps; i++) {
6707 ret = hclge_tqp_enable(hdev, i, 0, en);
6715 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6719 ret = hclge_set_app_loopback(hdev, false);
6723 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6727 return hclge_cfg_serdes_loopback(hdev, false,
6728 HNAE3_LOOP_PARALLEL_SERDES);
6731 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6733 struct hclge_vport *vport = hclge_get_vport(handle);
6734 struct hnae3_knic_private_info *kinfo;
6735 struct hnae3_queue *queue;
6736 struct hclge_tqp *tqp;
6739 kinfo = &vport->nic.kinfo;
6740 for (i = 0; i < kinfo->num_tqps; i++) {
6741 queue = handle->kinfo.tqp[i];
6742 tqp = container_of(queue, struct hclge_tqp, q);
6743 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
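/* hclge_flush_link_update() below is a bounded busy-wait: it spins while
 * HCLGE_STATE_LINK_UPDATING is set, giving up after
 * HCLGE_FLUSH_LINK_TIMEOUT iterations or as soon as the service task
 * makes progress (hdev->serv_processed_cnt changes), so a pending link
 * update cannot stall hclge_set_timer_task() indefinitely.
 */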
6747 static void hclge_flush_link_update(struct hclge_dev *hdev)
6749 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
6751 unsigned long last = hdev->serv_processed_cnt;
6754 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6755 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6756 last == hdev->serv_processed_cnt)
6760 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6762 struct hclge_vport *vport = hclge_get_vport(handle);
6763 struct hclge_dev *hdev = vport->back;
6766 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6768 /* Set the DOWN flag here to disable link updating */
6769 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6771 /* flush memory to make sure DOWN is seen by service task */
6772 smp_mb__before_atomic();
6773 hclge_flush_link_update(hdev);
6777 static int hclge_ae_start(struct hnae3_handle *handle)
6779 struct hclge_vport *vport = hclge_get_vport(handle);
6780 struct hclge_dev *hdev = vport->back;
6783 hclge_cfg_mac_mode(hdev, true);
6784 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6785 hdev->hw.mac.link = 0;
6787 /* reset tqp stats */
6788 hclge_reset_tqp_stats(handle);
6790 hclge_mac_start_phy(hdev);
6795 static void hclge_ae_stop(struct hnae3_handle *handle)
6797 struct hclge_vport *vport = hclge_get_vport(handle);
6798 struct hclge_dev *hdev = vport->back;
6801 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6803 hclge_clear_arfs_rules(handle);
6805 /* If it is not PF reset, the firmware will disable the MAC,
6806 * so it only needs to stop the PHY here.
6808 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6809 hdev->reset_type != HNAE3_FUNC_RESET) {
6810 hclge_mac_stop_phy(hdev);
6811 hclge_update_link_status(hdev);
6815 for (i = 0; i < handle->kinfo.num_tqps; i++)
6816 hclge_reset_tqp(handle, i);
6818 hclge_config_mac_tnl_int(hdev, false);
6821 hclge_cfg_mac_mode(hdev, false);
6823 hclge_mac_stop_phy(hdev);
6825 /* reset tqp stats */
6826 hclge_reset_tqp_stats(handle);
6827 hclge_update_link_status(hdev);
6830 int hclge_vport_start(struct hclge_vport *vport)
6832 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6833 vport->last_active_jiffies = jiffies;
6837 void hclge_vport_stop(struct hclge_vport *vport)
6839 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6842 static int hclge_client_start(struct hnae3_handle *handle)
6844 struct hclge_vport *vport = hclge_get_vport(handle);
6846 return hclge_vport_start(vport);
6849 static void hclge_client_stop(struct hnae3_handle *handle)
6851 struct hclge_vport *vport = hclge_get_vport(handle);
6853 hclge_vport_stop(vport);
6856 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6857 u16 cmdq_resp, u8 resp_code,
6858 enum hclge_mac_vlan_tbl_opcode op)
6860 struct hclge_dev *hdev = vport->back;
6863 dev_err(&hdev->pdev->dev,
6864 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6869 if (op == HCLGE_MAC_VLAN_ADD) {
6870 if ((!resp_code) || (resp_code == 1)) {
6872 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6873 dev_err(&hdev->pdev->dev,
6874 "add mac addr failed for uc_overflow.\n");
6876 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6877 dev_err(&hdev->pdev->dev,
6878 "add mac addr failed for mc_overflow.\n");
6882 dev_err(&hdev->pdev->dev,
6883 "add mac addr failed for undefined, code=%u.\n",
6886 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6889 } else if (resp_code == 1) {
6890 dev_dbg(&hdev->pdev->dev,
6891 "remove mac addr failed for miss.\n");
6895 dev_err(&hdev->pdev->dev,
6896 "remove mac addr failed for undefined, code=%u.\n",
6899 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6902 } else if (resp_code == 1) {
6903 dev_dbg(&hdev->pdev->dev,
6904 "lookup mac addr failed for miss.\n");
6908 dev_err(&hdev->pdev->dev,
6909 "lookup mac addr failed for undefined, code=%u.\n",
6914 dev_err(&hdev->pdev->dev,
6915 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6920 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6922 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6924 unsigned int word_num;
6925 unsigned int bit_num;
6927 if (vfid > 255 || vfid < 0)
6930 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6931 word_num = vfid / 32;
6932 bit_num = vfid % 32;
6934 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6936 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6938 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6939 bit_num = vfid % 32;
6941 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6943 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
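/* Illustrative vfid -> (descriptor, word, bit) mapping for
 * hclge_update_desc_vfid() above, using the constants shown
 * (HCLGE_VF_NUM_IN_FIRST_DESC = 192, 32 bits per data word):
 *   vfid  50 -> desc[1], word 50 / 32 = 1, bit 50 % 32 = 18
 *   vfid 200 -> desc[2], word (200 - 192) / 32 = 0, bit 200 % 32 = 8
 * i.e. desc[1] covers functions 0..191 and desc[2] covers 192..255.
 */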
6949 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6951 #define HCLGE_DESC_NUMBER 3
6952 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6955 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6956 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6957 if (desc[i].data[j])
6963 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6964 const u8 *addr, bool is_mc)
6966 const unsigned char *mac_addr = addr;
6967 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6968 (mac_addr[0]) | (mac_addr[1] << 8);
6969 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6971 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6973 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6974 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6977 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6978 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
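/* Worked example of the packing in hclge_prepare_mac_addr() above, for
 * the made-up address 00:11:22:33:44:55:
 *   high_val = 0x00 | 0x11 << 8 | 0x22 << 16 | 0x33 << 24 = 0x33221100
 *   low_val  = 0x44 | 0x55 << 8                            = 0x5544
 * so the first four octets land in mac_addr_hi32 and the last two in
 * mac_addr_lo16, converted to little endian by cpu_to_le32/le16.
 */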
6981 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6982 struct hclge_mac_vlan_tbl_entry_cmd *req)
6984 struct hclge_dev *hdev = vport->back;
6985 struct hclge_desc desc;
6990 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6992 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6994 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6996 dev_err(&hdev->pdev->dev,
6997 "del mac addr failed for cmd_send, ret =%d.\n",
7001 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7002 retval = le16_to_cpu(desc.retval);
7004 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7005 HCLGE_MAC_VLAN_REMOVE);
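/* Note on hclge_lookup_mac_vlan_tbl() below: multicast lookups chain
 * three descriptors with HCLGE_CMD_FLAG_NEXT so the response can return
 * the full per-function bitmap (the same three-descriptor layout that
 * hclge_update_desc_vfid() writes); unicast lookups need only a single
 * descriptor.
 */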
7008 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7009 struct hclge_mac_vlan_tbl_entry_cmd *req,
7010 struct hclge_desc *desc,
7013 struct hclge_dev *hdev = vport->back;
7018 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7020 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7021 memcpy(desc[0].data,
7023 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7024 hclge_cmd_setup_basic_desc(&desc[1],
7025 HCLGE_OPC_MAC_VLAN_ADD,
7027 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7028 hclge_cmd_setup_basic_desc(&desc[2],
7029 HCLGE_OPC_MAC_VLAN_ADD,
7031 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7033 memcpy(desc[0].data,
7035 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7036 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7039 dev_err(&hdev->pdev->dev,
7040 "lookup mac addr failed for cmd_send, ret =%d.\n",
7044 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7045 retval = le16_to_cpu(desc[0].retval);
7047 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7048 HCLGE_MAC_VLAN_LKUP);
7051 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7052 struct hclge_mac_vlan_tbl_entry_cmd *req,
7053 struct hclge_desc *mc_desc)
7055 struct hclge_dev *hdev = vport->back;
7062 struct hclge_desc desc;
7064 hclge_cmd_setup_basic_desc(&desc,
7065 HCLGE_OPC_MAC_VLAN_ADD,
7067 memcpy(desc.data, req,
7068 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7069 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7070 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7071 retval = le16_to_cpu(desc.retval);
7073 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7075 HCLGE_MAC_VLAN_ADD);
7077 hclge_cmd_reuse_desc(&mc_desc[0], false);
7078 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7079 hclge_cmd_reuse_desc(&mc_desc[1], false);
7080 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7081 hclge_cmd_reuse_desc(&mc_desc[2], false);
7082 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7083 memcpy(mc_desc[0].data, req,
7084 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7085 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7086 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7087 retval = le16_to_cpu(mc_desc[0].retval);
7089 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7091 HCLGE_MAC_VLAN_ADD);
7095 dev_err(&hdev->pdev->dev,
7096 "add mac addr failed for cmd_send, ret =%d.\n",
7104 static int hclge_init_umv_space(struct hclge_dev *hdev)
7106 u16 allocated_size = 0;
7109 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7114 if (allocated_size < hdev->wanted_umv_size)
7115 dev_warn(&hdev->pdev->dev,
7116 "Alloc umv space failed, want %u, get %u\n",
7117 hdev->wanted_umv_size, allocated_size);
7119 mutex_init(&hdev->umv_mutex);
7120 hdev->max_umv_size = allocated_size;
7121 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7122 * preserve some unicast mac vlan table entries shared by pf
7125 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7126 hdev->share_umv_size = hdev->priv_umv_size +
7127 hdev->max_umv_size % (hdev->num_req_vfs + 2);
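/* Worked example of the quota split above, with illustrative numbers:
 * for max_umv_size = 258 and num_req_vfs = 2 the divisor is
 * num_req_vfs + 2 = 4, so priv_umv_size = 258 / 4 = 64 entries per
 * function and share_umv_size = 64 + 258 % 4 = 66 entries in the pool
 * shared by the PF and its VFs.
 */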
7132 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7136 if (hdev->max_umv_size > 0) {
7137 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7141 hdev->max_umv_size = 0;
7143 mutex_destroy(&hdev->umv_mutex);
7148 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7149 u16 *allocated_size, bool is_alloc)
7151 struct hclge_umv_spc_alc_cmd *req;
7152 struct hclge_desc desc;
7155 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7156 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7158 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7160 req->space_size = cpu_to_le32(space_size);
7162 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7164 dev_err(&hdev->pdev->dev,
7165 "%s umv space failed for cmd_send, ret =%d\n",
7166 is_alloc ? "allocate" : "free", ret);
7170 if (is_alloc && allocated_size)
7171 *allocated_size = le32_to_cpu(desc.data[1]);
7176 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7178 struct hclge_vport *vport;
7181 for (i = 0; i < hdev->num_alloc_vport; i++) {
7182 vport = &hdev->vport[i];
7183 vport->used_umv_num = 0;
7186 mutex_lock(&hdev->umv_mutex);
7187 hdev->share_umv_size = hdev->priv_umv_size +
7188 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7189 mutex_unlock(&hdev->umv_mutex);
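/* The helpers below implement the accounting for that split: a vport
 * consumes its private quota first (used_umv_num vs. priv_umv_size) and
 * only then draws from share_umv_size, so the table counts as full for
 * a vport only when both its private quota and the shared pool are
 * exhausted.
 */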
7192 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7194 struct hclge_dev *hdev = vport->back;
7197 mutex_lock(&hdev->umv_mutex);
7198 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7199 hdev->share_umv_size == 0);
7200 mutex_unlock(&hdev->umv_mutex);
7205 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7207 struct hclge_dev *hdev = vport->back;
7209 mutex_lock(&hdev->umv_mutex);
7211 if (vport->used_umv_num > hdev->priv_umv_size)
7212 hdev->share_umv_size++;
7214 if (vport->used_umv_num > 0)
7215 vport->used_umv_num--;
7217 if (vport->used_umv_num >= hdev->priv_umv_size &&
7218 hdev->share_umv_size > 0)
7219 hdev->share_umv_size--;
7220 vport->used_umv_num++;
7222 mutex_unlock(&hdev->umv_mutex);
7225 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7226 const unsigned char *addr)
7228 struct hclge_vport *vport = hclge_get_vport(handle);
7230 return hclge_add_uc_addr_common(vport, addr);
7233 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7234 const unsigned char *addr)
7236 struct hclge_dev *hdev = vport->back;
7237 struct hclge_mac_vlan_tbl_entry_cmd req;
7238 struct hclge_desc desc;
7239 u16 egress_port = 0;
7242 /* mac addr check */
7243 if (is_zero_ether_addr(addr) ||
7244 is_broadcast_ether_addr(addr) ||
7245 is_multicast_ether_addr(addr)) {
7246 dev_err(&hdev->pdev->dev,
7247 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7248 addr, is_zero_ether_addr(addr),
7249 is_broadcast_ether_addr(addr),
7250 is_multicast_ether_addr(addr));
7254 memset(&req, 0, sizeof(req));
7256 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7257 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7259 req.egress_port = cpu_to_le16(egress_port);
7261 hclge_prepare_mac_addr(&req, addr, false);
7263 /* Lookup the mac address in the mac_vlan table, and add
7264 * it if the entry does not exist. Repeated unicast entries
7265 * are not allowed in the mac vlan table.
7267 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7268 if (ret == -ENOENT) {
7269 if (!hclge_is_umv_space_full(vport)) {
7270 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7272 hclge_update_umv_space(vport, false);
7276 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7277 hdev->priv_umv_size);
7282 /* check if we just hit the duplicate */
7284 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7285 vport->vport_id, addr);
7289 dev_err(&hdev->pdev->dev,
7290 "PF failed to add unicast entry(%pM) in the MAC table\n",
7296 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7297 const unsigned char *addr)
7299 struct hclge_vport *vport = hclge_get_vport(handle);
7301 return hclge_rm_uc_addr_common(vport, addr);
7304 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7305 const unsigned char *addr)
7307 struct hclge_dev *hdev = vport->back;
7308 struct hclge_mac_vlan_tbl_entry_cmd req;
7311 /* mac addr check */
7312 if (is_zero_ether_addr(addr) ||
7313 is_broadcast_ether_addr(addr) ||
7314 is_multicast_ether_addr(addr)) {
7315 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7320 memset(&req, 0, sizeof(req));
7321 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7322 hclge_prepare_mac_addr(&req, addr, false);
7323 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7325 hclge_update_umv_space(vport, true);
7330 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7331 const unsigned char *addr)
7333 struct hclge_vport *vport = hclge_get_vport(handle);
7335 return hclge_add_mc_addr_common(vport, addr);
7338 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7339 const unsigned char *addr)
7341 struct hclge_dev *hdev = vport->back;
7342 struct hclge_mac_vlan_tbl_entry_cmd req;
7343 struct hclge_desc desc[3];
7346 /* mac addr check */
7347 if (!is_multicast_ether_addr(addr)) {
7348 dev_err(&hdev->pdev->dev,
7349 "Add mc mac err! invalid mac:%pM.\n",
7353 memset(&req, 0, sizeof(req));
7354 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7355 hclge_prepare_mac_addr(&req, addr, true);
7356 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7358 /* This mac addr does not exist, add a new entry for it */
7359 memset(desc[0].data, 0, sizeof(desc[0].data));
7360 memset(desc[1].data, 0, sizeof(desc[0].data));
7361 memset(desc[2].data, 0, sizeof(desc[0].data));
7363 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7366 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7368 if (status == -ENOSPC)
7369 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7374 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7375 const unsigned char *addr)
7377 struct hclge_vport *vport = hclge_get_vport(handle);
7379 return hclge_rm_mc_addr_common(vport, addr);
7382 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7383 const unsigned char *addr)
7385 struct hclge_dev *hdev = vport->back;
7386 struct hclge_mac_vlan_tbl_entry_cmd req;
7387 enum hclge_cmd_status status;
7388 struct hclge_desc desc[3];
7390 /* mac addr check */
7391 if (!is_multicast_ether_addr(addr)) {
7392 dev_dbg(&hdev->pdev->dev,
7393 "Remove mc mac err! invalid mac:%pM.\n",
7398 memset(&req, 0, sizeof(req));
7399 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7400 hclge_prepare_mac_addr(&req, addr, true);
7401 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7403 /* This mac addr exists, remove this handle's VFID from it */
7404 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7408 if (hclge_is_all_function_id_zero(desc))
7409 /* All the vfids are zero, so delete this entry */
7410 status = hclge_remove_mac_vlan_tbl(vport, &req);
7412 /* Not all the vfids are zero, so just update the vfids */
7413 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7416 /* Maybe this mac address is in mta table, but it cannot be
7417 * deleted here because an entry of mta represents an address
7418 * range rather than a specific address. The delete action on
7419 * all entries will take effect in update_mta_status, called by
7420 * hns3_nic_set_rx_mode.
7428 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7429 enum HCLGE_MAC_ADDR_TYPE mac_type)
7431 struct hclge_vport_mac_addr_cfg *mac_cfg;
7432 struct list_head *list;
7434 if (!vport->vport_id)
7437 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7441 mac_cfg->hd_tbl_status = true;
7442 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7444 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7445 &vport->uc_mac_list : &vport->mc_mac_list;
7447 list_add_tail(&mac_cfg->node, list);
7450 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7452 enum HCLGE_MAC_ADDR_TYPE mac_type)
7454 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7455 struct list_head *list;
7456 bool uc_flag, mc_flag;
7458 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7459 &vport->uc_mac_list : &vport->mc_mac_list;
7461 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7462 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7464 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7465 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7466 if (uc_flag && mac_cfg->hd_tbl_status)
7467 hclge_rm_uc_addr_common(vport, mac_addr);
7469 if (mc_flag && mac_cfg->hd_tbl_status)
7470 hclge_rm_mc_addr_common(vport, mac_addr);
7472 list_del(&mac_cfg->node);
7479 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7480 enum HCLGE_MAC_ADDR_TYPE mac_type)
7482 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7483 struct list_head *list;
7485 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7486 &vport->uc_mac_list : &vport->mc_mac_list;
7488 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7489 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7490 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7492 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7493 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7495 mac_cfg->hd_tbl_status = false;
7497 list_del(&mac_cfg->node);
7503 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7505 struct hclge_vport_mac_addr_cfg *mac, *tmp;
7506 struct hclge_vport *vport;
7509 for (i = 0; i < hdev->num_alloc_vport; i++) {
7510 vport = &hdev->vport[i];
7511 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7512 list_del(&mac->node);
7516 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7517 list_del(&mac->node);
7523 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7524 u16 cmdq_resp, u8 resp_code)
7526 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7527 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
7528 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7529 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7534 dev_err(&hdev->pdev->dev,
7535 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7540 switch (resp_code) {
7541 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7542 case HCLGE_ETHERTYPE_ALREADY_ADD:
7545 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7546 dev_err(&hdev->pdev->dev,
7547 "add mac ethertype failed for manager table overflow.\n");
7548 return_status = -EIO;
7550 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7551 dev_err(&hdev->pdev->dev,
7552 "add mac ethertype failed for key conflict.\n");
7553 return_status = -EIO;
7556 dev_err(&hdev->pdev->dev,
7557 "add mac ethertype failed for undefined, code=%u.\n",
7559 return_status = -EIO;
7562 return return_status;
7565 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7568 struct hclge_mac_vlan_tbl_entry_cmd req;
7569 struct hclge_dev *hdev = vport->back;
7570 struct hclge_desc desc;
7571 u16 egress_port = 0;
7574 if (is_zero_ether_addr(mac_addr))
7577 memset(&req, 0, sizeof(req));
7578 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7579 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7580 req.egress_port = cpu_to_le16(egress_port);
7581 hclge_prepare_mac_addr(&req, mac_addr, false);
7583 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7586 vf_idx += HCLGE_VF_VPORT_START_NUM;
7587 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7589 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7595 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7598 struct hclge_vport *vport = hclge_get_vport(handle);
7599 struct hclge_dev *hdev = vport->back;
7601 vport = hclge_get_vf_vport(hdev, vf);
7605 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7606 dev_info(&hdev->pdev->dev,
7607 "Specified MAC(=%pM) is same as before, no change committed!\n",
7612 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7613 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7618 ether_addr_copy(vport->vf_info.mac, mac_addr);
7619 dev_info(&hdev->pdev->dev,
7620 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7623 return hclge_inform_reset_assert_to_vf(vport);
7626 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7627 const struct hclge_mac_mgr_tbl_entry_cmd *req)
7629 struct hclge_desc desc;
7634 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7635 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7637 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7639 dev_err(&hdev->pdev->dev,
7640 "add mac ethertype failed for cmd_send, ret =%d.\n",
7645 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7646 retval = le16_to_cpu(desc.retval);
7648 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7651 static int init_mgr_tbl(struct hclge_dev *hdev)
7656 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7657 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7659 dev_err(&hdev->pdev->dev,
7660 "add mac ethertype failed, ret =%d.\n",
7669 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7671 struct hclge_vport *vport = hclge_get_vport(handle);
7672 struct hclge_dev *hdev = vport->back;
7674 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7677 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7680 const unsigned char *new_addr = (const unsigned char *)p;
7681 struct hclge_vport *vport = hclge_get_vport(handle);
7682 struct hclge_dev *hdev = vport->back;
7685 /* mac addr check */
7686 if (is_zero_ether_addr(new_addr) ||
7687 is_broadcast_ether_addr(new_addr) ||
7688 is_multicast_ether_addr(new_addr)) {
7689 dev_err(&hdev->pdev->dev,
7690 "Change uc mac err! invalid mac:%pM.\n",
7695 if ((!is_first || is_kdump_kernel()) &&
7696 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7697 dev_warn(&hdev->pdev->dev,
7698 "remove old uc mac address fail.\n");
7700 ret = hclge_add_uc_addr(handle, new_addr);
7702 dev_err(&hdev->pdev->dev,
7703 "add uc mac address fail, ret =%d.\n",
7707 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7708 dev_err(&hdev->pdev->dev,
7709 "restore uc mac address fail.\n");
7714 ret = hclge_pause_addr_cfg(hdev, new_addr);
7716 dev_err(&hdev->pdev->dev,
7717 "configure mac pause address fail, ret =%d.\n",
7722 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7727 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7730 struct hclge_vport *vport = hclge_get_vport(handle);
7731 struct hclge_dev *hdev = vport->back;
7733 if (!hdev->hw.mac.phydev)
7736 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7739 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7740 u8 fe_type, bool filter_en, u8 vf_id)
7742 struct hclge_vlan_filter_ctrl_cmd *req;
7743 struct hclge_desc desc;
7746 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7748 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7749 req->vlan_type = vlan_type;
7750 req->vlan_fe = filter_en ? fe_type : 0;
7753 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7755 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7761 #define HCLGE_FILTER_TYPE_VF 0
7762 #define HCLGE_FILTER_TYPE_PORT 1
7763 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7764 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7765 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7766 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7767 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7768 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7769 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7770 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7771 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7773 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7775 struct hclge_vport *vport = hclge_get_vport(handle);
7776 struct hclge_dev *hdev = vport->back;
7778 if (hdev->pdev->revision >= 0x21) {
7779 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7780 HCLGE_FILTER_FE_EGRESS, enable, 0);
7781 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7782 HCLGE_FILTER_FE_INGRESS, enable, 0);
7784 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7785 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7789 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7791 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
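/* In hclge_set_vf_vlan_common() below the target function is selected
 * by a bitmap split across two descriptors:
 *   vf_byte_off = vfid / 8, vf_byte_val = 1 << (vfid % 8)
 * e.g. vfid 20 sets bit 4 of byte 2 in desc[0]. Bytes at or above
 * HCLGE_MAX_VF_BYTES spill into desc[1]; assuming that constant is 16,
 * vfid 130 (byte 16, bit 2) would land in desc[1].
 */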
7794 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7795 bool is_kill, u16 vlan,
7798 struct hclge_vport *vport = &hdev->vport[vfid];
7799 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7800 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7801 struct hclge_desc desc[2];
7806 /* If the vf vlan table is full, the firmware will close the vf vlan
7807 * filter, so it is useless and unnecessary to add a new vlan id to it.
7808 * If spoof check is enabled and the vf vlan table is full, no new vlan
7809 * should be added, because tx packets with these vlan ids will be dropped.
7811 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7812 if (vport->vf_info.spoofchk && vlan) {
7813 dev_err(&hdev->pdev->dev,
7814 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7820 hclge_cmd_setup_basic_desc(&desc[0],
7821 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7822 hclge_cmd_setup_basic_desc(&desc[1],
7823 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7825 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7827 vf_byte_off = vfid / 8;
7828 vf_byte_val = 1 << (vfid % 8);
7830 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7831 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7833 req0->vlan_id = cpu_to_le16(vlan);
7834 req0->vlan_cfg = is_kill;
7836 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7837 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7839 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7841 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7843 dev_err(&hdev->pdev->dev,
7844 "Send vf vlan command fail, ret =%d.\n",
7850 #define HCLGE_VF_VLAN_NO_ENTRY 2
7851 if (!req0->resp_code || req0->resp_code == 1)
7854 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7855 set_bit(vfid, hdev->vf_vlan_full);
7856 dev_warn(&hdev->pdev->dev,
7857 "vf vlan table is full, vf vlan filter is disabled\n");
7861 dev_err(&hdev->pdev->dev,
7862 "Add vf vlan filter fail, ret =%u.\n",
7865 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7866 if (!req0->resp_code)
7869 /* The vf vlan filter is disabled when the vf vlan table is full,
7870 * so a new vlan id will not be added into the vf vlan table.
7871 * Just return 0 without a warning, to avoid massively verbose
7872 * logs on unload.
7874 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7877 dev_err(&hdev->pdev->dev,
7878 "Kill vf vlan filter fail, ret =%u.\n",
7885 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7886 u16 vlan_id, bool is_kill)
7888 struct hclge_vlan_filter_pf_cfg_cmd *req;
7889 struct hclge_desc desc;
7890 u8 vlan_offset_byte_val;
7891 u8 vlan_offset_byte;
7895 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7897 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7898 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7899 HCLGE_VLAN_BYTE_SIZE;
7900 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7902 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7903 req->vlan_offset = vlan_offset_160;
7904 req->vlan_cfg = is_kill;
7905 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7907 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7909 dev_err(&hdev->pdev->dev,
7910 "port vlan command, send fail, ret =%d.\n", ret);
7914 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7915 u16 vport_id, u16 vlan_id,
7918 u16 vport_idx, vport_num = 0;
7921 if (is_kill && !vlan_id)
7924 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7927 dev_err(&hdev->pdev->dev,
7928 "Set %u vport vlan filter config fail, ret =%d.\n",
7933 /* vlan 0 may be added twice when 8021q module is enabled */
7934 if (!is_kill && !vlan_id &&
7935 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7938 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7939 dev_err(&hdev->pdev->dev,
7940 "Add port vlan failed, vport %u is already in vlan %u\n",
7946 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7947 dev_err(&hdev->pdev->dev,
7948 "Delete port vlan failed, vport %u is not in vlan %u\n",
7953 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7956 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7957 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7963 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7965 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7966 struct hclge_vport_vtag_tx_cfg_cmd *req;
7967 struct hclge_dev *hdev = vport->back;
7968 struct hclge_desc desc;
7972 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7974 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7975 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7976 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7977 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7978 vcfg->accept_tag1 ? 1 : 0);
7979 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7980 vcfg->accept_untag1 ? 1 : 0);
7981 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7982 vcfg->accept_tag2 ? 1 : 0);
7983 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7984 vcfg->accept_untag2 ? 1 : 0);
7985 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7986 vcfg->insert_tag1_en ? 1 : 0);
7987 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7988 vcfg->insert_tag2_en ? 1 : 0);
7989 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7991 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7992 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7993 HCLGE_VF_NUM_PER_BYTE;
7994 req->vf_bitmap[bmap_index] =
7995 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7997 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7999 dev_err(&hdev->pdev->dev,
8000 "Send port txvlan cfg command fail, ret =%d\n",
8006 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8008 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8009 struct hclge_vport_vtag_rx_cfg_cmd *req;
8010 struct hclge_dev *hdev = vport->back;
8011 struct hclge_desc desc;
8015 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8017 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8018 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8019 vcfg->strip_tag1_en ? 1 : 0);
8020 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8021 vcfg->strip_tag2_en ? 1 : 0);
8022 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8023 vcfg->vlan1_vlan_prionly ? 1 : 0);
8024 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8025 vcfg->vlan2_vlan_prionly ? 1 : 0);
8027 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8028 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8029 HCLGE_VF_NUM_PER_BYTE;
8030 req->vf_bitmap[bmap_index] =
8031 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8033 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8035 dev_err(&hdev->pdev->dev,
8036 "Send port rxvlan cfg command fail, ret =%d\n",
8042 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8043 u16 port_base_vlan_state,
8048 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8049 vport->txvlan_cfg.accept_tag1 = true;
8050 vport->txvlan_cfg.insert_tag1_en = false;
8051 vport->txvlan_cfg.default_tag1 = 0;
8053 vport->txvlan_cfg.accept_tag1 = false;
8054 vport->txvlan_cfg.insert_tag1_en = true;
8055 vport->txvlan_cfg.default_tag1 = vlan_tag;
8058 vport->txvlan_cfg.accept_untag1 = true;
8060 /* accept_tag2 and accept_untag2 are not supported on
8061 * pdev revision(0x20); newer revisions support them, but
8062 * these two fields cannot be configured by the user.
8064 vport->txvlan_cfg.accept_tag2 = true;
8065 vport->txvlan_cfg.accept_untag2 = true;
8066 vport->txvlan_cfg.insert_tag2_en = false;
8067 vport->txvlan_cfg.default_tag2 = 0;
8069 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8070 vport->rxvlan_cfg.strip_tag1_en = false;
8071 vport->rxvlan_cfg.strip_tag2_en =
8072 vport->rxvlan_cfg.rx_vlan_offload_en;
8074 vport->rxvlan_cfg.strip_tag1_en =
8075 vport->rxvlan_cfg.rx_vlan_offload_en;
8076 vport->rxvlan_cfg.strip_tag2_en = true;
8078 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8079 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8081 ret = hclge_set_vlan_tx_offload_cfg(vport);
8085 return hclge_set_vlan_rx_offload_cfg(vport);
8088 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8090 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8091 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8092 struct hclge_desc desc;
8095 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8096 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8097 rx_req->ot_fst_vlan_type =
8098 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8099 rx_req->ot_sec_vlan_type =
8100 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8101 rx_req->in_fst_vlan_type =
8102 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8103 rx_req->in_sec_vlan_type =
8104 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8106 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8108 dev_err(&hdev->pdev->dev,
8109 "Send rxvlan protocol type command fail, ret =%d\n",
8114 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8116 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8117 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8118 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8120 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8122 dev_err(&hdev->pdev->dev,
8123 "Send txvlan protocol type command fail, ret =%d\n",
8129 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8131 #define HCLGE_DEF_VLAN_TYPE 0x8100
8133 struct hnae3_handle *handle = &hdev->vport[0].nic;
8134 struct hclge_vport *vport;
8138 if (hdev->pdev->revision >= 0x21) {
8139 /* for revision 0x21, vf vlan filter is per function */
8140 for (i = 0; i < hdev->num_alloc_vport; i++) {
8141 vport = &hdev->vport[i];
8142 ret = hclge_set_vlan_filter_ctrl(hdev,
8143 HCLGE_FILTER_TYPE_VF,
8144 HCLGE_FILTER_FE_EGRESS,
8151 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8152 HCLGE_FILTER_FE_INGRESS, true,
8157 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8158 HCLGE_FILTER_FE_EGRESS_V1_B,
8164 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8166 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8167 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8168 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8169 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8170 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8171 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8173 ret = hclge_set_vlan_protocol_type(hdev);
8177 for (i = 0; i < hdev->num_alloc_vport; i++) {
8180 vport = &hdev->vport[i];
8181 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8183 ret = hclge_vlan_offload_cfg(vport,
8184 vport->port_base_vlan_cfg.state,
8190 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8193 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8196 struct hclge_vport_vlan_cfg *vlan;
8198 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8202 vlan->hd_tbl_status = writen_to_tbl;
8203 vlan->vlan_id = vlan_id;
8205 list_add_tail(&vlan->node, &vport->vlan_list);
8208 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8210 struct hclge_vport_vlan_cfg *vlan, *tmp;
8211 struct hclge_dev *hdev = vport->back;
8214 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8215 if (!vlan->hd_tbl_status) {
8216 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8218 vlan->vlan_id, false);
8220 dev_err(&hdev->pdev->dev,
8221 "restore vport vlan list failed, ret=%d\n",
8226 vlan->hd_tbl_status = true;
8232 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8235 struct hclge_vport_vlan_cfg *vlan, *tmp;
8236 struct hclge_dev *hdev = vport->back;
8238 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8239 if (vlan->vlan_id == vlan_id) {
8240 if (is_write_tbl && vlan->hd_tbl_status)
8241 hclge_set_vlan_filter_hw(hdev,
8247 list_del(&vlan->node);
8254 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8256 struct hclge_vport_vlan_cfg *vlan, *tmp;
8257 struct hclge_dev *hdev = vport->back;
8259 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8260 if (vlan->hd_tbl_status)
8261 hclge_set_vlan_filter_hw(hdev,
8267 vlan->hd_tbl_status = false;
8269 list_del(&vlan->node);
8275 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8277 struct hclge_vport_vlan_cfg *vlan, *tmp;
8278 struct hclge_vport *vport;
8281 for (i = 0; i < hdev->num_alloc_vport; i++) {
8282 vport = &hdev->vport[i];
8283 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8284 list_del(&vlan->node);
8290 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8292 struct hclge_vport *vport = hclge_get_vport(handle);
8293 struct hclge_vport_vlan_cfg *vlan, *tmp;
8294 struct hclge_dev *hdev = vport->back;
8299 for (i = 0; i < hdev->num_alloc_vport; i++) {
8300 vport = &hdev->vport[i];
8301 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8302 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8303 state = vport->port_base_vlan_cfg.state;
8305 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8306 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8307 vport->vport_id, vlan_id,
8312 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8315 if (!vlan->hd_tbl_status)
8317 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8319 vlan->vlan_id, false);
8326 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8328 struct hclge_vport *vport = hclge_get_vport(handle);
8330 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8331 vport->rxvlan_cfg.strip_tag1_en = false;
8332 vport->rxvlan_cfg.strip_tag2_en = enable;
8334 vport->rxvlan_cfg.strip_tag1_en = enable;
8335 vport->rxvlan_cfg.strip_tag2_en = true;
8337 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8338 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8339 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8341 return hclge_set_vlan_rx_offload_cfg(vport);
8344 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8345 u16 port_base_vlan_state,
8346 struct hclge_vlan_info *new_info,
8347 struct hclge_vlan_info *old_info)
8349 struct hclge_dev *hdev = vport->back;
8352 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8353 hclge_rm_vport_all_vlan_table(vport, false);
8354 return hclge_set_vlan_filter_hw(hdev,
8355 htons(new_info->vlan_proto),
8361 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8362 vport->vport_id, old_info->vlan_tag,
8367 return hclge_add_vport_all_vlan_table(vport);
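/* hclge_update_port_base_vlan_cfg() below acts on the state computed by
 * hclge_get_port_base_vlan_state(): DISABLE/ENABLE turn port based vlan
 * off/on, MODIFY swaps the tag while it stays enabled (the new filter
 * entry is added before the old one is removed), and NOCHANGE is
 * filtered out by the caller before this point is reached.
 */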
8370 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8371 struct hclge_vlan_info *vlan_info)
8373 struct hnae3_handle *nic = &vport->nic;
8374 struct hclge_vlan_info *old_vlan_info;
8375 struct hclge_dev *hdev = vport->back;
8378 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8380 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8384 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8385 /* add new VLAN tag */
8386 ret = hclge_set_vlan_filter_hw(hdev,
8387 htons(vlan_info->vlan_proto),
8389 vlan_info->vlan_tag,
8394 /* remove old VLAN tag */
8395 ret = hclge_set_vlan_filter_hw(hdev,
8396 htons(old_vlan_info->vlan_proto),
8398 old_vlan_info->vlan_tag,
8406 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8411 /* update state only when disable/enable port based VLAN */
8412 vport->port_base_vlan_cfg.state = state;
8413 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8414 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8416 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8419 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8420 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8421 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8426 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8427 enum hnae3_port_base_vlan_state state,
8430 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8432 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8434 return HNAE3_PORT_BASE_VLAN_ENABLE;
8437 return HNAE3_PORT_BASE_VLAN_DISABLE;
8438 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8439 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8441 return HNAE3_PORT_BASE_VLAN_MODIFY;
8445 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8446 u16 vlan, u8 qos, __be16 proto)
8448 struct hclge_vport *vport = hclge_get_vport(handle);
8449 struct hclge_dev *hdev = vport->back;
8450 struct hclge_vlan_info vlan_info;
8454 if (hdev->pdev->revision == 0x20)
8457 vport = hclge_get_vf_vport(hdev, vfid);
8461 /* qos is a 3-bit value, so it cannot be bigger than 7 */
8462 if (vlan > VLAN_N_VID - 1 || qos > 7)
8464 if (proto != htons(ETH_P_8021Q))
8465 return -EPROTONOSUPPORT;
8467 state = hclge_get_port_base_vlan_state(vport,
8468 vport->port_base_vlan_cfg.state,
8470 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8473 vlan_info.vlan_tag = vlan;
8474 vlan_info.qos = qos;
8475 vlan_info.vlan_proto = ntohs(proto);
8477 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8478 return hclge_update_port_base_vlan_cfg(vport, state,
8481 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8482 vport->vport_id, state,
8489 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8490 u16 vlan_id, bool is_kill)
8492 struct hclge_vport *vport = hclge_get_vport(handle);
8493 struct hclge_dev *hdev = vport->back;
8494 bool writen_to_tbl = false;
8497 /* When the device is resetting, the firmware is unable to handle the
8498 * mailbox. Just record the vlan id, and remove it after
8501 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8502 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8506 /* When port based vlan is enabled, we use the port based vlan as the
8507 * vlan filter entry. In this case, we don't update the vlan filter table
8508 * when the user adds a new vlan or removes an existing vlan; we just
8509 * update the vport vlan list. The vlan ids in the vlan list are not
8510 * written to the vlan filter table until port based vlan is disabled.
8512 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8513 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8515 writen_to_tbl = true;
8520 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8522 hclge_add_vport_vlan_table(vport, vlan_id,
8524 } else if (is_kill) {
8525 /* when removing the hw vlan filter fails, record the vlan id,
8526 * and try to remove it from hw later, to be consistent
8529 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8534 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8536 #define HCLGE_MAX_SYNC_COUNT 60
8538 int i, ret, sync_cnt = 0;
8541 /* retry failed vlan deletions for every vport, the PF included */
8542 for (i = 0; i < hdev->num_alloc_vport; i++) {
8543 struct hclge_vport *vport = &hdev->vport[i];
8545 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8547 while (vlan_id != VLAN_N_VID) {
8548 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8549 vport->vport_id, vlan_id,
8551 if (ret && ret != -EINVAL)
8554 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8555 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8558 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8561 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8567 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8569 struct hclge_config_max_frm_size_cmd *req;
8570 struct hclge_desc desc;
8572 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8574 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8575 req->max_frm_size = cpu_to_le16(new_mps);
8576 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8578 return hclge_cmd_send(&hdev->hw, &desc, 1);
8581 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8583 struct hclge_vport *vport = hclge_get_vport(handle);
8585 return hclge_set_vport_mtu(vport, new_mtu);
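/* Frame size arithmetic used by hclge_set_vport_mtu() below, with the
 * standard kernel constants ETH_HLEN = 14, ETH_FCS_LEN = 4 and
 * VLAN_HLEN = 4: new_mtu = 1500 gives
 *   max_frm_size = 1500 + 14 + 4 + 2 * 4 = 1526
 * which must fall within [HCLGE_MAC_MIN_FRAME, HCLGE_MAC_MAX_FRAME].
 */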
8588 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8590 struct hclge_dev *hdev = vport->back;
8591 int i, max_frm_size, ret;
8593 /* HW supports 2 layers of vlan */
8594 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8595 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8596 max_frm_size > HCLGE_MAC_MAX_FRAME)
8599 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8600 mutex_lock(&hdev->vport_lock);
8601 /* VF's mps must fit within hdev->mps */
8602 if (vport->vport_id && max_frm_size > hdev->mps) {
8603 mutex_unlock(&hdev->vport_lock);
8605 } else if (vport->vport_id) {
8606 vport->mps = max_frm_size;
8607 mutex_unlock(&hdev->vport_lock);
8611 /* PF's mps must be no less than any VF's mps */
8612 for (i = 1; i < hdev->num_alloc_vport; i++)
8613 if (max_frm_size < hdev->vport[i].mps) {
8614 mutex_unlock(&hdev->vport_lock);
8618 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8620 ret = hclge_set_mac_mtu(hdev, max_frm_size);
8622 dev_err(&hdev->pdev->dev,
8623 "Change mtu fail, ret =%d\n", ret);
8627 hdev->mps = max_frm_size;
8628 vport->mps = max_frm_size;
8630 ret = hclge_buffer_alloc(hdev);
8632 dev_err(&hdev->pdev->dev,
8633 "Allocate buffer fail, ret =%d\n", ret);
8636 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8637 mutex_unlock(&hdev->vport_lock);
8641 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8644 struct hclge_reset_tqp_queue_cmd *req;
8645 struct hclge_desc desc;
8648 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8650 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8651 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8653 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8655 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8657 dev_err(&hdev->pdev->dev,
8658 "Send tqp reset cmd error, status =%d\n", ret);
8665 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8667 struct hclge_reset_tqp_queue_cmd *req;
8668 struct hclge_desc desc;
8671 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8673 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8674 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8676 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8678 dev_err(&hdev->pdev->dev,
8679 "Get reset status error, status =%d\n", ret);
8683 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8686 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8688 struct hnae3_queue *queue;
8689 struct hclge_tqp *tqp;
8691 queue = handle->kinfo.tqp[queue_id];
8692 tqp = container_of(queue, struct hclge_tqp, q);
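/* TQP reset handshake used by hclge_reset_tqp() below: disable the
 * queue, assert the reset via HCLGE_OPC_RESET_TQP_QUEUE, poll the ready
 * bit up to HCLGE_TQP_RESET_TRY_TIMES times with a ~1ms usleep_range()
 * between reads, then send the same command again with the reset bit
 * cleared to deassert it. hclge_reset_vf_queue() repeats the sequence
 * for a VF queue, minus the initial disable step.
 */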
8697 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8699 struct hclge_vport *vport = hclge_get_vport(handle);
8700 struct hclge_dev *hdev = vport->back;
8701 int reset_try_times = 0;
8706 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8708 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8710 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8714 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8716 dev_err(&hdev->pdev->dev,
8717 "Send reset tqp cmd fail, ret = %d\n", ret);
8721 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8722 reset_status = hclge_get_reset_status(hdev, queue_gid);
8726 /* Wait for tqp hw reset */
8727 usleep_range(1000, 1200);
8730 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8731 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8735 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8737 dev_err(&hdev->pdev->dev,
8738 "Deassert the soft reset fail, ret = %d\n", ret);
8743 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8745 struct hclge_dev *hdev = vport->back;
8746 int reset_try_times = 0;
8751 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8753 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8755 dev_warn(&hdev->pdev->dev,
8756 "Send reset tqp cmd fail, ret = %d\n", ret);
8760 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8761 reset_status = hclge_get_reset_status(hdev, queue_gid);
8765 /* Wait for tqp hw reset */
8766 usleep_range(1000, 1200);
8769 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8770 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8774 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8776 dev_warn(&hdev->pdev->dev,
8777 "Deassert the soft reset fail, ret = %d\n", ret);
8780 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8782 struct hclge_vport *vport = hclge_get_vport(handle);
8783 struct hclge_dev *hdev = vport->back;
8785 return hdev->fw_version;
8788 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8790 struct phy_device *phydev = hdev->hw.mac.phydev;
8795 phy_set_asym_pause(phydev, rx_en, tx_en);
8798 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8802 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8805 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8807 dev_err(&hdev->pdev->dev,
8808 "configure pauseparam error, ret = %d.\n", ret);
8813 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8815 struct phy_device *phydev = hdev->hw.mac.phydev;
8816 u16 remote_advertising = 0;
8817 u16 local_advertising;
8818 u32 rx_pause, tx_pause;
8821 if (!phydev->link || !phydev->autoneg)
8824 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8827 remote_advertising = LPA_PAUSE_CAP;
8829 if (phydev->asym_pause)
8830 remote_advertising |= LPA_PAUSE_ASYM;
8832 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8833 remote_advertising);
8834 tx_pause = flowctl & FLOW_CTRL_TX;
8835 rx_pause = flowctl & FLOW_CTRL_RX;
8837 if (phydev->duplex == HCLGE_MAC_HALF) {
8842 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8845 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8846 u32 *rx_en, u32 *tx_en)
8848 struct hclge_vport *vport = hclge_get_vport(handle);
8849 struct hclge_dev *hdev = vport->back;
8850 struct phy_device *phydev = hdev->hw.mac.phydev;
8852 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8854 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8860 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8863 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8866 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8875 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8876 u32 rx_en, u32 tx_en)
8879 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8880 else if (rx_en && !tx_en)
8881 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8882 else if (!rx_en && tx_en)
8883 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8885 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8887 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8890 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8891 u32 rx_en, u32 tx_en)
8893 struct hclge_vport *vport = hclge_get_vport(handle);
8894 struct hclge_dev *hdev = vport->back;
8895 struct phy_device *phydev = hdev->hw.mac.phydev;
8899 fc_autoneg = hclge_get_autoneg(handle);
8900 if (auto_neg != fc_autoneg) {
8901 dev_info(&hdev->pdev->dev,
8902 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8907 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8908 dev_info(&hdev->pdev->dev,
8909 "Priority flow control enabled. Cannot set link flow control.\n");
8913 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8915 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8918 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8921 return phy_start_aneg(phydev);
8926 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8927 u8 *auto_neg, u32 *speed, u8 *duplex)
8929 struct hclge_vport *vport = hclge_get_vport(handle);
8930 struct hclge_dev *hdev = vport->back;
8933 *speed = hdev->hw.mac.speed;
8935 *duplex = hdev->hw.mac.duplex;
8937 *auto_neg = hdev->hw.mac.autoneg;
8940 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8943 struct hclge_vport *vport = hclge_get_vport(handle);
8944 struct hclge_dev *hdev = vport->back;
8947 *media_type = hdev->hw.mac.media_type;
8950 *module_type = hdev->hw.mac.module_type;
8953 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8954 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8956 struct hclge_vport *vport = hclge_get_vport(handle);
8957 struct hclge_dev *hdev = vport->back;
8958 struct phy_device *phydev = hdev->hw.mac.phydev;
8959 int mdix_ctrl, mdix, is_resolved;
8960 unsigned int retval;
8963 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8964 *tp_mdix = ETH_TP_MDI_INVALID;
8968 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8970 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8971 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8972 HCLGE_PHY_MDIX_CTRL_S);
8974 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8975 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8976 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8978 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8980 switch (mdix_ctrl) {
8982 *tp_mdix_ctrl = ETH_TP_MDI;
8985 *tp_mdix_ctrl = ETH_TP_MDI_X;
8988 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8991 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8996 *tp_mdix = ETH_TP_MDI_INVALID;
8998 *tp_mdix = ETH_TP_MDI_X;
9000 *tp_mdix = ETH_TP_MDI;
9003 static void hclge_info_show(struct hclge_dev *hdev)
9005 struct device *dev = &hdev->pdev->dev;
9007 dev_info(dev, "PF info begin:\n");
9009 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9010 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9011 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9012 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9013 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9014 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9015 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9016 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9017 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9018 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9019 dev_info(dev, "This is %s PF\n",
9020 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9021 dev_info(dev, "DCB %s\n",
9022 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9023 dev_info(dev, "MQPRIO %s\n",
9024 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9026 dev_info(dev, "PF info end.\n");
static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
                                          struct hclge_vport *vport)
{
        struct hnae3_client *client = vport->nic.client;
        struct hclge_dev *hdev = ae_dev->priv;
        int rst_cnt = hdev->rst_stats.reset_cnt;
        int ret;

        ret = client->ops->init_instance(&vport->nic);
        if (ret)
                return ret;

        set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
        if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
            rst_cnt != hdev->rst_stats.reset_cnt) {
                ret = -EBUSY;
                goto init_nic_err;
        }

        /* Enable nic hw error interrupts */
        ret = hclge_config_nic_hw_error(hdev, true);
        if (ret) {
                dev_err(&ae_dev->pdev->dev,
                        "fail(%d) to enable hw error interrupts\n", ret);
                goto init_nic_err;
        }

        hnae3_set_client_init_flag(client, ae_dev, 1);

        if (netif_msg_drv(&hdev->vport->nic))
                hclge_info_show(hdev);

        return ret;

init_nic_err:
        clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
        while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
                msleep(HCLGE_WAIT_RESET_DONE);

        client->ops->uninit_instance(&vport->nic, 0);

        return ret;
}
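/* Editor's note: the reset_cnt snapshot/compare above closes a race: if a
 * reset begins while init_instance() runs, the half-initialized client is
 * torn down and -EBUSY returned so the caller can retry after the reset.
 * Sketch of the guard in isolation (hypothetical helper, illustration only):
 */
static inline bool hclge_reset_raced_sketch(struct hclge_dev *hdev,
                                            u32 rst_cnt_snapshot)
{
        /* raced if a reset is in flight or one completed since the snapshot */
        return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
               rst_cnt_snapshot != hdev->rst_stats.reset_cnt;
}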
static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
                                           struct hclge_vport *vport)
{
        struct hnae3_client *client = vport->roce.client;
        struct hclge_dev *hdev = ae_dev->priv;
        int rst_cnt;
        int ret;

        if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
            !hdev->nic_client)
                return 0;

        client = hdev->roce_client;
        ret = hclge_init_roce_base_info(vport);
        if (ret)
                return ret;

        rst_cnt = hdev->rst_stats.reset_cnt;
        ret = client->ops->init_instance(&vport->roce);
        if (ret)
                return ret;

        set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
        if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
            rst_cnt != hdev->rst_stats.reset_cnt) {
                ret = -EBUSY;
                goto init_roce_err;
        }

        /* Enable roce ras interrupts */
        ret = hclge_config_rocee_ras_interrupt(hdev, true);
        if (ret) {
                dev_err(&ae_dev->pdev->dev,
                        "fail(%d) to enable roce ras interrupts\n", ret);
                goto init_roce_err;
        }

        hnae3_set_client_init_flag(client, ae_dev, 1);

        return 0;

init_roce_err:
        clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
        while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
                msleep(HCLGE_WAIT_RESET_DONE);

        hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

        return ret;
}
static int hclge_init_client_instance(struct hnae3_client *client,
                                      struct hnae3_ae_dev *ae_dev)
{
        struct hclge_dev *hdev = ae_dev->priv;
        struct hclge_vport *vport;
        int i, ret;

        for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
                vport = &hdev->vport[i];

                switch (client->type) {
                case HNAE3_CLIENT_KNIC:
                        hdev->nic_client = client;
                        vport->nic.client = client;
                        ret = hclge_init_nic_client_instance(ae_dev, vport);
                        if (ret)
                                goto clear_nic;

                        ret = hclge_init_roce_client_instance(ae_dev, vport);
                        if (ret)
                                goto clear_roce;

                        break;
                case HNAE3_CLIENT_ROCE:
                        if (hnae3_dev_roce_supported(hdev)) {
                                hdev->roce_client = client;
                                vport->roce.client = client;
                        }

                        ret = hclge_init_roce_client_instance(ae_dev, vport);
                        if (ret)
                                goto clear_roce;

                        break;
                default:
                        return -EINVAL;
                }
        }

        return 0;

clear_nic:
        hdev->nic_client = NULL;
        vport->nic.client = NULL;
        return ret;
clear_roce:
        hdev->roce_client = NULL;
        vport->roce.client = NULL;
        return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
                                         struct hnae3_ae_dev *ae_dev)
{
        struct hclge_dev *hdev = ae_dev->priv;
        struct hclge_vport *vport;
        int i;

        for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
                vport = &hdev->vport[i];
                if (hdev->roce_client) {
                        clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
                        while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
                                msleep(HCLGE_WAIT_RESET_DONE);

                        hdev->roce_client->ops->uninit_instance(&vport->roce,
                                                                0);
                        hdev->roce_client = NULL;
                        vport->roce.client = NULL;
                }
                if (client->type == HNAE3_CLIENT_ROCE)
                        return;
                if (hdev->nic_client && client->ops->uninit_instance) {
                        clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
                        while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
                                msleep(HCLGE_WAIT_RESET_DONE);

                        client->ops->uninit_instance(&vport->nic, 0);
                        hdev->nic_client = NULL;
                        vport->nic.client = NULL;
                }
        }
}
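/* Editor's note: the teardown order above is deliberate. The RoCE client is
 * uninitialized before the NIC client because RoCE rides on NIC resources,
 * and both paths first clear their *_REGISTERED bit and wait out any reset
 * in progress, so client uninit never runs concurrently with the reset
 * service task.
 */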
static int hclge_pci_init(struct hclge_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;
        struct hclge_hw *hw;
        int ret;

        ret = pci_enable_device(pdev);
        if (ret) {
                dev_err(&pdev->dev, "failed to enable PCI device\n");
                return ret;
        }

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret) {
                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (ret) {
                        dev_err(&pdev->dev, "can't set consistent PCI DMA");
                        goto err_disable_device;
                }
                dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
        }

        ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
        if (ret) {
                dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
                goto err_disable_device;
        }

        pci_set_master(pdev);
        hw = &hdev->hw;
        hw->io_base = pcim_iomap(pdev, 2, 0);
        if (!hw->io_base) {
                dev_err(&pdev->dev, "Can't map configuration register space\n");
                ret = -ENOMEM;
                goto err_clr_master;
        }

        hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

        return 0;

err_clr_master:
        pci_clear_master(pdev);
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);

        return ret;
}
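/* Editor's note: the DMA-mask setup above is the stock PCI probe idiom:
 * prefer a 64-bit mask and fall back to 32-bit before giving up. A minimal
 * self-contained sketch of the same fallback (hypothetical helper, not part
 * of the driver):
 */
static inline int hclge_dma_mask_sketch(struct pci_dev *pdev)
{
        /* 64-bit DMA preferred; 0 on success */
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                return 0;
        /* otherwise constrain the device to 32-bit addressing */
        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}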
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
        struct pci_dev *pdev = hdev->pdev;

        pcim_iounmap(pdev, hdev->hw.io_base);
        pci_free_irq_vectors(pdev);
        pci_clear_master(pdev);
        pci_release_mem_regions(pdev);
        pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
        set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
        set_bit(HCLGE_STATE_DOWN, &hdev->state);
        clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
        clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
        clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
        clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
        clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
        set_bit(HCLGE_STATE_DOWN, &hdev->state);
        set_bit(HCLGE_STATE_REMOVING, &hdev->state);

        if (hdev->reset_timer.function)
                del_timer_sync(&hdev->reset_timer);
        if (hdev->service_task.work.func)
                cancel_delayed_work_sync(&hdev->service_task);
}
static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGE_FLR_RETRY_WAIT_MS 500
#define HCLGE_FLR_RETRY_CNT     5

        struct hclge_dev *hdev = ae_dev->priv;
        int retry_cnt = 0;
        int ret;

retry:
        down(&hdev->reset_sem);
        set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
        hdev->reset_type = HNAE3_FLR_RESET;
        ret = hclge_reset_prepare(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
                        ret);
                if (hdev->reset_pending ||
                    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
                        dev_err(&hdev->pdev->dev,
                                "reset_pending:0x%lx, retry_cnt:%d\n",
                                hdev->reset_pending, retry_cnt);
                        clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
                        up(&hdev->reset_sem);
                        msleep(HCLGE_FLR_RETRY_WAIT_MS);
                        goto retry;
                }
        }

        /* disable misc vector before FLR done */
        hclge_enable_vector(&hdev->misc_vector, false);
        set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
        hdev->rst_stats.flr_rst_cnt++;
}
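/* Editor's note: hclge_flr_prepare() may loop via the retry label above: on
 * a failed prepare it releases reset_sem, sleeps HCLGE_FLR_RETRY_WAIT_MS and
 * retries, bounded by HCLGE_FLR_RETRY_CNT, so stuck firmware cannot wedge
 * the FLR path while the semaphore still serializes it against the other
 * reset handlers.
 */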
static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
{
        struct hclge_dev *hdev = ae_dev->priv;
        int ret;

        hclge_enable_vector(&hdev->misc_vector, true);

        ret = hclge_reset_rebuild(hdev);
        if (ret)
                dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);

        hdev->reset_type = HNAE3_NONE_RESET;
        clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
        up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
        int i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                struct hclge_vport *vport = &hdev->vport[i];
                int ret;

                /* Send cmd to clear VF's FUNC_RST_ING */
                ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
                if (ret)
                        dev_warn(&hdev->pdev->dev,
                                 "clear vf(%u) rst failed %d!\n",
                                 vport->vport_id, ret);
        }
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct pci_dev *pdev = ae_dev->pdev;
        struct hclge_dev *hdev;
        int ret;

        hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
                return -ENOMEM;

        hdev->pdev = pdev;
        hdev->ae_dev = ae_dev;
        hdev->reset_type = HNAE3_NONE_RESET;
        hdev->reset_level = HNAE3_FUNC_RESET;
        ae_dev->priv = hdev;

        /* HW supports 2 layers of VLAN tags */
        hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

        mutex_init(&hdev->vport_lock);
        spin_lock_init(&hdev->fd_rule_lock);
        sema_init(&hdev->reset_sem, 1);

        ret = hclge_pci_init(hdev);
        if (ret)
                goto out;

        /* Firmware command queue initialize */
        ret = hclge_cmd_queue_init(hdev);
        if (ret)
                goto err_pci_uninit;

        /* Firmware command initialize */
        ret = hclge_cmd_init(hdev);
        if (ret)
                goto err_cmd_uninit;

        ret = hclge_get_cap(hdev);
        if (ret)
                goto err_cmd_uninit;

        ret = hclge_configure(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
                goto err_cmd_uninit;
        }

        ret = hclge_init_msi(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
                goto err_cmd_uninit;
        }

        ret = hclge_misc_irq_init(hdev);
        if (ret)
                goto err_msi_uninit;

        ret = hclge_alloc_tqps(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
                goto err_msi_irq_uninit;
        }

        ret = hclge_alloc_vport(hdev);
        if (ret)
                goto err_msi_irq_uninit;

        ret = hclge_map_tqp(hdev);
        if (ret)
                goto err_msi_irq_uninit;

        if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
                ret = hclge_mac_mdio_config(hdev);
                if (ret)
                        goto err_msi_irq_uninit;
        }

        ret = hclge_init_umv_space(hdev);
        if (ret)
                goto err_mdiobus_unreg;

        ret = hclge_mac_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
        if (ret) {
                dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = hclge_config_gro(hdev, true);
        if (ret)
                goto err_mdiobus_unreg;

        ret = hclge_init_vlan_config(hdev);
        if (ret) {
                dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = hclge_tm_schd_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
                goto err_mdiobus_unreg;
        }

        hclge_rss_init_cfg(hdev);
        ret = hclge_rss_init_hw(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = init_mgr_tbl(hdev);
        if (ret) {
                dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
                goto err_mdiobus_unreg;
        }

        ret = hclge_init_fd_config(hdev);
        if (ret) {
                dev_err(&pdev->dev,
                        "fd table init fail, ret=%d\n", ret);
                goto err_mdiobus_unreg;
        }

        INIT_KFIFO(hdev->mac_tnl_log);

        hclge_dcb_ops_set(hdev);

        timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
        INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);

        /* Setup affinity after service timer setup because add_timer_on
         * is called in affinity notify.
         */
        hclge_misc_affinity_setup(hdev);

        hclge_clear_all_event_cause(hdev);
        hclge_clear_resetting_state(hdev);

        /* Log and clear the hw errors that have already occurred */
        hclge_handle_all_hns_hw_errors(ae_dev);

        /* request a delayed reset for error recovery, because an immediate
         * global reset on this PF would affect the pending initialization
         * of other PFs
         */
        if (ae_dev->hw_err_reset_req) {
                enum hnae3_reset_type reset_level;

                reset_level = hclge_get_reset_level(ae_dev,
                                                    &ae_dev->hw_err_reset_req);
                hclge_set_def_reset_request(ae_dev, reset_level);
                mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
        }

        /* Enable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, true);

        hclge_state_init(hdev);
        hdev->last_reset_time = jiffies;

        dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
                 HCLGE_DRIVER_NAME);

        hclge_task_schedule(hdev, round_jiffies_relative(HZ));

        return 0;

err_mdiobus_unreg:
        if (hdev->hw.mac.phydev)
                mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
        hclge_misc_irq_uninit(hdev);
err_msi_uninit:
        pci_free_irq_vectors(pdev);
err_cmd_uninit:
        hclge_cmd_uninit(hdev);
err_pci_uninit:
        pcim_iounmap(pdev, hdev->hw.io_base);
        pci_clear_master(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
out:
        return ret;
}
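/* Editor's note: hclge_init_ae_dev() uses the kernel's standard goto-ladder
 * unwind: each err_* label releases exactly the resources acquired before
 * the failing step, in reverse acquisition order, so every failure path
 * frees precisely what was set up and nothing more.
 */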
static void hclge_stats_clear(struct hclge_dev *hdev)
{
        memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
}

static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
        return hclge_config_switch_param(hdev, vf, enable,
                                         HCLGE_SWITCH_ANTI_SPOOF_MASK);
}

static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
        return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
                                          HCLGE_FILTER_FE_NIC_INGRESS_B,
                                          enable, vf);
}
static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
{
        int ret;

        ret = hclge_set_mac_spoofchk(hdev, vf, enable);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Set vf %d mac spoof check %s failed, ret=%d\n",
                        vf, enable ? "on" : "off", ret);
                return ret;
        }

        ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Set vf %d vlan spoof check %s failed, ret=%d\n",
                        vf, enable ? "on" : "off", ret);

        return ret;
}
static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
                                 bool enable)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 new_spoofchk = enable ? 1 : 0;
        int ret;

        if (hdev->pdev->revision == 0x20)
                return -EOPNOTSUPP;

        vport = hclge_get_vf_vport(hdev, vf);
        if (!vport)
                return -EINVAL;

        if (vport->vf_info.spoofchk == new_spoofchk)
                return 0;

        if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
                dev_warn(&hdev->pdev->dev,
                         "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
                         vf);
        else if (enable && hclge_is_umv_space_full(vport))
                dev_warn(&hdev->pdev->dev,
                         "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
                         vf);

        ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
        if (ret)
                return ret;

        vport->vf_info.spoofchk = new_spoofchk;
        return 0;
}
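/* Editor's note: "spoofchk" here is two hardware knobs programmed together
 * by hclge_set_vf_spoofchk_hw(): a MAC anti-spoof switch parameter and a
 * VLAN ingress filter. The warnings above flag the corner case where a full
 * MAC or VLAN table would make a spoof-checked VF silently drop its own
 * transmit traffic.
 */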
static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        int i;

        if (hdev->pdev->revision == 0x20)
                return 0;

        /* resume the vf spoof check state after reset */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
                                               vport->vf_info.spoofchk);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}
static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 new_trusted = enable ? 1 : 0;
        bool en_bc_pmc;
        int ret;

        vport = hclge_get_vf_vport(hdev, vf);
        if (!vport)
                return -EINVAL;

        if (vport->vf_info.trusted == new_trusted)
                return 0;

        /* Disable promisc mode for VF if it is not trusted any more. */
        if (!enable && vport->vf_info.promisc_enable) {
                en_bc_pmc = hdev->pdev->revision != 0x20;
                ret = hclge_set_vport_promisc_mode(vport, false, false,
                                                   en_bc_pmc);
                if (ret)
                        return ret;
                vport->vf_info.promisc_enable = 0;
                hclge_inform_vf_promisc_info(vport);
        }

        vport->vf_info.trusted = new_trusted;
        return 0;
}
static void hclge_reset_vf_rate(struct hclge_dev *hdev)
{
        int ret;
        int vf;

        /* reset vf rate to default value */
        for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
                struct hclge_vport *vport = &hdev->vport[vf];

                vport->vf_info.max_tx_rate = 0;
                ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
                if (ret)
                        dev_err(&hdev->pdev->dev,
                                "vf%d failed to reset to default, ret=%d\n",
                                vf - HCLGE_VF_VPORT_START_NUM, ret);
        }
}
static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
                                     int min_tx_rate, int max_tx_rate)
{
        if (min_tx_rate != 0 ||
            max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
                dev_err(&hdev->pdev->dev,
                        "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
                        min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
                return -EINVAL;
        }

        return 0;
}
static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
                             int min_tx_rate, int max_tx_rate, bool force)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int ret;

        ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
        if (ret)
                return ret;

        vport = hclge_get_vf_vport(hdev, vf);
        if (!vport)
                return -EINVAL;

        if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
                return 0;

        ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
        if (ret)
                return ret;

        vport->vf_info.max_tx_rate = max_tx_rate;

        return 0;
}
static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
        struct hnae3_handle *handle = &hdev->vport->nic;
        struct hclge_vport *vport;
        int ret;
        int vf;

        /* resume the vf max_tx_rate after reset */
        for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
                vport = hclge_get_vf_vport(hdev, vf);
                if (!vport)
                        return -EINVAL;

                /* zero means max rate, after reset, firmware already set it to
                 * max rate, so just continue.
                 */
                if (!vport->vf_info.max_tx_rate)
                        continue;

                ret = hclge_set_vf_rate(handle, vf, 0,
                                        vport->vf_info.max_tx_rate, true);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "vf%d failed to resume tx_rate:%u, ret=%d\n",
                                vf, vport->vf_info.max_tx_rate, ret);
                        return ret;
                }
        }

        return 0;
}
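/* Editor's note: VF rate limits live only in driver state; a reset leaves
 * the firmware at its default of "unlimited" (0), so the resume path above
 * replays each non-zero vf_info.max_tx_rate with force=true to bypass the
 * no-change shortcut in hclge_set_vf_rate().
 */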
static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                hclge_vport_stop(vport);
                vport++;
        }
}
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct hclge_dev *hdev = ae_dev->priv;
        struct pci_dev *pdev = ae_dev->pdev;
        int ret;

        set_bit(HCLGE_STATE_DOWN, &hdev->state);

        hclge_stats_clear(hdev);
        memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
        memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));

        ret = hclge_cmd_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Cmd queue init failed\n");
                return ret;
        }

        ret = hclge_map_tqp(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
                return ret;
        }

        hclge_reset_umv_space(hdev);

        ret = hclge_mac_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
                return ret;
        }

        ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
        if (ret) {
                dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
                return ret;
        }

        ret = hclge_config_gro(hdev, true);
        if (ret)
                return ret;

        ret = hclge_init_vlan_config(hdev);
        if (ret) {
                dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
                return ret;
        }

        ret = hclge_tm_init_hw(hdev, true);
        if (ret) {
                dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
                return ret;
        }

        ret = hclge_rss_init_hw(hdev);
        if (ret) {
                dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
                return ret;
        }

        ret = init_mgr_tbl(hdev);
        if (ret) {
                dev_err(&pdev->dev,
                        "failed to reinit manager table, ret = %d\n", ret);
                return ret;
        }

        ret = hclge_init_fd_config(hdev);
        if (ret) {
                dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
                return ret;
        }

        /* Log and clear the hw errors that have already occurred */
        hclge_handle_all_hns_hw_errors(ae_dev);

        /* Re-enable the hw error interrupts because
         * the interrupts get disabled on global reset.
         */
        ret = hclge_config_nic_hw_error(hdev, true);
        if (ret) {
                dev_err(&pdev->dev,
                        "fail(%d) to re-enable NIC hw error interrupts\n",
                        ret);
                return ret;
        }

        if (hdev->roce_client) {
                ret = hclge_config_rocee_ras_interrupt(hdev, true);
                if (ret) {
                        dev_err(&pdev->dev,
                                "fail(%d) to re-enable roce ras interrupts\n",
                                ret);
                        return ret;
                }
        }

        hclge_reset_vport_state(hdev);
        ret = hclge_reset_vport_spoofchk(hdev);
        if (ret)
                return ret;

        ret = hclge_resume_vf_rate(hdev);
        if (ret)
                return ret;

        dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
                 HCLGE_DRIVER_NAME);

        return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
        struct hclge_dev *hdev = ae_dev->priv;
        struct hclge_mac *mac = &hdev->hw.mac;

        hclge_reset_vf_rate(hdev);
        hclge_misc_affinity_teardown(hdev);
        hclge_state_uninit(hdev);

        if (mac->phydev)
                mdiobus_unregister(mac->mdio_bus);

        hclge_uninit_umv_space(hdev);

        /* Disable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, false);
        synchronize_irq(hdev->misc_vector.vector_irq);

        /* Disable all hw interrupts */
        hclge_config_mac_tnl_int(hdev, false);
        hclge_config_nic_hw_error(hdev, false);
        hclge_config_rocee_ras_interrupt(hdev, false);

        hclge_cmd_uninit(hdev);
        hclge_misc_irq_uninit(hdev);
        hclge_pci_uninit(hdev);
        mutex_destroy(&hdev->vport_lock);
        hclge_uninit_vport_mac_table(hdev);
        hclge_uninit_vport_vlan_table(hdev);
        ae_dev->priv = NULL;
}
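/* Editor's note: hclge_uninit_ae_dev() mirrors init in reverse: user-visible
 * state (VF rates) goes first, interrupt sources are quiesced before the
 * command queue is torn down, and PCI resources are released last, matching
 * the acquisition order in hclge_init_ae_dev() and hclge_pci_init().
 */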
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        return min_t(u32, hdev->rss_size_max,
                     vport->alloc_tqps / kinfo->num_tc);
}

static void hclge_get_channels(struct hnae3_handle *handle,
                               struct ethtool_channels *ch)
{
        ch->max_combined = hclge_get_max_channels(handle);
        ch->other_count = 1;
        ch->max_other = 1;
        ch->combined_count = handle->kinfo.rss_size;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
                                        u16 *alloc_tqps, u16 *max_rss_size)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        *alloc_tqps = vport->alloc_tqps;
        *max_rss_size = hdev->rss_size_max;
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
                              bool rxfh_configured)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
        struct hclge_dev *hdev = vport->back;
        u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
        u16 cur_rss_size = kinfo->rss_size;
        u16 cur_tqps = kinfo->num_tqps;
        u16 tc_valid[HCLGE_MAX_TC_NUM];
        u16 roundup_size;
        u32 *rss_indir;
        unsigned int i;
        int ret;

        kinfo->req_rss_size = new_tqps_num;

        ret = hclge_tm_vport_map_update(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
                return ret;
        }

        roundup_size = roundup_pow_of_two(kinfo->rss_size);
        roundup_size = ilog2(roundup_size);
        /* Set the RSS TC mode according to the new RSS size */
        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                tc_valid[i] = 0;

                if (!(hdev->hw_tc_map & BIT(i)))
                        continue;

                tc_valid[i] = 1;
                tc_size[i] = roundup_size;
                tc_offset[i] = kinfo->rss_size * i;
        }
        ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
        if (ret)
                return ret;

        /* RSS indirection table has been configured by user */
        if (rxfh_configured)
                goto out;

        /* Reinitializes the rss indirect table according to the new RSS size */
        rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
        if (!rss_indir)
                return -ENOMEM;

        for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
                rss_indir[i] = i % kinfo->rss_size;

        ret = hclge_set_rss(handle, rss_indir, NULL, 0);
        if (ret)
                dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
                        ret);

        kfree(rss_indir);

out:
        if (!ret)
                dev_info(&hdev->pdev->dev,
                         "Channels changed, rss_size from %u to %u, tqps from %u to %u",
                         cur_rss_size, kinfo->rss_size,
                         cur_tqps, kinfo->rss_size * kinfo->num_tc);

        return ret;
}
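/* Editor's note: the hardware expects each TC's RSS span as a power-of-two
 * exponent, so rss_size is first rounded up and then log2-encoded above. A
 * self-contained sketch of that encoding (illustrative helper only):
 */
static inline u16 hclge_rss_tc_size_sketch(u16 rss_size)
{
        /* e.g. rss_size = 12: roundup_pow_of_two -> 16, ilog2 -> 4 */
        return (u16)ilog2(roundup_pow_of_two(rss_size));
}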
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
                              u32 *regs_num_64_bit)
{
        struct hclge_desc desc;
        u32 total_num;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Query register number cmd failed, ret = %d.\n", ret);
                return ret;
        }

        *regs_num_32_bit = le32_to_cpu(desc.data[0]);
        *regs_num_64_bit = le32_to_cpu(desc.data[1]);

        total_num = *regs_num_32_bit + *regs_num_64_bit;
        if (!total_num)
                return -EINVAL;

        return 0;
}
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
                                 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

        struct hclge_desc *desc;
        u32 *reg_val = data;
        __le32 *desc_data;
        int nodata_num;
        int cmd_num;
        int i, k, n;
        int ret;

        if (regs_num == 0)
                return 0;

        nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
        cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
                               HCLGE_32_BIT_REG_RTN_DATANUM);
        desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
        ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Query 32 bit register cmd failed, ret = %d.\n", ret);
                kfree(desc);
                return ret;
        }

        for (i = 0; i < cmd_num; i++) {
                if (i == 0) {
                        desc_data = (__le32 *)(&desc[i].data[0]);
                        n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
                } else {
                        desc_data = (__le32 *)(&desc[i]);
                        n = HCLGE_32_BIT_REG_RTN_DATANUM;
                }
                for (k = 0; k < n; k++) {
                        *reg_val++ = le32_to_cpu(*desc_data++);

                        regs_num--;
                        if (!regs_num)
                                break;
                }
        }

        kfree(desc);
        return 0;
}
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
                                 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

        struct hclge_desc *desc;
        u64 *reg_val = data;
        __le64 *desc_data;
        int nodata_len;
        int cmd_num;
        int i, k, n;
        int ret;

        if (regs_num == 0)
                return 0;

        nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
        cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
                               HCLGE_64_BIT_REG_RTN_DATANUM);
        desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
        ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Query 64 bit register cmd failed, ret = %d.\n", ret);
                kfree(desc);
                return ret;
        }

        for (i = 0; i < cmd_num; i++) {
                if (i == 0) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_64_BIT_REG_RTN_DATANUM;
                }
                for (k = 0; k < n; k++) {
                        *reg_val++ = le64_to_cpu(*desc_data++);

                        regs_num--;
                        if (!regs_num)
                                break;
                }
        }

        kfree(desc);
        return 0;
}
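/* Editor's note: for both dump widths the first descriptor loses
 * HCLGE_*_DESC_NODATA_LEN entries to command metadata, which is why cmd_num
 * is computed from regs_num plus that overhead and why the copy loop uses a
 * shorter stride only for descriptor 0. Worked example for the 32-bit path:
 * 100 registers need DIV_ROUND_UP(100 + 2, 8) = 13 descriptors.
 */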
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3
#define BD_LIST_MAX_NUM		30

int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
        /* prepare 4 commands to query DFX BD number */
        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
        desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
        desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
        desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);

        return hclge_cmd_send(&hdev->hw, desc, 4);
}
static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
                                    int *bd_num_list,
                                    u32 type_num)
{
        u32 entries_per_desc, desc_index, index, offset, i;
        struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
        int ret;

        ret = hclge_query_bd_num_cmd_send(hdev, desc);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get dfx bd num fail, status is %d.\n", ret);
                return ret;
        }

        entries_per_desc = ARRAY_SIZE(desc[0].data);
        for (i = 0; i < type_num; i++) {
                offset = hclge_dfx_bd_offset_list[i];
                index = offset % entries_per_desc;
                desc_index = offset / entries_per_desc;
                bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
        }

        return 0;
}
static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
                                  struct hclge_desc *desc_src, int bd_num,
                                  enum hclge_opcode_type cmd)
{
        struct hclge_desc *desc = desc_src;
        int i, ret;

        hclge_cmd_setup_basic_desc(desc, cmd, true);
        for (i = 0; i < bd_num - 1; i++) {
                desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
                desc++;
                hclge_cmd_setup_basic_desc(desc, cmd, true);
        }

        desc = desc_src;
        ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
                        cmd, ret);

        return ret;
}
static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
                                    void *data)
{
        int entries_per_desc, reg_num, separator_num, desc_index, index, i;
        struct hclge_desc *desc = desc_src;
        u32 *reg = data;

        entries_per_desc = ARRAY_SIZE(desc->data);
        reg_num = entries_per_desc * bd_num;
        separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
        for (i = 0; i < reg_num; i++) {
                index = i % entries_per_desc;
                desc_index = i / entries_per_desc;
                *reg++ = le32_to_cpu(desc[desc_index].data[index]);
        }
        for (i = 0; i < separator_num; i++)
                *reg++ = SEPARATOR_VALUE;

        return reg_num + separator_num;
}
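/* Editor's note: every dumped block is padded with SEPARATOR_VALUE words up
 * to a REG_NUM_PER_LINE boundary so userspace tools can split the blob back
 * into lines. Sketch of the pad computation (illustrative helper, mirrors
 * the REG_NUM_REMAIN_MASK arithmetic above):
 */
static inline int hclge_separator_num_sketch(int reg_num)
{
        /* e.g. reg_num = 10: 4 - (10 & 3) = 2 separator words */
        return REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
}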
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
        u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
        int data_len_per_desc, data_len, bd_num, i;
        int bd_num_list[BD_LIST_MAX_NUM];
        int ret;

        ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get dfx reg bd num fail, status is %d.\n", ret);
                return ret;
        }

        data_len_per_desc = sizeof_field(struct hclge_desc, data);
        *len = 0;
        for (i = 0; i < dfx_reg_type_num; i++) {
                bd_num = bd_num_list[i];
                data_len = data_len_per_desc * bd_num;
                *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
        }

        return ret;
}
static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
        u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
        int bd_num, bd_num_max, buf_len, i;
        int bd_num_list[BD_LIST_MAX_NUM];
        struct hclge_desc *desc_src;
        u32 *reg = data;
        int ret;

        ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get dfx reg bd num fail, status is %d.\n", ret);
                return ret;
        }

        bd_num_max = bd_num_list[0];
        for (i = 1; i < dfx_reg_type_num; i++)
                bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

        buf_len = sizeof(*desc_src) * bd_num_max;
        desc_src = kzalloc(buf_len, GFP_KERNEL);
        if (!desc_src)
                return -ENOMEM;

        for (i = 0; i < dfx_reg_type_num; i++) {
                bd_num = bd_num_list[i];
                ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
                                             hclge_dfx_reg_opcode_list[i]);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Get dfx reg fail, status is %d.\n", ret);
                        break;
                }

                reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
        }

        kfree(desc_src);
        return ret;
}
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
                              struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

        int i, j, reg_num, separator_num;
        int data_num_sum;
        u32 *reg = data;

        /* fetching per-PF register values from PF PCIe register space */
        reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
        separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
        for (i = 0; i < reg_num; i++)
                *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
        for (i = 0; i < separator_num; i++)
                *reg++ = SEPARATOR_VALUE;
        data_num_sum = reg_num + separator_num;

        reg_num = ARRAY_SIZE(common_reg_addr_list);
        separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
        for (i = 0; i < reg_num; i++)
                *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
        for (i = 0; i < separator_num; i++)
                *reg++ = SEPARATOR_VALUE;
        data_num_sum += reg_num + separator_num;

        reg_num = ARRAY_SIZE(ring_reg_addr_list);
        separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
        for (j = 0; j < kinfo->num_tqps; j++) {
                for (i = 0; i < reg_num; i++)
                        *reg++ = hclge_read_dev(&hdev->hw,
                                                ring_reg_addr_list[i] +
                                                HCLGE_RING_REG_OFFSET * j);
                for (i = 0; i < separator_num; i++)
                        *reg++ = SEPARATOR_VALUE;
        }
        data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

        reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
        separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
        for (j = 0; j < hdev->num_msi_used - 1; j++) {
                for (i = 0; i < reg_num; i++)
                        *reg++ = hclge_read_dev(&hdev->hw,
                                                tqp_intr_reg_addr_list[i] +
                                                HCLGE_RING_INT_REG_OFFSET * j);
                for (i = 0; i < separator_num; i++)
                        *reg++ = SEPARATOR_VALUE;
        }
        data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

        return data_num_sum;
}
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
        int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
        int regs_lines_32_bit, regs_lines_64_bit;
        int ret;

        ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get register number failed, ret = %d.\n", ret);
                return ret;
        }

        ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get dfx reg len failed, ret = %d.\n", ret);
                return ret;
        }

        cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
                REG_SEPARATOR_LINE;
        common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
                REG_SEPARATOR_LINE;
        ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
                REG_SEPARATOR_LINE;
        tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
                REG_SEPARATOR_LINE;
        regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
                REG_SEPARATOR_LINE;
        regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
                REG_SEPARATOR_LINE;

        return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
                tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
                regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}
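/* Editor's note: hclge_get_regs_len() must stay in lockstep with
 * hclge_get_regs(): both count whole REG_LEN_PER_LINE lines plus one
 * REG_SEPARATOR_LINE per block, so any block added to the dump has to be
 * reflected in both functions or ethtool -d sees a short or oversized
 * buffer.
 */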
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
                           void *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 regs_num_32_bit, regs_num_64_bit;
        int i, reg_num, separator_num, ret;
        u32 *reg = data;

        *version = hdev->fw_version;

        ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get register number failed, ret = %d.\n", ret);
                return;
        }

        reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

        ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get 32 bit register failed, ret = %d.\n", ret);
                return;
        }
        reg_num = regs_num_32_bit;
        reg += reg_num;
        separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
        for (i = 0; i < separator_num; i++)
                *reg++ = SEPARATOR_VALUE;

        ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get 64 bit register failed, ret = %d.\n", ret);
                return;
        }
        reg_num = regs_num_64_bit * 2;
        reg += reg_num;
        separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
        for (i = 0; i < separator_num; i++)
                *reg++ = SEPARATOR_VALUE;

        ret = hclge_get_dfx_reg(hdev, reg);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Get dfx register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
        struct hclge_set_led_state_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

        req = (struct hclge_set_led_state_cmd *)desc.data;
        hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
                        HCLGE_LED_LOCATE_STATE_S, locate_led_status);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Send set led state cmd error, ret =%d\n", ret);

        return ret;
}

enum hclge_led_status {
        HCLGE_LED_OFF,
        HCLGE_LED_ON,
        HCLGE_LED_NO_CHANGE = 0xFF,
};
static int hclge_set_led_id(struct hnae3_handle *handle,
                            enum ethtool_phys_id_state status)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        switch (status) {
        case ETHTOOL_ID_ACTIVE:
                return hclge_set_led_status(hdev, HCLGE_LED_ON);
        case ETHTOOL_ID_INACTIVE:
                return hclge_set_led_status(hdev, HCLGE_LED_OFF);
        default:
                return -EINVAL;
        }
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
                                unsigned long *supported,
                                unsigned long *advertising)
{
        unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        unsigned int idx = 0;

        for (; idx < size; idx++) {
                supported[idx] = hdev->hw.mac.supported[idx];
                advertising[idx] = hdev->hw.mac.advertising[idx];
        }
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        return hclge_config_gro(hdev, enable);
}
static const struct hnae3_ae_ops hclge_ops = {
        .init_ae_dev = hclge_init_ae_dev,
        .uninit_ae_dev = hclge_uninit_ae_dev,
        .flr_prepare = hclge_flr_prepare,
        .flr_done = hclge_flr_done,
        .init_client_instance = hclge_init_client_instance,
        .uninit_client_instance = hclge_uninit_client_instance,
        .map_ring_to_vector = hclge_map_ring_to_vector,
        .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
        .get_vector = hclge_get_vector,
        .put_vector = hclge_put_vector,
        .set_promisc_mode = hclge_set_promisc_mode,
        .set_loopback = hclge_set_loopback,
        .start = hclge_ae_start,
        .stop = hclge_ae_stop,
        .client_start = hclge_client_start,
        .client_stop = hclge_client_stop,
        .get_status = hclge_get_status,
        .get_ksettings_an_result = hclge_get_ksettings_an_result,
        .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
        .get_media_type = hclge_get_media_type,
        .check_port_speed = hclge_check_port_speed,
        .get_fec = hclge_get_fec,
        .set_fec = hclge_set_fec,
        .get_rss_key_size = hclge_get_rss_key_size,
        .get_rss_indir_size = hclge_get_rss_indir_size,
        .get_rss = hclge_get_rss,
        .set_rss = hclge_set_rss,
        .set_rss_tuple = hclge_set_rss_tuple,
        .get_rss_tuple = hclge_get_rss_tuple,
        .get_tc_size = hclge_get_tc_size,
        .get_mac_addr = hclge_get_mac_addr,
        .set_mac_addr = hclge_set_mac_addr,
        .do_ioctl = hclge_do_ioctl,
        .add_uc_addr = hclge_add_uc_addr,
        .rm_uc_addr = hclge_rm_uc_addr,
        .add_mc_addr = hclge_add_mc_addr,
        .rm_mc_addr = hclge_rm_mc_addr,
        .set_autoneg = hclge_set_autoneg,
        .get_autoneg = hclge_get_autoneg,
        .restart_autoneg = hclge_restart_autoneg,
        .halt_autoneg = hclge_halt_autoneg,
        .get_pauseparam = hclge_get_pauseparam,
        .set_pauseparam = hclge_set_pauseparam,
        .set_mtu = hclge_set_mtu,
        .reset_queue = hclge_reset_tqp,
        .get_stats = hclge_get_stats,
        .get_mac_stats = hclge_get_mac_stat,
        .update_stats = hclge_update_stats,
        .get_strings = hclge_get_strings,
        .get_sset_count = hclge_get_sset_count,
        .get_fw_version = hclge_get_fw_version,
        .get_mdix_mode = hclge_get_mdix_mode,
        .enable_vlan_filter = hclge_enable_vlan_filter,
        .set_vlan_filter = hclge_set_vlan_filter,
        .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
        .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
        .reset_event = hclge_reset_event,
        .get_reset_level = hclge_get_reset_level,
        .set_default_reset_request = hclge_set_def_reset_request,
        .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
        .set_channels = hclge_set_channels,
        .get_channels = hclge_get_channels,
        .get_regs_len = hclge_get_regs_len,
        .get_regs = hclge_get_regs,
        .set_led_id = hclge_set_led_id,
        .get_link_mode = hclge_get_link_mode,
        .add_fd_entry = hclge_add_fd_entry,
        .del_fd_entry = hclge_del_fd_entry,
        .del_all_fd_entries = hclge_del_all_fd_entries,
        .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
        .get_fd_rule_info = hclge_get_fd_rule_info,
        .get_fd_all_rules = hclge_get_all_rules,
        .restore_fd_rules = hclge_restore_fd_entries,
        .enable_fd = hclge_enable_fd,
        .add_arfs_entry = hclge_add_fd_entry_by_arfs,
        .dbg_run_cmd = hclge_dbg_run_cmd,
        .handle_hw_ras_error = hclge_handle_hw_ras_error,
        .get_hw_reset_stat = hclge_get_hw_reset_stat,
        .ae_dev_resetting = hclge_ae_dev_resetting,
        .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
        .set_gro_en = hclge_gro_en,
        .get_global_queue_id = hclge_covert_handle_qid_global,
        .set_timer_task = hclge_set_timer_task,
        .mac_connect_phy = hclge_mac_connect_phy,
        .mac_disconnect_phy = hclge_mac_disconnect_phy,
        .restore_vlan_table = hclge_restore_vlan_table,
        .get_vf_config = hclge_get_vf_config,
        .set_vf_link_state = hclge_set_vf_link_state,
        .set_vf_spoofchk = hclge_set_vf_spoofchk,
        .set_vf_trust = hclge_set_vf_trust,
        .set_vf_rate = hclge_set_vf_rate,
        .set_vf_mac = hclge_set_vf_mac,
};

static struct hnae3_ae_algo ae_algo = {
        .ops = &hclge_ops,
        .pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
        pr_info("%s is initializing\n", HCLGE_NAME);

        hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
        if (!hclge_wq) {
                pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
                return -ENOMEM;
        }

        hnae3_register_ae_algo(&ae_algo);

        return 0;
}

static void hclge_exit(void)
{
        hnae3_unregister_ae_algo(&ae_algo);
        destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);