// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_err.h"
#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500
/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

#define HCLGE_VF_VPORT_START_NUM	1
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};
static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
};
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
};
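
/* Read the MAC statistics with the legacy multi-descriptor command
 * (the "special opcode 0032" mentioned below). Used when the firmware
 * does not support the register-number based method.
 */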
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
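
/* Read all MAC statistics in one shot (the "special opcode 0034"
 * mentioned below), using a descriptor count previously obtained from
 * the firmware.
 */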
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);
	return 0;
}
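
/* Ask the firmware how many MAC statistics registers it exposes and
 * derive the number of command descriptors needed to read them all:
 * one descriptor for the first three registers, then one more for
 * every (up to) four remaining registers.
 */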
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
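
/* Update MAC statistics, preferring the complete (one-shot) method and
 * falling back to the defective method when the firmware returns
 * -EOPNOTSUPP for the register-number query.
 */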
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);
	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}
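
/* Read the per-queue RX and TX packet counters from hardware and
 * accumulate them into the software TQP statistics.
 */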
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
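
/* Copy the accumulated per-queue TX then RX counters into an ethtool
 * stats buffer and return the advanced buffer pointer.
 */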
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has TX & RX two queues */
	return kinfo->num_tqps * 2;
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}
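
/* Helpers that walk a hclge_comm_stats_str table to copy statistics
 * values (and their names) into ethtool buffers.
 */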
static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: all mac modes support it, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (u8 *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
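
/* Parse the function status response: fail while PF reset is not done,
 * otherwise record whether this PF is the main PF.
 */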
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}
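
/* Query the TQP, packet buffer and MSI-X resources assigned to this PF,
 * applying driver defaults when the firmware reports zero-sized buffers.
 */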
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* the NIC's msix number always equals the RoCE's. */
		hdev->num_nic_msi = hdev->num_roce_msi;

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		hdev->num_nic_msi = hdev->num_msi;
	}

	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	return 0;
}
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static u32 hclge_get_max_speed(u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
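
/* Unpack the configuration parameters returned by the
 * HCLGE_OPC_GET_CFG_PARAM command into struct hclge_cfg.
 */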
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length should be in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* the minimal number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
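
/* Read the static configuration and initialise the corresponding
 * fields of struct hclge_dev (queues, MAC, TC map and scheduling mode).
 */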
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure a one-to-one mapping between irq and queue by default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	u32 i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
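
/* Check whether the remaining RX buffer is large enough to hold the
 * private buffers plus the required shared buffer; if so, compute the
 * shared buffer size and its per-TC high/low waterline thresholds.
 */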
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}
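
/* Try to assign a private RX buffer (with high/low waterlines) to every
 * enabled TC; @max selects the larger or smaller waterline scheme.
 * Returns true if the result still fits in the available RX buffer.
 */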
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
					  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
					struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	int i;

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		unsigned int mask = BIT((unsigned int)i);

		if (hdev->hw_tc_map & mask &&
		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				      struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER	0x3C00
#define COMPENSATE_HALF_MPS_NUM	5
#define PRIV_WL_GAP		0x1800

	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 half_mps = hdev->mps >> 1;
	u32 min_rx_priv;
	unsigned int i;

	if (tc_num)
		rx_priv = rx_priv / tc_num;

	if (tc_num <= NEED_RESERVE_TC_NUM)
		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;

	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
		      COMPENSATE_HALF_MPS_NUM * half_mps;
	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);

	if (rx_priv < min_rx_priv)
		return false;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;
		priv->buf_size = rx_priv;
		priv->wl.high = rx_priv - hdev->dv_buf_size;
		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
	}

	buf_alloc->s_buf.buf_size = 0;

	return true;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	/* When DCB is not supported, rx private buffer is not allocated. */
	if (!hnae3_dev_dcb_supported(hdev)) {
		u32 rx_all = hdev->pkt_buf_size;

		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
		return 0;

	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
		return 0;

	/* try to decrease the buffer size */
	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
		return 0;

	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
		return 0;

	return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
2176 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2177 struct hclge_pkt_buf_alloc *buf_alloc)
2179 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2180 struct hclge_rx_com_thrd *req;
2181 struct hclge_desc desc[2];
2182 struct hclge_tc_thrd *tc;
2186 for (i = 0; i < 2; i++) {
2187 hclge_cmd_setup_basic_desc(&desc[i],
2188 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2189 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2191 /* The first descriptor sets the NEXT bit to 1 */
2193 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2195 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2197 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2198 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2200 req->com_thrd[j].high =
2201 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2202 req->com_thrd[j].high |=
2203 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2204 req->com_thrd[j].low =
2205 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2206 req->com_thrd[j].low |=
2207 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2211 /* Send 2 descriptors at one time */
2212 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2214 dev_err(&hdev->pdev->dev,
2215 "common threshold config cmd failed %d\n", ret);
2219 static int hclge_common_wl_config(struct hclge_dev *hdev,
2220 struct hclge_pkt_buf_alloc *buf_alloc)
2222 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2223 struct hclge_rx_com_wl *req;
2224 struct hclge_desc desc;
2227 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2229 req = (struct hclge_rx_com_wl *)desc.data;
2230 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2231 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2233 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2234 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2236 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2238 dev_err(&hdev->pdev->dev,
2239 "common waterline config cmd failed %d\n", ret);
2244 int hclge_buffer_alloc(struct hclge_dev *hdev)
2246 struct hclge_pkt_buf_alloc *pkt_buf;
2249 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2253 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2255 dev_err(&hdev->pdev->dev,
2256 "could not calc tx buffer size for all TCs %d\n", ret);
2260 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2262 dev_err(&hdev->pdev->dev,
2263 "could not alloc tx buffers %d\n", ret);
2267 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2269 dev_err(&hdev->pdev->dev,
2270 "could not calc rx priv buffer size for all TCs %d\n",
2275 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2277 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2282 if (hnae3_dev_dcb_supported(hdev)) {
2283 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2285 dev_err(&hdev->pdev->dev,
2286 "could not configure rx private waterline %d\n",
2291 ret = hclge_common_thrd_config(hdev, pkt_buf);
2293 dev_err(&hdev->pdev->dev,
2294 "could not configure common threshold %d\n",
2300 ret = hclge_common_wl_config(hdev, pkt_buf);
2302 dev_err(&hdev->pdev->dev,
2303 "could not configure common waterline %d\n", ret);
2310 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2312 struct hnae3_handle *roce = &vport->roce;
2313 struct hnae3_handle *nic = &vport->nic;
2315 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2317 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2318 vport->back->num_msi_left == 0)
2321 roce->rinfo.base_vector = vport->back->roce_base_vector;
2323 roce->rinfo.netdev = nic->kinfo.netdev;
2324 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2326 roce->pdev = nic->pdev;
2327 roce->ae_algo = nic->ae_algo;
2328 roce->numa_node_mask = nic->numa_node_mask;
2333 static int hclge_init_msi(struct hclge_dev *hdev)
2335 struct pci_dev *pdev = hdev->pdev;
2339 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2341 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2344 "failed(%d) to allocate MSI/MSI-X vectors\n",
2348 if (vectors < hdev->num_msi)
2349 dev_warn(&hdev->pdev->dev,
2350 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2351 hdev->num_msi, vectors);
2353 hdev->num_msi = vectors;
2354 hdev->num_msi_left = vectors;
2356 hdev->base_msi_vector = pdev->irq;
2357 hdev->roce_base_vector = hdev->base_msi_vector +
2358 hdev->roce_base_msix_offset;
2360 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361 sizeof(u16), GFP_KERNEL);
2362 if (!hdev->vector_status) {
2363 pci_free_irq_vectors(pdev);
2367 for (i = 0; i < hdev->num_msi; i++)
2368 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2370 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2371 sizeof(int), GFP_KERNEL);
2372 if (!hdev->vector_irq) {
2373 pci_free_irq_vectors(pdev);
2380 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2382 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2383 duplex = HCLGE_MAC_FULL;
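/* Illustration (not driver code): the rule hclge_check_speed_dup() encodes
 * -- half duplex only exists for 10M/100M, so any faster speed is silently
 * promoted to full duplex. Speeds are plain Mbps here and the enum values
 * are demo stand-ins; a minimal userspace sketch only.
 */
#if 0 /* standalone example, kept out of the build */
#include <stdio.h>

enum { HALF = 0, FULL = 1 };

static int check_speed_dup(int duplex, int speed_mbps)
{
	if (speed_mbps != 10 && speed_mbps != 100)
		return FULL; /* half duplex unsupported above 100M */
	return duplex;
}

int main(void)
{
	printf("%d %d\n",
	       check_speed_dup(HALF, 100),   /* stays HALF */
	       check_speed_dup(HALF, 1000)); /* forced FULL */
	return 0;
}
#endif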
2388 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2391 struct hclge_config_mac_speed_dup_cmd *req;
2392 struct hclge_desc desc;
2395 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2397 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2400 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2403 case HCLGE_MAC_SPEED_10M:
2404 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2405 HCLGE_CFG_SPEED_S, 6);
2407 case HCLGE_MAC_SPEED_100M:
2408 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2409 HCLGE_CFG_SPEED_S, 7);
2411 case HCLGE_MAC_SPEED_1G:
2412 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2413 HCLGE_CFG_SPEED_S, 0);
2415 case HCLGE_MAC_SPEED_10G:
2416 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2417 HCLGE_CFG_SPEED_S, 1);
2419 case HCLGE_MAC_SPEED_25G:
2420 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2421 HCLGE_CFG_SPEED_S, 2);
2423 case HCLGE_MAC_SPEED_40G:
2424 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2425 HCLGE_CFG_SPEED_S, 3);
2427 case HCLGE_MAC_SPEED_50G:
2428 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2429 HCLGE_CFG_SPEED_S, 4);
2431 case HCLGE_MAC_SPEED_100G:
2432 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2433 HCLGE_CFG_SPEED_S, 5);
2436 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2440 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2443 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2445 dev_err(&hdev->pdev->dev,
2446 "mac speed/duplex config cmd failed %d.\n", ret);
2453 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2457 duplex = hclge_check_speed_dup(duplex, speed);
2458 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2461 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2465 hdev->hw.mac.speed = speed;
2466 hdev->hw.mac.duplex = duplex;
2471 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2474 struct hclge_vport *vport = hclge_get_vport(handle);
2475 struct hclge_dev *hdev = vport->back;
2477 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2480 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2482 struct hclge_config_auto_neg_cmd *req;
2483 struct hclge_desc desc;
2487 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2489 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2491 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2492 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2494 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2496 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2502 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2504 struct hclge_vport *vport = hclge_get_vport(handle);
2505 struct hclge_dev *hdev = vport->back;
2507 if (!hdev->hw.mac.support_autoneg) {
2509 dev_err(&hdev->pdev->dev,
2510 "autoneg is not supported by current port\n");
2517 return hclge_set_autoneg_en(hdev, enable);
2520 static int hclge_get_autoneg(struct hnae3_handle *handle)
2522 struct hclge_vport *vport = hclge_get_vport(handle);
2523 struct hclge_dev *hdev = vport->back;
2524 struct phy_device *phydev = hdev->hw.mac.phydev;
2527 return phydev->autoneg;
2529 return hdev->hw.mac.autoneg;
2532 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2534 struct hclge_vport *vport = hclge_get_vport(handle);
2535 struct hclge_dev *hdev = vport->back;
2538 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2540 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2543 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2546 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2548 struct hclge_vport *vport = hclge_get_vport(handle);
2549 struct hclge_dev *hdev = vport->back;
2551 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2552 return hclge_set_autoneg_en(hdev, !halt);
2557 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2559 struct hclge_config_fec_cmd *req;
2560 struct hclge_desc desc;
2563 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2565 req = (struct hclge_config_fec_cmd *)desc.data;
2566 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2567 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2568 if (fec_mode & BIT(HNAE3_FEC_RS))
2569 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2570 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2571 if (fec_mode & BIT(HNAE3_FEC_BASER))
2572 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2573 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2575 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2577 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2582 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2584 struct hclge_vport *vport = hclge_get_vport(handle);
2585 struct hclge_dev *hdev = vport->back;
2586 struct hclge_mac *mac = &hdev->hw.mac;
2589 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2590 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2594 ret = hclge_set_fec_hw(hdev, fec_mode);
2598 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2602 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2605 struct hclge_vport *vport = hclge_get_vport(handle);
2606 struct hclge_dev *hdev = vport->back;
2607 struct hclge_mac *mac = &hdev->hw.mac;
2610 *fec_ability = mac->fec_ability;
2612 *fec_mode = mac->fec_mode;
2615 static int hclge_mac_init(struct hclge_dev *hdev)
2617 struct hclge_mac *mac = &hdev->hw.mac;
2620 hdev->support_sfp_query = true;
2621 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2622 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2623 hdev->hw.mac.duplex);
2625 dev_err(&hdev->pdev->dev,
2626 "Config mac speed dup fail ret=%d\n", ret);
2630 if (hdev->hw.mac.support_autoneg) {
2631 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2633 dev_err(&hdev->pdev->dev,
2634 "Config mac autoneg fail ret=%d\n", ret);
2641 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2642 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2644 dev_err(&hdev->pdev->dev,
2645 "Fec mode init fail, ret = %d\n", ret);
2650 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2652 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2656 ret = hclge_set_default_loopback(hdev);
2660 ret = hclge_buffer_alloc(hdev);
2662 dev_err(&hdev->pdev->dev,
2663 "allocate buffer fail, ret=%d\n", ret);
2668 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2670 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2671 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2672 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2673 hclge_wq, &hdev->service_task, 0);
2676 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2678 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2679 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2680 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2681 hclge_wq, &hdev->service_task, 0);
2684 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2686 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2687 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2688 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2689 hclge_wq, &hdev->service_task,
2693 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2695 struct hclge_link_status_cmd *req;
2696 struct hclge_desc desc;
2700 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2701 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2703 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2708 req = (struct hclge_link_status_cmd *)desc.data;
2709 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2711 return !!link_status;
2714 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2716 unsigned int mac_state;
2719 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2722 mac_state = hclge_get_mac_link_status(hdev);
2724 if (hdev->hw.mac.phydev) {
2725 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2726 link_stat = mac_state &
2727 hdev->hw.mac.phydev->link;
2732 link_stat = mac_state;
2738 static void hclge_update_link_status(struct hclge_dev *hdev)
2740 struct hnae3_client *rclient = hdev->roce_client;
2741 struct hnae3_client *client = hdev->nic_client;
2742 struct hnae3_handle *rhandle;
2743 struct hnae3_handle *handle;
2750 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2753 state = hclge_get_mac_phy_link(hdev);
2754 if (state != hdev->hw.mac.link) {
2755 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2756 handle = &hdev->vport[i].nic;
2757 client->ops->link_status_change(handle, state);
2758 hclge_config_mac_tnl_int(hdev, state);
2759 rhandle = &hdev->vport[i].roce;
2760 if (rclient && rclient->ops->link_status_change)
2761 rclient->ops->link_status_change(rhandle,
2764 hdev->hw.mac.link = state;
2767 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2770 static void hclge_update_port_capability(struct hclge_mac *mac)
2772 /* update fec ability by speed */
2773 hclge_convert_setting_fec(mac);
2775 /* firmware cannot identify the backplane type; the media type
2776 * read from the configuration can help to deal with it
2778 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2779 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2780 mac->module_type = HNAE3_MODULE_TYPE_KR;
2781 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2782 mac->module_type = HNAE3_MODULE_TYPE_TP;
2784 if (mac->support_autoneg) {
2785 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2786 linkmode_copy(mac->advertising, mac->supported);
2788 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2790 linkmode_zero(mac->advertising);
2794 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2796 struct hclge_sfp_info_cmd *resp;
2797 struct hclge_desc desc;
2800 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2801 resp = (struct hclge_sfp_info_cmd *)desc.data;
2802 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2803 if (ret == -EOPNOTSUPP) {
2804 dev_warn(&hdev->pdev->dev,
2805 "IMP do not support get SFP speed %d\n", ret);
2808 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2812 *speed = le32_to_cpu(resp->speed);
2817 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2819 struct hclge_sfp_info_cmd *resp;
2820 struct hclge_desc desc;
2823 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2824 resp = (struct hclge_sfp_info_cmd *)desc.data;
2826 resp->query_type = QUERY_ACTIVE_SPEED;
2828 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2829 if (ret == -EOPNOTSUPP) {
2830 dev_warn(&hdev->pdev->dev,
2831 "IMP does not support get SFP info %d\n", ret);
2834 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2838 /* In some cases, the MAC speed obtained from the IMP may be 0; it
2839 * shouldn't be set to mac->speed.
2841 if (!le32_to_cpu(resp->speed))
2844 mac->speed = le32_to_cpu(resp->speed);
2845 /* if resp->speed_ability is 0, it means the firmware is an old
2846 * version, so do not update these params
2848 if (resp->speed_ability) {
2849 mac->module_type = le32_to_cpu(resp->module_type);
2850 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2851 mac->autoneg = resp->autoneg;
2852 mac->support_autoneg = resp->autoneg_ability;
2853 mac->speed_type = QUERY_ACTIVE_SPEED;
2854 if (!resp->active_fec)
2857 mac->fec_mode = BIT(resp->active_fec);
2859 mac->speed_type = QUERY_SFP_SPEED;
2865 static int hclge_update_port_info(struct hclge_dev *hdev)
2867 struct hclge_mac *mac = &hdev->hw.mac;
2868 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2871 /* get the port info from SFP cmd if not copper port */
2872 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2875 /* if the IMP does not support getting SFP/qSFP info, return directly */
2876 if (!hdev->support_sfp_query)
2879 if (hdev->pdev->revision >= 0x21)
2880 ret = hclge_get_sfp_info(hdev, mac);
2882 ret = hclge_get_sfp_speed(hdev, &speed);
2884 if (ret == -EOPNOTSUPP) {
2885 hdev->support_sfp_query = false;
2891 if (hdev->pdev->revision >= 0x21) {
2892 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2893 hclge_update_port_capability(mac);
2896 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2899 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2900 return 0; /* do nothing if no SFP */
2902 /* must config full duplex for SFP */
2903 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2907 static int hclge_get_status(struct hnae3_handle *handle)
2909 struct hclge_vport *vport = hclge_get_vport(handle);
2910 struct hclge_dev *hdev = vport->back;
2912 hclge_update_link_status(hdev);
2914 return hdev->hw.mac.link;
2917 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2919 if (pci_num_vf(hdev->pdev) == 0) {
2920 dev_err(&hdev->pdev->dev,
2921 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2925 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2926 dev_err(&hdev->pdev->dev,
2927 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2928 vf, pci_num_vf(hdev->pdev));
2932 /* VFs start from 1 in vport */
2933 vf += HCLGE_VF_VPORT_START_NUM;
2934 return &hdev->vport[vf];
2937 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2938 struct ifla_vf_info *ivf)
2940 struct hclge_vport *vport = hclge_get_vport(handle);
2941 struct hclge_dev *hdev = vport->back;
2943 vport = hclge_get_vf_vport(hdev, vf);
2948 ivf->linkstate = vport->vf_info.link_state;
2949 ivf->spoofchk = vport->vf_info.spoofchk;
2950 ivf->trusted = vport->vf_info.trusted;
2951 ivf->min_tx_rate = 0;
2952 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2953 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2954 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2955 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2956 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2961 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2964 struct hclge_vport *vport = hclge_get_vport(handle);
2965 struct hclge_dev *hdev = vport->back;
2967 vport = hclge_get_vf_vport(hdev, vf);
2971 vport->vf_info.link_state = link_state;
2976 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2978 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2980 /* fetch the events from their corresponding regs */
2981 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2982 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2983 msix_src_reg = hclge_read_dev(&hdev->hw,
2984 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2986 /* Assumption: If by any chance reset and mailbox events are reported
2987 * together then we will only process the reset event in this go and
2988 * will defer the processing of the mailbox events. Since we would not
2989 * have cleared the RX CMDQ event this time, we would receive another
2990 * interrupt from H/W just for the mailbox.
2992 * check for vector0 reset event sources
2994 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2995 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2996 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2997 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2998 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2999 hdev->rst_stats.imp_rst_cnt++;
3000 return HCLGE_VECTOR0_EVENT_RST;
3003 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
3004 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3005 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3006 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3007 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3008 hdev->rst_stats.global_rst_cnt++;
3009 return HCLGE_VECTOR0_EVENT_RST;
3012 /* check for vector0 msix event source */
3013 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3014 *clearval = msix_src_reg;
3015 return HCLGE_VECTOR0_EVENT_ERR;
3018 /* check for vector0 mailbox(=CMDQ RX) event source */
3019 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3020 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3021 *clearval = cmdq_src_reg;
3022 return HCLGE_VECTOR0_EVENT_MBX;
3025 /* print other vector0 event source */
3026 dev_info(&hdev->pdev->dev,
3027 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3028 cmdq_src_reg, msix_src_reg);
3029 *clearval = msix_src_reg;
3031 return HCLGE_VECTOR0_EVENT_OTHER;
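/* Illustration (not driver code): the fixed priority order used by
 * hclge_check_event_cause() above -- reset sources win over error (MSI-X)
 * sources, which win over mailbox, so a combined interrupt is handled one
 * cause at a time. Register values and bit positions are invented for the
 * demo.
 */
#if 0 /* standalone example, kept out of the build */
#include <stdio.h>
#include <stdint.h>

enum ev { EV_RST, EV_ERR, EV_MBX, EV_OTHER };

static enum ev decode(uint32_t rst_src, uint32_t msix_src, uint32_t cmdq_src)
{
	if (rst_src & 0x3)        /* IMP/global reset bits (demo) */
		return EV_RST;
	if (msix_src & 0xff)      /* error summary bits (demo) */
		return EV_ERR;
	if (cmdq_src & (1U << 1)) /* RX CMDQ, i.e. mailbox (demo) */
		return EV_MBX;
	return EV_OTHER;
}

int main(void)
{
	/* reset + mailbox pending together: reset is reported first; the
	 * uncleared mailbox bit re-raises the interrupt later.
	 */
	printf("%d\n", decode(0x1, 0, 1U << 1)); /* -> EV_RST */
	return 0;
}
#endif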
3034 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3037 switch (event_type) {
3038 case HCLGE_VECTOR0_EVENT_RST:
3039 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3041 case HCLGE_VECTOR0_EVENT_MBX:
3042 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3049 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3051 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3052 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3053 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3054 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3055 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3058 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3060 writel(enable ? 1 : 0, vector->addr);
3063 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3065 struct hclge_dev *hdev = data;
3069 hclge_enable_vector(&hdev->misc_vector, false);
3070 event_cause = hclge_check_event_cause(hdev, &clearval);
3072 /* vector 0 interrupt is shared with reset and mailbox source events. */
3073 switch (event_cause) {
3074 case HCLGE_VECTOR0_EVENT_ERR:
3075 /* we do not know what type of reset is required now. This could
3076 * only be decided after we fetch the type of errors which
3077 * caused this event. Therefore, we will do the below for now:
3078 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3079 * have a deferred type of reset to be used.
3080 * 2. Schedule the reset service task.
3081 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3082 * will fetch the correct type of reset. This would be done
3083 * by first decoding the types of errors.
3085 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3087 case HCLGE_VECTOR0_EVENT_RST:
3088 hclge_reset_task_schedule(hdev);
3090 case HCLGE_VECTOR0_EVENT_MBX:
3091 /* If we are here then,
3092 * 1. Either we are not handling any mbx task and we are not
3093 * scheduled as well
3094 * OR
3095 * 2. We could be handling a mbx task but nothing more is
3096 * scheduled.
3097 * In both cases, we should schedule the mbx task as there are more
3098 * mbx messages reported by this interrupt.
3100 hclge_mbx_task_schedule(hdev);
3103 dev_warn(&hdev->pdev->dev,
3104 "received unknown or unhandled event of vector0\n");
3108 hclge_clear_event_cause(hdev, event_cause, clearval);
3110 /* Enable interrupt if it is not caused by reset. And when
3111 * clearval equals 0, it means the interrupt status may have been
3112 * cleared by hardware before the driver reads the status register.
3113 * In this case, the vector0 interrupt also should be enabled.
3116 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3117 hclge_enable_vector(&hdev->misc_vector, true);
3123 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3125 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3126 dev_warn(&hdev->pdev->dev,
3127 "vector(vector_id %d) has been freed.\n", vector_id);
3131 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3132 hdev->num_msi_left += 1;
3133 hdev->num_msi_used -= 1;
3136 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3138 struct hclge_misc_vector *vector = &hdev->misc_vector;
3140 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3142 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3143 hdev->vector_status[0] = 0;
3145 hdev->num_msi_left -= 1;
3146 hdev->num_msi_used += 1;
3149 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3150 const cpumask_t *mask)
3152 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3155 cpumask_copy(&hdev->affinity_mask, mask);
3158 static void hclge_irq_affinity_release(struct kref *ref)
3162 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3164 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3165 &hdev->affinity_mask);
3167 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3168 hdev->affinity_notify.release = hclge_irq_affinity_release;
3169 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3170 &hdev->affinity_notify);
3173 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3175 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3176 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3179 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3183 hclge_get_misc_vector(hdev);
3185 /* this would be explicitly freed in the end */
3186 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3187 HCLGE_NAME, pci_name(hdev->pdev));
3188 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3189 0, hdev->misc_vector.name, hdev);
3191 hclge_free_vector(hdev, 0);
3192 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3193 hdev->misc_vector.vector_irq);
3199 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3201 free_irq(hdev->misc_vector.vector_irq, hdev);
3202 hclge_free_vector(hdev, 0);
3205 int hclge_notify_client(struct hclge_dev *hdev,
3206 enum hnae3_reset_notify_type type)
3208 struct hnae3_client *client = hdev->nic_client;
3211 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3214 if (!client->ops->reset_notify)
3217 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3218 struct hnae3_handle *handle = &hdev->vport[i].nic;
3221 ret = client->ops->reset_notify(handle, type);
3223 dev_err(&hdev->pdev->dev,
3224 "notify nic client failed %d(%d)\n", type, ret);
3232 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3233 enum hnae3_reset_notify_type type)
3235 struct hnae3_client *client = hdev->roce_client;
3239 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3242 if (!client->ops->reset_notify)
3245 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3246 struct hnae3_handle *handle = &hdev->vport[i].roce;
3248 ret = client->ops->reset_notify(handle, type);
3250 dev_err(&hdev->pdev->dev,
3251 "notify roce client failed %d(%d)",
3260 static int hclge_reset_wait(struct hclge_dev *hdev)
3262 #define HCLGE_RESET_WAIT_MS 100
3263 #define HCLGE_RESET_WAIT_CNT 200
3264 u32 val, reg, reg_bit;
3267 switch (hdev->reset_type) {
3268 case HNAE3_IMP_RESET:
3269 reg = HCLGE_GLOBAL_RESET_REG;
3270 reg_bit = HCLGE_IMP_RESET_BIT;
3272 case HNAE3_GLOBAL_RESET:
3273 reg = HCLGE_GLOBAL_RESET_REG;
3274 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3276 case HNAE3_FUNC_RESET:
3277 reg = HCLGE_FUN_RST_ING;
3278 reg_bit = HCLGE_FUN_RST_ING_B;
3280 case HNAE3_FLR_RESET:
3283 dev_err(&hdev->pdev->dev,
3284 "Wait for unsupported reset type: %d\n",
3289 if (hdev->reset_type == HNAE3_FLR_RESET) {
3290 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3291 cnt++ < HCLGE_RESET_WAIT_CNT)
3292 msleep(HCLGE_RESET_WAIT_MS);
3294 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3295 dev_err(&hdev->pdev->dev,
3296 "flr wait timeout: %u\n", cnt);
3303 val = hclge_read_dev(&hdev->hw, reg);
3304 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3305 msleep(HCLGE_RESET_WAIT_MS);
3306 val = hclge_read_dev(&hdev->hw, reg);
3310 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3311 dev_warn(&hdev->pdev->dev,
3312 "Wait for reset timeout: %d\n", hdev->reset_type);
3319 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3321 struct hclge_vf_rst_cmd *req;
3322 struct hclge_desc desc;
3324 req = (struct hclge_vf_rst_cmd *)desc.data;
3325 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3326 req->dest_vfid = func_id;
3331 return hclge_cmd_send(&hdev->hw, &desc, 1);
3334 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3338 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3339 struct hclge_vport *vport = &hdev->vport[i];
3342 /* Send cmd to set/clear VF's FUNC_RST_ING */
3343 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3345 dev_err(&hdev->pdev->dev,
3346 "set vf(%u) rst failed %d!\n",
3347 vport->vport_id, ret);
3351 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3354 /* Inform VF to process the reset.
3355 * hclge_inform_reset_assert_to_vf may fail if VF
3356 * driver is not loaded.
3358 ret = hclge_inform_reset_assert_to_vf(vport);
3360 dev_warn(&hdev->pdev->dev,
3361 "inform reset to vf(%u) failed %d!\n",
3362 vport->vport_id, ret);
3368 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3370 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3371 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3372 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3375 hclge_mbx_handler(hdev);
3377 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3380 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3382 struct hclge_pf_rst_sync_cmd *req;
3383 struct hclge_desc desc;
3387 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3388 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3391 /* VF needs to bring its netdev down via mbx during PF or FLR reset */
3392 hclge_mailbox_service_task(hdev);
3394 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3395 /* for compatibility with old firmware, wait
3396 * 100 ms for the VF to stop IO
3398 if (ret == -EOPNOTSUPP) {
3399 msleep(HCLGE_RESET_SYNC_TIME);
3402 dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3405 } else if (req->all_vf_ready) {
3408 msleep(HCLGE_PF_RESET_SYNC_TIME);
3409 hclge_cmd_reuse_desc(&desc, true);
3410 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3412 dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3416 void hclge_report_hw_error(struct hclge_dev *hdev,
3417 enum hnae3_hw_error_type type)
3419 struct hnae3_client *client = hdev->nic_client;
3422 if (!client || !client->ops->process_hw_error ||
3423 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3426 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3427 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3430 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3434 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3435 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3436 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3437 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3438 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3441 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3442 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3443 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3444 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3448 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3450 struct hclge_desc desc;
3451 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3454 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3455 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3456 req->fun_reset_vfid = func_id;
3458 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3460 dev_err(&hdev->pdev->dev,
3461 "send function reset cmd fail, status =%d\n", ret);
3466 static void hclge_do_reset(struct hclge_dev *hdev)
3468 struct hnae3_handle *handle = &hdev->vport[0].nic;
3469 struct pci_dev *pdev = hdev->pdev;
3472 if (hclge_get_hw_reset_stat(handle)) {
3473 dev_info(&pdev->dev, "Hardware reset not finish\n");
3474 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3475 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3476 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3480 switch (hdev->reset_type) {
3481 case HNAE3_GLOBAL_RESET:
3482 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3483 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3484 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3485 dev_info(&pdev->dev, "Global Reset requested\n");
3487 case HNAE3_FUNC_RESET:
3488 dev_info(&pdev->dev, "PF Reset requested\n");
3489 /* schedule again to check later */
3490 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3491 hclge_reset_task_schedule(hdev);
3493 case HNAE3_FLR_RESET:
3494 dev_info(&pdev->dev, "FLR requested\n");
3495 /* schedule again to check later */
3496 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3497 hclge_reset_task_schedule(hdev);
3500 dev_warn(&pdev->dev,
3501 "Unsupported reset type: %d\n", hdev->reset_type);
3506 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3507 unsigned long *addr)
3509 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3510 struct hclge_dev *hdev = ae_dev->priv;
3512 /* first, resolve any unknown reset type to the known type(s) */
3513 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3514 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3515 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3516 /* we will intentionally ignore any errors from this function
3517 * as we will end up in *some* reset request in any case
3519 if (hclge_handle_hw_msix_error(hdev, addr))
3520 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3523 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3524 /* We deferred the clearing of the error event which caused the
3525 * interrupt since it was not possible to do that in the
3526 * interrupt context (and this is the reason we introduced the
3527 * new UNKNOWN reset type). Now that the errors have been
3528 * handled and cleared in hardware, we can safely enable
3529 * interrupts. This is an exception to the norm.
3531 hclge_enable_vector(&hdev->misc_vector, true);
3534 /* return the highest priority reset level amongst all */
3535 if (test_bit(HNAE3_IMP_RESET, addr)) {
3536 rst_level = HNAE3_IMP_RESET;
3537 clear_bit(HNAE3_IMP_RESET, addr);
3538 clear_bit(HNAE3_GLOBAL_RESET, addr);
3539 clear_bit(HNAE3_FUNC_RESET, addr);
3540 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3541 rst_level = HNAE3_GLOBAL_RESET;
3542 clear_bit(HNAE3_GLOBAL_RESET, addr);
3543 clear_bit(HNAE3_FUNC_RESET, addr);
3544 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3545 rst_level = HNAE3_FUNC_RESET;
3546 clear_bit(HNAE3_FUNC_RESET, addr);
3547 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3548 rst_level = HNAE3_FLR_RESET;
3549 clear_bit(HNAE3_FLR_RESET, addr);
3552 if (hdev->reset_type != HNAE3_NONE_RESET &&
3553 rst_level < hdev->reset_type)
3554 return HNAE3_NONE_RESET;
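/* Illustration (not driver code): the priority resolution in
 * hclge_get_reset_level() above -- pick the most severe pending reset and
 * clear it plus everything it supersedes, so one big reset absorbs the
 * smaller ones. Bit positions are demo values; the kernel uses
 * test_bit()/clear_bit() on an unsigned long.
 */
#if 0 /* standalone example, kept out of the build */
#include <stdio.h>

enum { FUNC_B, GLOBAL_B, IMP_B }; /* severity grows left to right */

static int get_level(unsigned long *addr)
{
	if (*addr & (1UL << IMP_B)) {
		*addr &= ~((1UL << IMP_B) | (1UL << GLOBAL_B) |
			   (1UL << FUNC_B)); /* IMP covers the rest */
		return IMP_B;
	}
	if (*addr & (1UL << GLOBAL_B)) {
		*addr &= ~((1UL << GLOBAL_B) | (1UL << FUNC_B));
		return GLOBAL_B;
	}
	if (*addr & (1UL << FUNC_B)) {
		*addr &= ~(1UL << FUNC_B);
		return FUNC_B;
	}
	return -1; /* nothing pending */
}

int main(void)
{
	unsigned long pending = (1UL << FUNC_B) | (1UL << GLOBAL_B);
	int level = get_level(&pending);

	printf("level=%d, left=0x%lx\n", level, pending);
	return 0;
}
#endif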
3559 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3563 switch (hdev->reset_type) {
3564 case HNAE3_IMP_RESET:
3565 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3567 case HNAE3_GLOBAL_RESET:
3568 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3577 /* For revision 0x20, the reset interrupt source
3578 * can only be cleared after the hardware reset is done
3580 if (hdev->pdev->revision == 0x20)
3581 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3584 hclge_enable_vector(&hdev->misc_vector, true);
3587 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3591 switch (hdev->reset_type) {
3592 case HNAE3_FUNC_RESET:
3594 case HNAE3_FLR_RESET:
3595 ret = hclge_set_all_vf_rst(hdev, true);
3604 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3608 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3610 reg_val |= HCLGE_NIC_SW_RST_RDY;
3612 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3614 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3617 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3622 switch (hdev->reset_type) {
3623 case HNAE3_FUNC_RESET:
3624 /* to confirm whether all running VFs are ready
3625 * before requesting PF reset
3627 ret = hclge_func_reset_sync_vf(hdev);
3631 ret = hclge_func_reset_cmd(hdev, 0);
3633 dev_err(&hdev->pdev->dev,
3634 "asserting function reset fail %d!\n", ret);
3638 /* After performing PF reset, it is not necessary to do the
3639 * mailbox handling or send any command to firmware, because
3640 * any mailbox handling or command to firmware is only valid
3641 * after hclge_cmd_init is called.
3643 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3644 hdev->rst_stats.pf_rst_cnt++;
3646 case HNAE3_FLR_RESET:
3647 /* to confirm whether all running VFs are ready
3648 * before requesting PF reset
3650 ret = hclge_func_reset_sync_vf(hdev);
3654 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3655 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3656 hdev->rst_stats.flr_rst_cnt++;
3658 case HNAE3_IMP_RESET:
3659 hclge_handle_imp_error(hdev);
3660 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3661 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3662 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3668 /* inform hardware that preparatory work is done */
3669 msleep(HCLGE_RESET_SYNC_TIME);
3670 hclge_reset_handshake(hdev, true);
3671 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3676 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3678 #define MAX_RESET_FAIL_CNT 5
3680 if (hdev->reset_pending) {
3681 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3682 hdev->reset_pending);
3684 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3685 HCLGE_RESET_INT_M) {
3686 dev_info(&hdev->pdev->dev,
3687 "reset failed because new reset interrupt\n");
3688 hclge_clear_reset_cause(hdev);
3690 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3691 hdev->rst_stats.reset_fail_cnt++;
3692 set_bit(hdev->reset_type, &hdev->reset_pending);
3693 dev_info(&hdev->pdev->dev,
3694 "re-schedule reset task(%u)\n",
3695 hdev->rst_stats.reset_fail_cnt);
3699 hclge_clear_reset_cause(hdev);
3701 /* recover the handshake status when reset fails */
3702 hclge_reset_handshake(hdev, true);
3704 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3706 hclge_dbg_dump_rst_info(hdev);
3708 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3713 static int hclge_set_rst_done(struct hclge_dev *hdev)
3715 struct hclge_pf_rst_done_cmd *req;
3716 struct hclge_desc desc;
3719 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3720 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3721 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3723 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3724 /* To be compatible with the old firmware, which does not support
3725 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3726 * return success
3727 */
3728 if (ret == -EOPNOTSUPP) {
3729 dev_warn(&hdev->pdev->dev,
3730 "current firmware does not support command(0x%x)!\n",
3731 HCLGE_OPC_PF_RST_DONE);
3734 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3741 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3745 switch (hdev->reset_type) {
3746 case HNAE3_FUNC_RESET:
3748 case HNAE3_FLR_RESET:
3749 ret = hclge_set_all_vf_rst(hdev, false);
3751 case HNAE3_GLOBAL_RESET:
3753 case HNAE3_IMP_RESET:
3754 ret = hclge_set_rst_done(hdev);
3760 /* clear up the handshake status after re-initialization is done */
3761 hclge_reset_handshake(hdev, false);
3766 static int hclge_reset_stack(struct hclge_dev *hdev)
3770 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3774 ret = hclge_reset_ae_dev(hdev->ae_dev);
3778 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3782 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3785 static void hclge_reset(struct hclge_dev *hdev)
3787 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3788 enum hnae3_reset_type reset_level;
3791 /* Initialize ae_dev reset status as well, in case enet layer wants to
3792 * know if device is undergoing reset
3794 ae_dev->reset_type = hdev->reset_type;
3795 hdev->rst_stats.reset_cnt++;
3796 /* perform reset of the stack & ae device for a client */
3797 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3801 ret = hclge_reset_prepare_down(hdev);
3806 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3808 goto err_reset_lock;
3812 ret = hclge_reset_prepare_wait(hdev);
3816 if (hclge_reset_wait(hdev))
3819 hdev->rst_stats.hw_reset_done_cnt++;
3821 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3827 ret = hclge_reset_stack(hdev);
3829 goto err_reset_lock;
3831 hclge_clear_reset_cause(hdev);
3833 ret = hclge_reset_prepare_up(hdev);
3835 goto err_reset_lock;
3839 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3840 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3841 * times
3842 */
3843 if (ret &&
3844 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3849 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3851 goto err_reset_lock;
3855 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3859 hdev->last_reset_time = jiffies;
3860 hdev->rst_stats.reset_fail_cnt = 0;
3861 hdev->rst_stats.reset_done_cnt++;
3862 ae_dev->reset_type = HNAE3_NONE_RESET;
3863 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3865 /* if default_reset_request has a higher level reset request,
3866 * it should be handled as soon as possible, since some errors
3867 * need this kind of reset to fix them.
3869 reset_level = hclge_get_reset_level(ae_dev,
3870 &hdev->default_reset_request);
3871 if (reset_level != HNAE3_NONE_RESET)
3872 set_bit(reset_level, &hdev->reset_request);
3879 if (hclge_reset_err_handle(hdev))
3880 hclge_reset_task_schedule(hdev);
3883 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3885 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3886 struct hclge_dev *hdev = ae_dev->priv;
3888 /* We might end up getting called broadly because of the 2 cases below:
3889 * 1. A recoverable error was conveyed through APEI and the only way
3890 * to bring normalcy back is to reset.
3891 * 2. A new reset request from the stack due to timeout
3892 *
3893 * For the first case, the error event might not have an ae handle
3894 * available. Check if this is a new reset request and we are not here
3895 * just because the last reset attempt did not succeed and the watchdog
3896 * hit us again. We will know this if the last reset request did not
3897 * occur very recently (watchdog timer = 5*HZ, let us check after a
3898 * sufficiently large time, say 4*5*HZ). In case of a new request we
3899 * reset the "reset level" to PF reset. And if it is a repeat reset
3900 * request of the most recent one then we want to make sure we throttle
3901 * the reset request. Therefore, we will not allow it again before 3*HZ.
3904 handle = &hdev->vport[0].nic;
3906 if (time_before(jiffies, (hdev->last_reset_time +
3907 HCLGE_RESET_INTERVAL))) {
3908 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3910 } else if (hdev->default_reset_request) {
3912 hclge_get_reset_level(ae_dev,
3913 &hdev->default_reset_request);
3914 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3915 hdev->reset_level = HNAE3_FUNC_RESET;
3918 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3921 /* request reset & schedule reset task */
3922 set_bit(hdev->reset_level, &hdev->reset_request);
3923 hclge_reset_task_schedule(hdev);
3925 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3926 hdev->reset_level++;
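/* Illustration (not driver code): the request throttling in
 * hclge_reset_event() above -- a request that arrives too soon after the
 * last reset is parked on a timer, while a request after a long quiet
 * period restarts escalation at the lowest (FUNC) level. time() stands in
 * for jiffies; the 3 s and 20 s windows are demo stand-ins for
 * HCLGE_RESET_INTERVAL and 4*5*HZ.
 */
#if 0 /* standalone example, kept out of the build */
#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);
	time_t last_reset = now - 1; /* pretend a reset finished 1 s ago */
	int func_reset = 0;

	if (now < last_reset + 3)
		printf("too soon: re-arm timer, handle later\n");
	else if (now > last_reset + 20)
		func_reset = 1; /* quiet long enough: restart at FUNC reset */

	printf("func_reset=%d\n", func_reset);
	return 0;
}
#endif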
3929 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3930 enum hnae3_reset_type rst_type)
3932 struct hclge_dev *hdev = ae_dev->priv;
3934 set_bit(rst_type, &hdev->default_reset_request);
3937 static void hclge_reset_timer(struct timer_list *t)
3939 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3941 /* if default_reset_request has no value, it means that this reset
3942 * request has already been handled, so just return here
3944 if (!hdev->default_reset_request)
3947 dev_info(&hdev->pdev->dev,
3948 "triggering reset in reset timer\n");
3949 hclge_reset_event(hdev->pdev, NULL);
3952 static void hclge_reset_subtask(struct hclge_dev *hdev)
3954 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3956 /* check if there is any ongoing reset in the hardware. This status can
3957 * be checked from reset_pending. If there is, then we need to wait for
3958 * the hardware to complete the reset.
3959 * a. If we are able to figure out in reasonable time that the hardware
3960 * has fully reset, then we can proceed with driver and client
3961 * reinit, etc.
3962 * b. else, we can come back later to check this status so re-sched
3963 * now.
3964 */
3965 hdev->last_reset_time = jiffies;
3966 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3967 if (hdev->reset_type != HNAE3_NONE_RESET)
3970 /* check if we got any *new* reset requests to be honored */
3971 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3972 if (hdev->reset_type != HNAE3_NONE_RESET)
3973 hclge_do_reset(hdev);
3975 hdev->reset_type = HNAE3_NONE_RESET;
3978 static void hclge_reset_service_task(struct hclge_dev *hdev)
3980 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3983 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3986 hclge_reset_subtask(hdev);
3988 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3991 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3995 /* start from vport 1 since the PF is always alive */
3996 for (i = 1; i < hdev->num_alloc_vport; i++) {
3997 struct hclge_vport *vport = &hdev->vport[i];
3999 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4000 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4002 /* If the VF is not alive, set its mps to the default value */
4003 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4004 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4008 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4010 unsigned long delta = round_jiffies_relative(HZ);
4012 /* Always handle the link updating to make sure link state is
4013 * updated when it is triggered by mbx.
4015 hclge_update_link_status(hdev);
4017 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4018 delta = jiffies - hdev->last_serv_processed;
4020 if (delta < round_jiffies_relative(HZ)) {
4021 delta = round_jiffies_relative(HZ) - delta;
4026 hdev->serv_processed_cnt++;
4027 hclge_update_vport_alive(hdev);
4029 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4030 hdev->last_serv_processed = jiffies;
4034 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4035 hclge_update_stats_for_all(hdev);
4037 hclge_update_port_info(hdev);
4038 hclge_sync_vlan_filter(hdev);
4040 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4041 hclge_rfs_filter_expire(hdev);
4043 hdev->last_serv_processed = jiffies;
4046 hclge_task_schedule(hdev, delta);
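/* Illustration (not driver code): the rescheduling math in
 * hclge_periodic_service_task() above -- if the last full pass ran less
 * than a second ago, sleep only for the remainder so the heavy work still
 * ticks roughly once per second. Millisecond integers stand in for
 * jiffies/HZ and round_jiffies_relative().
 */
#if 0 /* standalone example, kept out of the build */
#include <stdio.h>

int main(void)
{
	unsigned int hz = 1000;               /* 1 s worth of "jiffies" */
	unsigned int now = 107500;            /* fake clock, ms */
	unsigned int last_processed = 107000;
	unsigned int delta = hz;              /* default: run again in 1 s */

	if (last_processed + hz > now) {      /* last pass under 1 s ago */
		delta = hz - (now - last_processed); /* top up to 1 s */
		/* the driver also skips the heavy work in this branch */
	}
	printf("reschedule in %u ms\n", delta);
	return 0;
}
#endif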
4049 static void hclge_service_task(struct work_struct *work)
4051 struct hclge_dev *hdev =
4052 container_of(work, struct hclge_dev, service_task.work);
4054 hclge_reset_service_task(hdev);
4055 hclge_mailbox_service_task(hdev);
4056 hclge_periodic_service_task(hdev);
4058 /* Handle reset and mbx again in case the periodic task delays the
4059 * handling by calling hclge_task_schedule() in
4060 * hclge_periodic_service_task().
4062 hclge_reset_service_task(hdev);
4063 hclge_mailbox_service_task(hdev);
4066 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4068 /* VF handle has no client */
4069 if (!handle->client)
4070 return container_of(handle, struct hclge_vport, nic);
4071 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4072 return container_of(handle, struct hclge_vport, roce);
4074 return container_of(handle, struct hclge_vport, nic);
4077 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4078 struct hnae3_vector_info *vector_info)
4080 struct hclge_vport *vport = hclge_get_vport(handle);
4081 struct hnae3_vector_info *vector = vector_info;
4082 struct hclge_dev *hdev = vport->back;
4086 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4087 vector_num = min(hdev->num_msi_left, vector_num);
4089 for (j = 0; j < vector_num; j++) {
4090 for (i = 1; i < hdev->num_msi; i++) {
4091 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4092 vector->vector = pci_irq_vector(hdev->pdev, i);
4093 vector->io_addr = hdev->hw.io_base +
4094 HCLGE_VECTOR_REG_BASE +
4095 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4097 HCLGE_VECTOR_VF_OFFSET;
4098 hdev->vector_status[i] = vport->vport_id;
4099 hdev->vector_irq[i] = vector->vector;
4108 hdev->num_msi_left -= alloc;
4109 hdev->num_msi_used += alloc;
4114 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4118 for (i = 0; i < hdev->num_msi; i++)
4119 if (vector == hdev->vector_irq[i])
4125 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4127 struct hclge_vport *vport = hclge_get_vport(handle);
4128 struct hclge_dev *hdev = vport->back;
4131 vector_id = hclge_get_vector_index(hdev, vector);
4132 if (vector_id < 0) {
4133 dev_err(&hdev->pdev->dev,
4134 "Get vector index fail. vector_id =%d\n", vector_id);
4138 hclge_free_vector(hdev, vector_id);
4143 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4145 return HCLGE_RSS_KEY_SIZE;
4148 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4150 return HCLGE_RSS_IND_TBL_SIZE;
4153 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4154 const u8 hfunc, const u8 *key)
4156 struct hclge_rss_config_cmd *req;
4157 unsigned int key_offset = 0;
4158 struct hclge_desc desc;
4163 key_counts = HCLGE_RSS_KEY_SIZE;
4164 req = (struct hclge_rss_config_cmd *)desc.data;
4166 while (key_counts) {
4167 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4170 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4171 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4173 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4174 memcpy(req->hash_key,
4175 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4177 key_counts -= key_size;
4179 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4181 dev_err(&hdev->pdev->dev,
4182 "Configure RSS config fail, status = %d\n",
4190 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4192 struct hclge_rss_indirection_table_cmd *req;
4193 struct hclge_desc desc;
4197 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4199 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4200 hclge_cmd_setup_basic_desc
4201 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4203 req->start_table_index =
4204 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4205 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4207 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4208 req->rss_result[j] =
4209 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4211 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4213 dev_err(&hdev->pdev->dev,
4214 "Configure rss indir table fail,status = %d\n",
4222 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4223 u16 *tc_size, u16 *tc_offset)
4225 struct hclge_rss_tc_mode_cmd *req;
4226 struct hclge_desc desc;
4230 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4231 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4233 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4236 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4237 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4238 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4239 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4240 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4242 req->rss_tc_mode[i] = cpu_to_le16(mode);
4245 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4247 dev_err(&hdev->pdev->dev,
4248 "Configure rss tc mode fail, status = %d\n", ret);
4253 static void hclge_get_rss_type(struct hclge_vport *vport)
4255 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4256 vport->rss_tuple_sets.ipv4_udp_en ||
4257 vport->rss_tuple_sets.ipv4_sctp_en ||
4258 vport->rss_tuple_sets.ipv6_tcp_en ||
4259 vport->rss_tuple_sets.ipv6_udp_en ||
4260 vport->rss_tuple_sets.ipv6_sctp_en)
4261 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4262 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4263 vport->rss_tuple_sets.ipv6_fragment_en)
4264 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4266 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4269 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4271 struct hclge_rss_input_tuple_cmd *req;
4272 struct hclge_desc desc;
4275 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4277 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4279 /* Get the tuple cfg from pf */
4280 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4281 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4282 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4283 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4284 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4285 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4286 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4287 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4288 hclge_get_rss_type(&hdev->vport[0]);
4289 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4291 dev_err(&hdev->pdev->dev,
4292 "Configure rss input fail, status = %d\n", ret);
4296 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4299 struct hclge_vport *vport = hclge_get_vport(handle);
4302 /* Get hash algorithm */
4304 switch (vport->rss_algo) {
4305 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4306 *hfunc = ETH_RSS_HASH_TOP;
4308 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4309 *hfunc = ETH_RSS_HASH_XOR;
4312 *hfunc = ETH_RSS_HASH_UNKNOWN;
4317 /* Get the RSS Key required by the user */
4319 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4321 /* Get the indirection table */
4323 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4324 indir[i] = vport->rss_indirection_tbl[i];
4329 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4330 const u8 *key, const u8 hfunc)
4332 struct hclge_vport *vport = hclge_get_vport(handle);
4333 struct hclge_dev *hdev = vport->back;
4337 /* Set the RSS Hash Key if specified by the user */
4340 case ETH_RSS_HASH_TOP:
4341 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4343 case ETH_RSS_HASH_XOR:
4344 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4346 case ETH_RSS_HASH_NO_CHANGE:
4347 hash_algo = vport->rss_algo;
4353 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4357 /* Update the shadow RSS key with the user specified key */
4358 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4359 vport->rss_algo = hash_algo;
4362 /* Update the shadow RSS table with user specified qids */
4363 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4364 vport->rss_indirection_tbl[i] = indir[i];
4366 /* Update the hardware */
4367 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4370 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4372 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4374 if (nfc->data & RXH_L4_B_2_3)
4375 hash_sets |= HCLGE_D_PORT_BIT;
4377 hash_sets &= ~HCLGE_D_PORT_BIT;
4379 if (nfc->data & RXH_IP_SRC)
4380 hash_sets |= HCLGE_S_IP_BIT;
4382 hash_sets &= ~HCLGE_S_IP_BIT;
4384 if (nfc->data & RXH_IP_DST)
4385 hash_sets |= HCLGE_D_IP_BIT;
4387 hash_sets &= ~HCLGE_D_IP_BIT;
4389 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4390 hash_sets |= HCLGE_V_TAG_BIT;
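/* Illustration (not driver code): building the tuple bitmask the way
 * hclge_get_rss_hash_bits() does above -- each ethtool RXH_* request flag
 * toggles one hardware field bit. The RXH_* values and field bits here are
 * demo constants, not the real ethtool/HCLGE ones.
 */
#if 0 /* standalone example, kept out of the build */
#include <stdio.h>
#include <stdint.h>

#define RXH_IP_SRC   (1U << 0) /* demo values */
#define RXH_IP_DST   (1U << 1)
#define RXH_L4_B_0_1 (1U << 2)
#define RXH_L4_B_2_3 (1U << 3)

#define S_IP_BIT   (1U << 0)   /* demo hardware field bits */
#define D_IP_BIT   (1U << 1)
#define S_PORT_BIT (1U << 2)
#define D_PORT_BIT (1U << 3)

static uint8_t hash_bits(uint32_t data)
{
	uint8_t sets = data & RXH_L4_B_0_1 ? S_PORT_BIT : 0;

	if (data & RXH_L4_B_2_3)
		sets |= D_PORT_BIT;
	if (data & RXH_IP_SRC)
		sets |= S_IP_BIT;
	if (data & RXH_IP_DST)
		sets |= D_IP_BIT;
	return sets;
}

int main(void)
{
	printf("0x%x\n", hash_bits(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1));
	return 0;
}
#endif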
4395 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4396 struct ethtool_rxnfc *nfc)
4398 struct hclge_vport *vport = hclge_get_vport(handle);
4399 struct hclge_dev *hdev = vport->back;
4400 struct hclge_rss_input_tuple_cmd *req;
4401 struct hclge_desc desc;
4405 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4406 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4409 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4410 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4412 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4413 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4414 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4415 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4416 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4417 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4418 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4419 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4421 tuple_sets = hclge_get_rss_hash_bits(nfc);
4422 switch (nfc->flow_type) {
4424 req->ipv4_tcp_en = tuple_sets;
4427 req->ipv6_tcp_en = tuple_sets;
4430 req->ipv4_udp_en = tuple_sets;
4433 req->ipv6_udp_en = tuple_sets;
4436 req->ipv4_sctp_en = tuple_sets;
4439 if ((nfc->data & RXH_L4_B_0_1) ||
4440 (nfc->data & RXH_L4_B_2_3))
4443 req->ipv6_sctp_en = tuple_sets;
4446 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4449 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4455 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4457 dev_err(&hdev->pdev->dev,
4458 "Set rss tuple fail, status = %d\n", ret);
4462 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4463 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4464 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4465 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4466 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4467 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4468 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4469 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4470 hclge_get_rss_type(vport);
4474 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4475 struct ethtool_rxnfc *nfc)
4477 struct hclge_vport *vport = hclge_get_vport(handle);
4482 switch (nfc->flow_type) {
4484 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4487 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4490 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4493 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4496 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4499 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4503 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4512 if (tuple_sets & HCLGE_D_PORT_BIT)
4513 nfc->data |= RXH_L4_B_2_3;
4514 if (tuple_sets & HCLGE_S_PORT_BIT)
4515 nfc->data |= RXH_L4_B_0_1;
4516 if (tuple_sets & HCLGE_D_IP_BIT)
4517 nfc->data |= RXH_IP_DST;
4518 if (tuple_sets & HCLGE_S_IP_BIT)
4519 nfc->data |= RXH_IP_SRC;
4524 static int hclge_get_tc_size(struct hnae3_handle *handle)
4526 struct hclge_vport *vport = hclge_get_vport(handle);
4527 struct hclge_dev *hdev = vport->back;
4529 return hdev->rss_size_max;
4532 int hclge_rss_init_hw(struct hclge_dev *hdev)
4534 struct hclge_vport *vport = hdev->vport;
4535 u8 *rss_indir = vport[0].rss_indirection_tbl;
4536 u16 rss_size = vport[0].alloc_rss_size;
4537 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4538 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4539 u8 *key = vport[0].rss_hash_key;
4540 u8 hfunc = vport[0].rss_algo;
4541 u16 tc_valid[HCLGE_MAX_TC_NUM];
4546 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4550 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4554 ret = hclge_set_rss_input_tuple(hdev);
	/* Each TC has the same queue size, and the tc_size written to
	 * hardware is the log2 of the roundup power of two of rss_size;
	 * the actual queue size is limited by the indirection table.
	 */
4562 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4563 dev_err(&hdev->pdev->dev,
4564 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4569 roundup_size = roundup_pow_of_two(rss_size);
4570 roundup_size = ilog2(roundup_size);
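	/* e.g. rss_size = 24: roundup_pow_of_two() yields 32 and ilog2()
	 * yields 5, so the tc_size programmed below is 5 (2^5 = 32 slots).
	 */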
4572 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4575 if (!(hdev->hw_tc_map & BIT(i)))
4579 tc_size[i] = roundup_size;
4580 tc_offset[i] = rss_size * i;
4583 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4586 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4588 struct hclge_vport *vport = hdev->vport;
4591 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4592 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4593 vport[j].rss_indirection_tbl[i] =
4594 i % vport[j].alloc_rss_size;
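		/* e.g. alloc_rss_size = 4 fills the table as 0,1,2,3,0,1,...
		 * so hash results spread round-robin over the allocated
		 * RSS queues.
		 */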
4598 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4600 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4601 struct hclge_vport *vport = hdev->vport;
4603 if (hdev->pdev->revision >= 0x21)
4604 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4606 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4607 vport[i].rss_tuple_sets.ipv4_tcp_en =
4608 HCLGE_RSS_INPUT_TUPLE_OTHER;
4609 vport[i].rss_tuple_sets.ipv4_udp_en =
4610 HCLGE_RSS_INPUT_TUPLE_OTHER;
4611 vport[i].rss_tuple_sets.ipv4_sctp_en =
4612 HCLGE_RSS_INPUT_TUPLE_SCTP;
4613 vport[i].rss_tuple_sets.ipv4_fragment_en =
4614 HCLGE_RSS_INPUT_TUPLE_OTHER;
4615 vport[i].rss_tuple_sets.ipv6_tcp_en =
4616 HCLGE_RSS_INPUT_TUPLE_OTHER;
4617 vport[i].rss_tuple_sets.ipv6_udp_en =
4618 HCLGE_RSS_INPUT_TUPLE_OTHER;
4619 vport[i].rss_tuple_sets.ipv6_sctp_en =
4620 HCLGE_RSS_INPUT_TUPLE_SCTP;
4621 vport[i].rss_tuple_sets.ipv6_fragment_en =
4622 HCLGE_RSS_INPUT_TUPLE_OTHER;
4624 vport[i].rss_algo = rss_algo;
4626 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4627 HCLGE_RSS_KEY_SIZE);
4630 hclge_rss_indir_init_cfg(hdev);
4633 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4634 int vector_id, bool en,
4635 struct hnae3_ring_chain_node *ring_chain)
4637 struct hclge_dev *hdev = vport->back;
4638 struct hnae3_ring_chain_node *node;
4639 struct hclge_desc desc;
4640 struct hclge_ctrl_vector_chain_cmd *req =
4641 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4642 enum hclge_cmd_status status;
4643 enum hclge_opcode_type op;
4644 u16 tqp_type_and_id;
4647 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4648 hclge_cmd_setup_basic_desc(&desc, op, false);
4649 req->int_vector_id = vector_id;
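	/* Walk the ring chain, packing up to HCLGE_VECTOR_ELEMENTS_PER_CMD
	 * TQP entries into one descriptor; when a descriptor fills up it is
	 * sent and a fresh one is prepared for the remaining rings.
	 */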
4652 for (node = ring_chain; node; node = node->next) {
4653 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4657 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4658 HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
4664 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4665 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4666 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4667 req->vfid = vport->vport_id;
4669 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4671 dev_err(&hdev->pdev->dev,
4672 "Map TQP fail, status is %d.\n",
4678 hclge_cmd_setup_basic_desc(&desc,
4681 req->int_vector_id = vector_id;
4686 req->int_cause_num = i;
4687 req->vfid = vport->vport_id;
4688 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4690 dev_err(&hdev->pdev->dev,
4691 "Map TQP fail, status is %d.\n", status);
4699 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4700 struct hnae3_ring_chain_node *ring_chain)
4702 struct hclge_vport *vport = hclge_get_vport(handle);
4703 struct hclge_dev *hdev = vport->back;
4706 vector_id = hclge_get_vector_index(hdev, vector);
4707 if (vector_id < 0) {
4708 dev_err(&hdev->pdev->dev,
4709 "failed to get vector index. vector=%d\n", vector);
4713 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4716 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4717 struct hnae3_ring_chain_node *ring_chain)
4719 struct hclge_vport *vport = hclge_get_vport(handle);
4720 struct hclge_dev *hdev = vport->back;
4723 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4726 vector_id = hclge_get_vector_index(hdev, vector);
4727 if (vector_id < 0) {
4728 dev_err(&handle->pdev->dev,
4729 "Get vector index fail. ret =%d\n", vector_id);
4733 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4735 dev_err(&handle->pdev->dev,
4736 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4742 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4743 struct hclge_promisc_param *param)
4745 struct hclge_promisc_cfg_cmd *req;
4746 struct hclge_desc desc;
4749 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4751 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4752 req->vf_id = param->vf_id;
	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
	 * on pdev revision 0x20; newer revisions support them. Setting these
	 * two fields does not cause an error when the driver sends this
	 * command to the firmware on revision 0x20.
	 */
4759 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4760 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4762 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4764 dev_err(&hdev->pdev->dev,
4765 "Set promisc mode fail, status is %d.\n", ret);
4770 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4771 bool en_uc, bool en_mc, bool en_bc,
4777 memset(param, 0, sizeof(struct hclge_promisc_param));
4779 param->enable = HCLGE_PROMISC_EN_UC;
4781 param->enable |= HCLGE_PROMISC_EN_MC;
4783 param->enable |= HCLGE_PROMISC_EN_BC;
4784 param->vf_id = vport_id;
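/* For example, hclge_promisc_param_init(&param, true, false, true, 0)
 * leaves param.enable = HCLGE_PROMISC_EN_UC | HCLGE_PROMISC_EN_BC for
 * vport 0, i.e. unicast and broadcast promiscuous mode only.
 */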
4787 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4788 bool en_mc_pmc, bool en_bc_pmc)
4790 struct hclge_dev *hdev = vport->back;
4791 struct hclge_promisc_param param;
	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
				 vport->vport_id);
	return hclge_cmd_set_promisc_mode(hdev, &param);
4798 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4801 struct hclge_vport *vport = hclge_get_vport(handle);
4802 bool en_bc_pmc = true;
	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
	 * is always bypassed. So broadcast promisc should be disabled until
	 * the user enables promisc mode.
	 */
4808 if (handle->pdev->revision == 0x20)
4809 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4811 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4815 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4817 struct hclge_get_fd_mode_cmd *req;
4818 struct hclge_desc desc;
4821 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4823 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4825 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4827 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4831 *fd_mode = req->mode;
4836 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4837 u32 *stage1_entry_num,
4838 u32 *stage2_entry_num,
4839 u16 *stage1_counter_num,
4840 u16 *stage2_counter_num)
4842 struct hclge_get_fd_allocation_cmd *req;
4843 struct hclge_desc desc;
4846 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4848 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4850 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4852 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4857 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4858 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4859 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4860 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4865 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4867 struct hclge_set_fd_key_config_cmd *req;
4868 struct hclge_fd_key_cfg *stage;
4869 struct hclge_desc desc;
4872 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4874 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4875 stage = &hdev->fd_cfg.key_cfg[stage_num];
4876 req->stage = stage_num;
4877 req->key_select = stage->key_sel;
4878 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4879 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4880 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4881 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4882 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4883 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4885 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4887 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4892 static int hclge_init_fd_config(struct hclge_dev *hdev)
4894 #define LOW_2_WORDS 0x03
4895 struct hclge_fd_key_cfg *key_cfg;
4898 if (!hnae3_dev_fd_supported(hdev))
4901 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4905 switch (hdev->fd_cfg.fd_mode) {
4906 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4907 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4909 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4910 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4913 dev_err(&hdev->pdev->dev,
4914 "Unsupported flow director mode %u\n",
4915 hdev->fd_cfg.fd_mode);
4919 hdev->fd_cfg.proto_support =
4920 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4921 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4922 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4924 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4925 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4926 key_cfg->outer_sipv6_word_en = 0;
4927 key_cfg->outer_dipv6_word_en = 0;
4929 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4930 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4931 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4932 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
	/* If the max 400-bit key is used, tuples for ether type can also be supported */
4935 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4936 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4937 key_cfg->tuple_active |=
4938 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
4944 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4946 ret = hclge_get_fd_allocation(hdev,
4947 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4948 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4949 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4950 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4954 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
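/* A TCAM entry is wider than one command descriptor, so it is written
 * with three chained descriptors (HCLGE_CMD_FLAG_NEXT): the key bytes
 * are split across the tcam_data fields of the three requests below.
 */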
4957 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4958 int loc, u8 *key, bool is_add)
4960 struct hclge_fd_tcam_config_1_cmd *req1;
4961 struct hclge_fd_tcam_config_2_cmd *req2;
4962 struct hclge_fd_tcam_config_3_cmd *req3;
4963 struct hclge_desc desc[3];
4966 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4967 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4968 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4969 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4970 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4972 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4973 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4974 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4976 req1->stage = stage;
4977 req1->xy_sel = sel_x ? 1 : 0;
4978 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4979 req1->index = cpu_to_le32(loc);
4980 req1->entry_vld = sel_x ? is_add : 0;
4983 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4984 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4985 sizeof(req2->tcam_data));
4986 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4987 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4990 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4992 dev_err(&hdev->pdev->dev,
4993 "config tcam key fail, ret=%d\n",
4999 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5000 struct hclge_fd_ad_data *action)
5002 struct hclge_fd_ad_config_cmd *req;
5003 struct hclge_desc desc;
5007 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5009 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5010 req->index = cpu_to_le32(loc);
5013 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5014 action->write_rule_id_to_bd);
5015 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5018 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5019 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5020 action->forward_to_direct_queue);
5021 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5023 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5024 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5025 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5026 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5027 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5028 action->counter_id);
5030 req->ad_data = cpu_to_le64(ad_data);
5031 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5033 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
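/* A sketch of the x/y convention assumed by hclge_fd_convert_tuple()
 * below: calc_x()/calc_y() (defined elsewhere in this file) are taken to
 * produce the usual TCAM key pair, where per bit (x, y) = (1, 0) matches
 * a set bit, (0, 1) matches a clear bit, and (0, 0) means don't care, so
 * a zero mask disables matching on that tuple entirely.
 */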
5038 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5039 struct hclge_fd_rule *rule)
5041 u16 tmp_x_s, tmp_y_s;
5042 u32 tmp_x_l, tmp_y_l;
5045 if (rule->unused_tuple & tuple_bit)
5048 switch (tuple_bit) {
5051 case BIT(INNER_DST_MAC):
5052 for (i = 0; i < ETH_ALEN; i++) {
5053 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5054 rule->tuples_mask.dst_mac[i]);
5055 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5056 rule->tuples_mask.dst_mac[i]);
5060 case BIT(INNER_SRC_MAC):
5061 for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
5069 case BIT(INNER_VLAN_TAG_FST):
5070 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5071 rule->tuples_mask.vlan_tag1);
5072 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5073 rule->tuples_mask.vlan_tag1);
5074 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5075 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5078 case BIT(INNER_ETH_TYPE):
5079 calc_x(tmp_x_s, rule->tuples.ether_proto,
5080 rule->tuples_mask.ether_proto);
5081 calc_y(tmp_y_s, rule->tuples.ether_proto,
5082 rule->tuples_mask.ether_proto);
5083 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5084 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5087 case BIT(INNER_IP_TOS):
5088 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5089 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5092 case BIT(INNER_IP_PROTO):
5093 calc_x(*key_x, rule->tuples.ip_proto,
5094 rule->tuples_mask.ip_proto);
5095 calc_y(*key_y, rule->tuples.ip_proto,
5096 rule->tuples_mask.ip_proto);
5099 case BIT(INNER_SRC_IP):
5100 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5101 rule->tuples_mask.src_ip[IPV4_INDEX]);
5102 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5103 rule->tuples_mask.src_ip[IPV4_INDEX]);
5104 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5105 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5108 case BIT(INNER_DST_IP):
5109 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5110 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5111 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5112 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5113 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5114 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5117 case BIT(INNER_SRC_PORT):
5118 calc_x(tmp_x_s, rule->tuples.src_port,
5119 rule->tuples_mask.src_port);
5120 calc_y(tmp_y_s, rule->tuples.src_port,
5121 rule->tuples_mask.src_port);
5122 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5123 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5126 case BIT(INNER_DST_PORT):
5127 calc_x(tmp_x_s, rule->tuples.dst_port,
5128 rule->tuples_mask.dst_port);
5129 calc_y(tmp_y_s, rule->tuples.dst_port,
5130 rule->tuples_mask.dst_port);
5131 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5132 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5140 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5141 u8 vf_id, u8 network_port_id)
5143 u32 port_number = 0;
5145 if (port_type == HOST_PORT) {
5146 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5148 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5150 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5152 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5153 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5154 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5160 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5161 __le32 *key_x, __le32 *key_y,
5162 struct hclge_fd_rule *rule)
5164 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5165 u8 cur_pos = 0, tuple_size, shift_bits;
5168 for (i = 0; i < MAX_META_DATA; i++) {
5169 tuple_size = meta_data_key_info[i].key_length;
5170 tuple_bit = key_cfg->meta_data_active & BIT(i);
5172 switch (tuple_bit) {
5173 case BIT(ROCE_TYPE):
5174 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5175 cur_pos += tuple_size;
5177 case BIT(DST_VPORT):
5178 port_number = hclge_get_port_number(HOST_PORT, 0,
5180 hnae3_set_field(meta_data,
5181 GENMASK(cur_pos + tuple_size, cur_pos),
5182 cur_pos, port_number);
5183 cur_pos += tuple_size;
5190 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5191 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5192 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5194 *key_x = cpu_to_le32(tmp_x << shift_bits);
5195 *key_y = cpu_to_le32(tmp_y << shift_bits);
/* A complete key is combined from the meta data key and the tuple key.
 * The meta data key is stored in the MSB region, and the tuple key is
 * stored in the LSB region; unused bits are filled with 0.
 */
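/*
 * Layout sketch (region sizes depend on the configured fd_mode):
 *
 *   MSB                                        LSB
 *   +-----------------+------------------------+
 *   |  meta data key  |       tuple key        |
 *   +-----------------+------------------------+
 */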
5202 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5203 struct hclge_fd_rule *rule)
5205 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5206 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5207 u8 *cur_key_x, *cur_key_y;
5209 int ret, tuple_size;
5210 u8 meta_data_region;
5212 memset(key_x, 0, sizeof(key_x));
5213 memset(key_y, 0, sizeof(key_y));
	for (i = 0; i < MAX_TUPLE; i++) {
5221 tuple_size = tuple_key_info[i].key_length / 8;
5222 check_tuple = key_cfg->tuple_active & BIT(i);
5224 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5227 cur_key_x += tuple_size;
5228 cur_key_y += tuple_size;
5232 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5233 MAX_META_DATA_LENGTH / 8;
5235 hclge_fd_convert_meta_data(key_cfg,
5236 (__le32 *)(key_x + meta_data_region),
5237 (__le32 *)(key_y + meta_data_region),
5240 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5243 dev_err(&hdev->pdev->dev,
5244 "fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5249 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5252 dev_err(&hdev->pdev->dev,
5253 "fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5258 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5259 struct hclge_fd_rule *rule)
5261 struct hclge_fd_ad_data ad_data;
5263 ad_data.ad_id = rule->location;
5265 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5266 ad_data.drop_packet = true;
5267 ad_data.forward_to_direct_queue = false;
5268 ad_data.queue_id = 0;
5270 ad_data.drop_packet = false;
5271 ad_data.forward_to_direct_queue = true;
5272 ad_data.queue_id = rule->queue_id;
5275 ad_data.use_counter = false;
5276 ad_data.counter_id = 0;
5278 ad_data.use_next_stage = false;
5279 ad_data.next_input_key = 0;
5281 ad_data.write_rule_id_to_bd = true;
5282 ad_data.rule_id = rule->location;
5284 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
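/* To summarize the two halves of rule programming: hclge_config_key()
 * writes the TCAM match entry, while hclge_config_action() above writes
 * the action data (drop, or forward to a direct queue, with the rule id
 * written back to the BD for lookup).
 */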
5287 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5288 struct ethtool_rx_flow_spec *fs, u32 *unused)
5290 struct ethtool_tcpip4_spec *tcp_ip4_spec;
5291 struct ethtool_usrip4_spec *usr_ip4_spec;
5292 struct ethtool_tcpip6_spec *tcp_ip6_spec;
5293 struct ethtool_usrip6_spec *usr_ip6_spec;
5294 struct ethhdr *ether_spec;
5296 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5299 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5302 if ((fs->flow_type & FLOW_EXT) &&
5303 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5304 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5308 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5312 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5313 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5315 if (!tcp_ip4_spec->ip4src)
5316 *unused |= BIT(INNER_SRC_IP);
5318 if (!tcp_ip4_spec->ip4dst)
5319 *unused |= BIT(INNER_DST_IP);
5321 if (!tcp_ip4_spec->psrc)
5322 *unused |= BIT(INNER_SRC_PORT);
5324 if (!tcp_ip4_spec->pdst)
5325 *unused |= BIT(INNER_DST_PORT);
5327 if (!tcp_ip4_spec->tos)
5328 *unused |= BIT(INNER_IP_TOS);
5332 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5333 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5334 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5336 if (!usr_ip4_spec->ip4src)
5337 *unused |= BIT(INNER_SRC_IP);
5339 if (!usr_ip4_spec->ip4dst)
5340 *unused |= BIT(INNER_DST_IP);
5342 if (!usr_ip4_spec->tos)
5343 *unused |= BIT(INNER_IP_TOS);
5345 if (!usr_ip4_spec->proto)
5346 *unused |= BIT(INNER_IP_PROTO);
5348 if (usr_ip4_spec->l4_4_bytes)
5351 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5358 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5359 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		/* check whether the src/dst ip address is used */
5363 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5364 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5365 *unused |= BIT(INNER_SRC_IP);
5367 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5368 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5369 *unused |= BIT(INNER_DST_IP);
5371 if (!tcp_ip6_spec->psrc)
5372 *unused |= BIT(INNER_SRC_PORT);
5374 if (!tcp_ip6_spec->pdst)
5375 *unused |= BIT(INNER_DST_PORT);
5377 if (tcp_ip6_spec->tclass)
5381 case IPV6_USER_FLOW:
5382 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5383 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5384 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5385 BIT(INNER_DST_PORT);
		/* check whether the src/dst ip address is used */
5388 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5389 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5390 *unused |= BIT(INNER_SRC_IP);
5392 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5393 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5394 *unused |= BIT(INNER_DST_IP);
5396 if (!usr_ip6_spec->l4_proto)
5397 *unused |= BIT(INNER_IP_PROTO);
5399 if (usr_ip6_spec->tclass)
5402 if (usr_ip6_spec->l4_4_bytes)
5407 ether_spec = &fs->h_u.ether_spec;
5408 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5409 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5410 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5412 if (is_zero_ether_addr(ether_spec->h_source))
5413 *unused |= BIT(INNER_SRC_MAC);
5415 if (is_zero_ether_addr(ether_spec->h_dest))
5416 *unused |= BIT(INNER_DST_MAC);
5418 if (!ether_spec->h_proto)
5419 *unused |= BIT(INNER_ETH_TYPE);
5426 if ((fs->flow_type & FLOW_EXT)) {
5427 if (fs->h_ext.vlan_etype)
5429 if (!fs->h_ext.vlan_tci)
5430 *unused |= BIT(INNER_VLAN_TAG_FST);
5432 if (fs->m_ext.vlan_tci) {
5433 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5437 *unused |= BIT(INNER_VLAN_TAG_FST);
5440 if (fs->flow_type & FLOW_MAC_EXT) {
5441 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5444 if (is_zero_ether_addr(fs->h_ext.h_dest))
5445 *unused |= BIT(INNER_DST_MAC);
5447 *unused &= ~(BIT(INNER_DST_MAC));
5453 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5455 struct hclge_fd_rule *rule = NULL;
5456 struct hlist_node *node2;
5458 spin_lock_bh(&hdev->fd_rule_lock);
5459 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5460 if (rule->location >= location)
5464 spin_unlock_bh(&hdev->fd_rule_lock);
5466 return rule && rule->location == location;
/* the caller must hold hdev->fd_rule_lock */
5470 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5471 struct hclge_fd_rule *new_rule,
5475 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5476 struct hlist_node *node2;
5478 if (is_add && !new_rule)
5481 hlist_for_each_entry_safe(rule, node2,
5482 &hdev->fd_rule_list, rule_node) {
5483 if (rule->location >= location)
5488 if (rule && rule->location == location) {
5489 hlist_del(&rule->rule_node);
5491 hdev->hclge_fd_rule_num--;
5494 if (!hdev->hclge_fd_rule_num)
5495 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5496 clear_bit(location, hdev->fd_bmap);
5500 } else if (!is_add) {
5501 dev_err(&hdev->pdev->dev,
5502 "delete fail, rule %u is inexistent\n",
5507 INIT_HLIST_NODE(&new_rule->rule_node);
5510 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5512 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5514 set_bit(location, hdev->fd_bmap);
5515 hdev->hclge_fd_rule_num++;
5516 hdev->fd_active_type = new_rule->rule_type;
5521 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5522 struct ethtool_rx_flow_spec *fs,
5523 struct hclge_fd_rule *rule)
5525 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5527 switch (flow_type) {
5531 rule->tuples.src_ip[IPV4_INDEX] =
5532 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5533 rule->tuples_mask.src_ip[IPV4_INDEX] =
5534 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5536 rule->tuples.dst_ip[IPV4_INDEX] =
5537 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5538 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5539 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5541 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5542 rule->tuples_mask.src_port =
5543 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5545 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5546 rule->tuples_mask.dst_port =
5547 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5549 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5550 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5552 rule->tuples.ether_proto = ETH_P_IP;
5553 rule->tuples_mask.ether_proto = 0xFFFF;
5557 rule->tuples.src_ip[IPV4_INDEX] =
5558 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5559 rule->tuples_mask.src_ip[IPV4_INDEX] =
5560 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5562 rule->tuples.dst_ip[IPV4_INDEX] =
5563 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5564 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5565 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5567 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5568 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5570 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5571 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5573 rule->tuples.ether_proto = ETH_P_IP;
5574 rule->tuples_mask.ether_proto = 0xFFFF;
5580 be32_to_cpu_array(rule->tuples.src_ip,
5581 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5582 be32_to_cpu_array(rule->tuples_mask.src_ip,
5583 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5585 be32_to_cpu_array(rule->tuples.dst_ip,
5586 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5587 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5588 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5590 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5591 rule->tuples_mask.src_port =
5592 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5594 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5595 rule->tuples_mask.dst_port =
5596 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5598 rule->tuples.ether_proto = ETH_P_IPV6;
5599 rule->tuples_mask.ether_proto = 0xFFFF;
5602 case IPV6_USER_FLOW:
5603 be32_to_cpu_array(rule->tuples.src_ip,
5604 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5605 be32_to_cpu_array(rule->tuples_mask.src_ip,
5606 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5608 be32_to_cpu_array(rule->tuples.dst_ip,
5609 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5610 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5611 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5613 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5614 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5616 rule->tuples.ether_proto = ETH_P_IPV6;
5617 rule->tuples_mask.ether_proto = 0xFFFF;
5621 ether_addr_copy(rule->tuples.src_mac,
5622 fs->h_u.ether_spec.h_source);
5623 ether_addr_copy(rule->tuples_mask.src_mac,
5624 fs->m_u.ether_spec.h_source);
5626 ether_addr_copy(rule->tuples.dst_mac,
5627 fs->h_u.ether_spec.h_dest);
5628 ether_addr_copy(rule->tuples_mask.dst_mac,
5629 fs->m_u.ether_spec.h_dest);
5631 rule->tuples.ether_proto =
5632 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5633 rule->tuples_mask.ether_proto =
5634 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5641 switch (flow_type) {
5644 rule->tuples.ip_proto = IPPROTO_SCTP;
5645 rule->tuples_mask.ip_proto = 0xFF;
5649 rule->tuples.ip_proto = IPPROTO_TCP;
5650 rule->tuples_mask.ip_proto = 0xFF;
5654 rule->tuples.ip_proto = IPPROTO_UDP;
5655 rule->tuples_mask.ip_proto = 0xFF;
5661 if ((fs->flow_type & FLOW_EXT)) {
5662 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5663 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5666 if (fs->flow_type & FLOW_MAC_EXT) {
5667 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5668 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
/* the caller must hold hdev->fd_rule_lock */
5675 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5676 struct hclge_fd_rule *rule)
5681 dev_err(&hdev->pdev->dev,
5682 "The flow director rule is NULL\n");
	/* it never fails here, so there is no need to check the return value */
5687 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5689 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5693 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5700 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5704 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5705 struct ethtool_rxnfc *cmd)
5707 struct hclge_vport *vport = hclge_get_vport(handle);
5708 struct hclge_dev *hdev = vport->back;
5709 u16 dst_vport_id = 0, q_index = 0;
5710 struct ethtool_rx_flow_spec *fs;
5711 struct hclge_fd_rule *rule;
5716 if (!hnae3_dev_fd_supported(hdev))
5720 dev_warn(&hdev->pdev->dev,
5721 "Please enable flow director first\n");
5725 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5727 ret = hclge_fd_check_spec(hdev, fs, &unused);
5729 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5733 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5734 action = HCLGE_FD_ACTION_DROP_PACKET;
5736 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5737 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5740 if (vf > hdev->num_req_vfs) {
5741 dev_err(&hdev->pdev->dev,
5742 "Error: vf id (%u) > max vf num (%u)\n",
5743 vf, hdev->num_req_vfs);
5747 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5748 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5751 dev_err(&hdev->pdev->dev,
5752 "Error: queue id (%u) > max tqp num (%u)\n",
5757 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5761 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5765 ret = hclge_fd_get_tuple(hdev, fs, rule);
5771 rule->flow_type = fs->flow_type;
5773 rule->location = fs->location;
5774 rule->unused_tuple = unused;
5775 rule->vf_id = dst_vport_id;
5776 rule->queue_id = q_index;
5777 rule->action = action;
5778 rule->rule_type = HCLGE_FD_EP_ACTIVE;
	/* To avoid rule conflicts, when the user configures a rule via
	 * ethtool, we need to clear all arfs rules first.
	 */
5783 hclge_clear_arfs_rules(handle);
5785 spin_lock_bh(&hdev->fd_rule_lock);
5786 ret = hclge_fd_config_rule(hdev, rule);
5788 spin_unlock_bh(&hdev->fd_rule_lock);
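/* A hypothetical invocation that lands here (values are illustrative):
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 3
 * arrives as an ethtool_rx_flow_spec with flow_type = TCP_V4_FLOW and
 * ring_cookie = 3, which becomes HCLGE_FD_ACTION_ACCEPT_PACKET on
 * queue index 3 of the PF.
 */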
5793 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5794 struct ethtool_rxnfc *cmd)
5796 struct hclge_vport *vport = hclge_get_vport(handle);
5797 struct hclge_dev *hdev = vport->back;
5798 struct ethtool_rx_flow_spec *fs;
5801 if (!hnae3_dev_fd_supported(hdev))
5804 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5806 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5809 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5810 dev_err(&hdev->pdev->dev,
5811 "Delete fail, rule %u is inexistent\n", fs->location);
5815 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5820 spin_lock_bh(&hdev->fd_rule_lock);
5821 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5823 spin_unlock_bh(&hdev->fd_rule_lock);
5828 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5831 struct hclge_vport *vport = hclge_get_vport(handle);
5832 struct hclge_dev *hdev = vport->back;
5833 struct hclge_fd_rule *rule;
5834 struct hlist_node *node;
5837 if (!hnae3_dev_fd_supported(hdev))
5840 spin_lock_bh(&hdev->fd_rule_lock);
5841 for_each_set_bit(location, hdev->fd_bmap,
5842 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5843 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5847 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5849 hlist_del(&rule->rule_node);
5852 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5853 hdev->hclge_fd_rule_num = 0;
5854 bitmap_zero(hdev->fd_bmap,
5855 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5858 spin_unlock_bh(&hdev->fd_rule_lock);
5861 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5863 struct hclge_vport *vport = hclge_get_vport(handle);
5864 struct hclge_dev *hdev = vport->back;
5865 struct hclge_fd_rule *rule;
5866 struct hlist_node *node;
	/* Return ok here, because reset error handling will check this
	 * return value. If an error is returned here, the reset process
	 * will fail.
	 */
5873 if (!hnae3_dev_fd_supported(hdev))
	/* if fd is disabled, it should not be restored during reset */
5880 spin_lock_bh(&hdev->fd_rule_lock);
5881 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5882 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5884 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5887 dev_warn(&hdev->pdev->dev,
5888 "Restore rule %u failed, remove it\n",
5890 clear_bit(rule->location, hdev->fd_bmap);
5891 hlist_del(&rule->rule_node);
5893 hdev->hclge_fd_rule_num--;
5897 if (hdev->hclge_fd_rule_num)
5898 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5900 spin_unlock_bh(&hdev->fd_rule_lock);
5905 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5906 struct ethtool_rxnfc *cmd)
5908 struct hclge_vport *vport = hclge_get_vport(handle);
5909 struct hclge_dev *hdev = vport->back;
5911 if (!hnae3_dev_fd_supported(hdev))
5914 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5915 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5920 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5921 struct ethtool_rxnfc *cmd)
5923 struct hclge_vport *vport = hclge_get_vport(handle);
5924 struct hclge_fd_rule *rule = NULL;
5925 struct hclge_dev *hdev = vport->back;
5926 struct ethtool_rx_flow_spec *fs;
5927 struct hlist_node *node2;
5929 if (!hnae3_dev_fd_supported(hdev))
5932 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5934 spin_lock_bh(&hdev->fd_rule_lock);
5936 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5937 if (rule->location >= fs->location)
5941 if (!rule || fs->location != rule->location) {
5942 spin_unlock_bh(&hdev->fd_rule_lock);
5947 fs->flow_type = rule->flow_type;
5948 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5952 fs->h_u.tcp_ip4_spec.ip4src =
5953 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5954 fs->m_u.tcp_ip4_spec.ip4src =
5955 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5956 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5958 fs->h_u.tcp_ip4_spec.ip4dst =
5959 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5960 fs->m_u.tcp_ip4_spec.ip4dst =
5961 rule->unused_tuple & BIT(INNER_DST_IP) ?
5962 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5964 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5965 fs->m_u.tcp_ip4_spec.psrc =
5966 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5967 0 : cpu_to_be16(rule->tuples_mask.src_port);
5969 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5970 fs->m_u.tcp_ip4_spec.pdst =
5971 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5972 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5974 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5975 fs->m_u.tcp_ip4_spec.tos =
5976 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5977 0 : rule->tuples_mask.ip_tos;
5981 fs->h_u.usr_ip4_spec.ip4src =
5982 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5983 fs->m_u.tcp_ip4_spec.ip4src =
5984 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5985 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5987 fs->h_u.usr_ip4_spec.ip4dst =
5988 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5989 fs->m_u.usr_ip4_spec.ip4dst =
5990 rule->unused_tuple & BIT(INNER_DST_IP) ?
5991 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5993 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5994 fs->m_u.usr_ip4_spec.tos =
5995 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5996 0 : rule->tuples_mask.ip_tos;
5998 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5999 fs->m_u.usr_ip4_spec.proto =
6000 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6001 0 : rule->tuples_mask.ip_proto;
6003 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
6009 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
6010 rule->tuples.src_ip, IPV6_SIZE);
6011 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6012 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
6013 sizeof(int) * IPV6_SIZE);
6015 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
6016 rule->tuples_mask.src_ip, IPV6_SIZE);
6018 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
6019 rule->tuples.dst_ip, IPV6_SIZE);
6020 if (rule->unused_tuple & BIT(INNER_DST_IP))
6021 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
6022 sizeof(int) * IPV6_SIZE);
6024 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
6025 rule->tuples_mask.dst_ip, IPV6_SIZE);
6027 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
6028 fs->m_u.tcp_ip6_spec.psrc =
6029 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6030 0 : cpu_to_be16(rule->tuples_mask.src_port);
6032 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
6033 fs->m_u.tcp_ip6_spec.pdst =
6034 rule->unused_tuple & BIT(INNER_DST_PORT) ?
6035 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6038 case IPV6_USER_FLOW:
6039 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
6040 rule->tuples.src_ip, IPV6_SIZE);
6041 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6042 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6043 sizeof(int) * IPV6_SIZE);
6045 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6046 rule->tuples_mask.src_ip, IPV6_SIZE);
6048 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6049 rule->tuples.dst_ip, IPV6_SIZE);
6050 if (rule->unused_tuple & BIT(INNER_DST_IP))
6051 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6052 sizeof(int) * IPV6_SIZE);
6054 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6055 rule->tuples_mask.dst_ip, IPV6_SIZE);
6057 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6058 fs->m_u.usr_ip6_spec.l4_proto =
6059 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6060 0 : rule->tuples_mask.ip_proto;
6064 ether_addr_copy(fs->h_u.ether_spec.h_source,
6065 rule->tuples.src_mac);
6066 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6067 eth_zero_addr(fs->m_u.ether_spec.h_source);
6069 ether_addr_copy(fs->m_u.ether_spec.h_source,
6070 rule->tuples_mask.src_mac);
6072 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6073 rule->tuples.dst_mac);
6074 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6075 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6077 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6078 rule->tuples_mask.dst_mac);
6080 fs->h_u.ether_spec.h_proto =
6081 cpu_to_be16(rule->tuples.ether_proto);
6082 fs->m_u.ether_spec.h_proto =
6083 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6084 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6088 spin_unlock_bh(&hdev->fd_rule_lock);
6092 if (fs->flow_type & FLOW_EXT) {
6093 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6094 fs->m_ext.vlan_tci =
6095 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6096 cpu_to_be16(VLAN_VID_MASK) :
6097 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6100 if (fs->flow_type & FLOW_MAC_EXT) {
6101 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6102 if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
6109 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6110 fs->ring_cookie = RX_CLS_FLOW_DISC;
6114 fs->ring_cookie = rule->queue_id;
6115 vf_id = rule->vf_id;
6116 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6117 fs->ring_cookie |= vf_id;
6120 spin_unlock_bh(&hdev->fd_rule_lock);
6125 static int hclge_get_all_rules(struct hnae3_handle *handle,
6126 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6128 struct hclge_vport *vport = hclge_get_vport(handle);
6129 struct hclge_dev *hdev = vport->back;
6130 struct hclge_fd_rule *rule;
6131 struct hlist_node *node2;
6134 if (!hnae3_dev_fd_supported(hdev))
6137 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6139 spin_lock_bh(&hdev->fd_rule_lock);
6140 hlist_for_each_entry_safe(rule, node2,
6141 &hdev->fd_rule_list, rule_node) {
6142 if (cnt == cmd->rule_cnt) {
6143 spin_unlock_bh(&hdev->fd_rule_lock);
6147 rule_locs[cnt] = rule->location;
6151 spin_unlock_bh(&hdev->fd_rule_lock);
6153 cmd->rule_cnt = cnt;
6158 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6159 struct hclge_fd_rule_tuples *tuples)
6161 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6162 tuples->ip_proto = fkeys->basic.ip_proto;
6163 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6165 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6166 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6167 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6169 memcpy(tuples->src_ip,
6170 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6171 sizeof(tuples->src_ip));
6172 memcpy(tuples->dst_ip,
6173 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6174 sizeof(tuples->dst_ip));
/* traverse all rules, check whether an existing rule has the same tuples */
6179 static struct hclge_fd_rule *
6180 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6181 const struct hclge_fd_rule_tuples *tuples)
6183 struct hclge_fd_rule *rule = NULL;
6184 struct hlist_node *node;
6186 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6187 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6194 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6195 struct hclge_fd_rule *rule)
6197 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6198 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6199 BIT(INNER_SRC_PORT);
6202 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6203 if (tuples->ether_proto == ETH_P_IP) {
6204 if (tuples->ip_proto == IPPROTO_TCP)
6205 rule->flow_type = TCP_V4_FLOW;
6207 rule->flow_type = UDP_V4_FLOW;
6209 if (tuples->ip_proto == IPPROTO_TCP)
6210 rule->flow_type = TCP_V6_FLOW;
6212 rule->flow_type = UDP_V6_FLOW;
6214 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6215 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
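	/* The masks are all-ones: aRFS rules are exact matches on the
	 * tuples extracted from the flow keys, unlike user rules, which
	 * may carry partial masks.
	 */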
6218 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6219 u16 flow_id, struct flow_keys *fkeys)
6221 struct hclge_vport *vport = hclge_get_vport(handle);
6222 struct hclge_fd_rule_tuples new_tuples;
6223 struct hclge_dev *hdev = vport->back;
6224 struct hclge_fd_rule *rule;
6229 if (!hnae3_dev_fd_supported(hdev))
6232 memset(&new_tuples, 0, sizeof(new_tuples));
6233 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6235 spin_lock_bh(&hdev->fd_rule_lock);
	/* when an fd rule added by the user already exists,
	 * arfs should not work
	 */
6240 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6241 spin_unlock_bh(&hdev->fd_rule_lock);
	/* check whether a flow director filter exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
6251 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6253 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6254 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6255 spin_unlock_bh(&hdev->fd_rule_lock);
6260 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6262 spin_unlock_bh(&hdev->fd_rule_lock);
6267 set_bit(bit_id, hdev->fd_bmap);
6268 rule->location = bit_id;
6269 rule->flow_id = flow_id;
6270 rule->queue_id = queue_id;
6271 hclge_fd_build_arfs_rule(&new_tuples, rule);
6272 ret = hclge_fd_config_rule(hdev, rule);
6274 spin_unlock_bh(&hdev->fd_rule_lock);
6279 return rule->location;
6282 spin_unlock_bh(&hdev->fd_rule_lock);
6284 if (rule->queue_id == queue_id)
6285 return rule->location;
6287 tmp_queue_id = rule->queue_id;
6288 rule->queue_id = queue_id;
6289 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6291 rule->queue_id = tmp_queue_id;
6295 return rule->location;
6298 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6300 #ifdef CONFIG_RFS_ACCEL
6301 struct hnae3_handle *handle = &hdev->vport[0].nic;
6302 struct hclge_fd_rule *rule;
6303 struct hlist_node *node;
6304 HLIST_HEAD(del_list);
6306 spin_lock_bh(&hdev->fd_rule_lock);
6307 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6308 spin_unlock_bh(&hdev->fd_rule_lock);
6311 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6312 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6313 rule->flow_id, rule->location)) {
6314 hlist_del_init(&rule->rule_node);
6315 hlist_add_head(&rule->rule_node, &del_list);
6316 hdev->hclge_fd_rule_num--;
6317 clear_bit(rule->location, hdev->fd_bmap);
6320 spin_unlock_bh(&hdev->fd_rule_lock);
6322 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6323 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6324 rule->location, NULL, false);
6330 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6332 #ifdef CONFIG_RFS_ACCEL
6333 struct hclge_vport *vport = hclge_get_vport(handle);
6334 struct hclge_dev *hdev = vport->back;
6336 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6337 hclge_del_all_fd_entries(handle, true);
6341 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6343 struct hclge_vport *vport = hclge_get_vport(handle);
6344 struct hclge_dev *hdev = vport->back;
6346 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6347 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6350 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6352 struct hclge_vport *vport = hclge_get_vport(handle);
6353 struct hclge_dev *hdev = vport->back;
6355 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6358 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6360 struct hclge_vport *vport = hclge_get_vport(handle);
6361 struct hclge_dev *hdev = vport->back;
6363 return hdev->rst_stats.hw_reset_done_cnt;
6366 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6368 struct hclge_vport *vport = hclge_get_vport(handle);
6369 struct hclge_dev *hdev = vport->back;
6372 hdev->fd_en = enable;
6373 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6375 hclge_del_all_fd_entries(handle, clear);
6377 hclge_restore_fd_entries(handle);
6380 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6382 struct hclge_desc desc;
6383 struct hclge_config_mac_mode_cmd *req =
6384 (struct hclge_config_mac_mode_cmd *)desc.data;
6388 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6391 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6392 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6393 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6394 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6395 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6396 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6397 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6398 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6399 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6400 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6403 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6405 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6407 dev_err(&hdev->pdev->dev,
6408 "mac enable fail, ret =%d.\n", ret);
6411 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6412 u8 switch_param, u8 param_mask)
6414 struct hclge_mac_vlan_switch_cmd *req;
6415 struct hclge_desc desc;
6419 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6420 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6422 /* read current config parameter */
6423 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6425 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6426 req->func_id = cpu_to_le32(func_id);
6428 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6430 dev_err(&hdev->pdev->dev,
6431 "read mac vlan switch parameter fail, ret = %d\n", ret);
6435 /* modify and write new config parameter */
6436 hclge_cmd_reuse_desc(&desc, false);
6437 req->switch_param = (req->switch_param & param_mask) | switch_param;
6438 req->param_mask = param_mask;
6440 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6442 dev_err(&hdev->pdev->dev,
6443 "set mac vlan switch parameter fail, ret = %d\n", ret);
6447 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6450 #define HCLGE_PHY_LINK_STATUS_NUM 200
6452 struct phy_device *phydev = hdev->hw.mac.phydev;
6457 ret = phy_read_status(phydev);
6459 dev_err(&hdev->pdev->dev,
6460 "phy update link status fail, ret = %d\n", ret);
6464 if (phydev->link == link_ret)
6467 msleep(HCLGE_LINK_STATUS_MS);
6468 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6471 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6473 #define HCLGE_MAC_LINK_STATUS_NUM 100
6479 ret = hclge_get_mac_link_status(hdev);
6482 else if (ret == link_ret)
6485 msleep(HCLGE_LINK_STATUS_MS);
6486 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
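	/* The loop above polls up to HCLGE_MAC_LINK_STATUS_NUM times,
	 * sleeping HCLGE_LINK_STATUS_MS between reads of the MAC link
	 * status, before timing out.
	 */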
6490 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6493 #define HCLGE_LINK_STATUS_DOWN 0
6494 #define HCLGE_LINK_STATUS_UP 1
6498 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6501 hclge_phy_link_status_wait(hdev, link_ret);
6503 return hclge_mac_link_status_wait(hdev, link_ret);
6506 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6508 struct hclge_config_mac_mode_cmd *req;
6509 struct hclge_desc desc;
6513 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6514 /* 1 Read out the MAC mode config at first */
6515 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6516 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6518 dev_err(&hdev->pdev->dev,
6519 "mac loopback get fail, ret =%d.\n", ret);
6523 /* 2 Then setup the loopback flag */
6524 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6525 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6526 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6527 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6529 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
	/* 3 Config mac work mode with loopback flag
	 * and its original configuration parameters
	 */
6534 hclge_cmd_reuse_desc(&desc, false);
6535 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6537 dev_err(&hdev->pdev->dev,
6538 "mac loopback set fail, ret =%d.\n", ret);
6542 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6543 enum hnae3_loop loop_mode)
6545 #define HCLGE_SERDES_RETRY_MS 10
6546 #define HCLGE_SERDES_RETRY_NUM 100
6548 struct hclge_serdes_lb_cmd *req;
6549 struct hclge_desc desc;
6553 req = (struct hclge_serdes_lb_cmd *)desc.data;
6554 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6556 switch (loop_mode) {
6557 case HNAE3_LOOP_SERIAL_SERDES:
6558 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6560 case HNAE3_LOOP_PARALLEL_SERDES:
6561 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6564 dev_err(&hdev->pdev->dev,
6565 "unsupported serdes loopback mode %d\n", loop_mode);
6570 req->enable = loop_mode_b;
6571 req->mask = loop_mode_b;
6573 req->mask = loop_mode_b;
6576 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6578 dev_err(&hdev->pdev->dev,
6579 "serdes loopback set fail, ret = %d\n", ret);
6584 msleep(HCLGE_SERDES_RETRY_MS);
6585 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6587 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6589 dev_err(&hdev->pdev->dev,
6590 "serdes loopback get, ret = %d\n", ret);
6593 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6594 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6596 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6597 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6599 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6600 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6606 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6607 enum hnae3_loop loop_mode)
6611 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6615 hclge_cfg_mac_mode(hdev, en);
	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6619 dev_err(&hdev->pdev->dev,
6620 "serdes loopback config mac mode timeout\n");
6625 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6626 struct phy_device *phydev)
6630 if (!phydev->suspended) {
6631 ret = phy_suspend(phydev);
6636 ret = phy_resume(phydev);
6640 return phy_loopback(phydev, true);
6643 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6644 struct phy_device *phydev)
6648 ret = phy_loopback(phydev, false);
6652 return phy_suspend(phydev);
6655 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6657 struct phy_device *phydev = hdev->hw.mac.phydev;
6664 ret = hclge_enable_phy_loopback(hdev, phydev);
6666 ret = hclge_disable_phy_loopback(hdev, phydev);
6668 dev_err(&hdev->pdev->dev,
6669 "set phy loopback fail, ret = %d\n", ret);
6673 hclge_cfg_mac_mode(hdev, en);
	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6677 dev_err(&hdev->pdev->dev,
6678 "phy loopback config mac mode timeout\n");
6683 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6684 int stream_id, bool enable)
6686 struct hclge_desc desc;
6687 struct hclge_cfg_com_tqp_queue_cmd *req =
6688 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6691 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6692 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6693 req->stream_id = cpu_to_le16(stream_id);
6695 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6697 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6699 dev_err(&hdev->pdev->dev,
6700 "Tqp enable fail, status =%d.\n", ret);
6704 static int hclge_set_loopback(struct hnae3_handle *handle,
6705 enum hnae3_loop loop_mode, bool en)
6707 struct hclge_vport *vport = hclge_get_vport(handle);
6708 struct hnae3_knic_private_info *kinfo;
6709 struct hclge_dev *hdev = vport->back;
6712 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6713 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6714 * the same, the packets are looped back in the SSU. If SSU loopback
6715 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
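 * For the loopback test below this means: when enabling loopback, clear
 * the SSU "allow loopback" bit (switch_param = 0) so the test frames
 * really reach the MAC/serdes, and when disabling, set the bit back to
 * restore the default SSU behaviour.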
6717 if (hdev->pdev->revision >= 0x21) {
6718 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6720 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6721 HCLGE_SWITCH_ALW_LPBK_MASK);
6726 switch (loop_mode) {
6727 case HNAE3_LOOP_APP:
6728 ret = hclge_set_app_loopback(hdev, en);
6730 case HNAE3_LOOP_SERIAL_SERDES:
6731 case HNAE3_LOOP_PARALLEL_SERDES:
6732 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6734 case HNAE3_LOOP_PHY:
6735 ret = hclge_set_phy_loopback(hdev, en);
6739 dev_err(&hdev->pdev->dev,
6740 "loop_mode %d is not supported\n", loop_mode);
6747 kinfo = &vport->nic.kinfo;
6748 for (i = 0; i < kinfo->num_tqps; i++) {
6749 ret = hclge_tqp_enable(hdev, i, 0, en);
6757 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6761 ret = hclge_set_app_loopback(hdev, false);
6765 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6769 return hclge_cfg_serdes_loopback(hdev, false,
6770 HNAE3_LOOP_PARALLEL_SERDES);
6773 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6775 struct hclge_vport *vport = hclge_get_vport(handle);
6776 struct hnae3_knic_private_info *kinfo;
6777 struct hnae3_queue *queue;
6778 struct hclge_tqp *tqp;
6781 kinfo = &vport->nic.kinfo;
6782 for (i = 0; i < kinfo->num_tqps; i++) {
6783 queue = handle->kinfo.tqp[i];
6784 tqp = container_of(queue, struct hclge_tqp, q);
6785 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6789 static void hclge_flush_link_update(struct hclge_dev *hdev)
6791 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
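/* The wait below is a bounded spin: it gives up after
 * HCLGE_FLUSH_LINK_TIMEOUT iterations, and exits early as soon as the
 * service task makes progress (serv_processed_cnt changes) or the
 * LINK_UPDATING bit is cleared.
 */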
6793 unsigned long last = hdev->serv_processed_cnt;
6796 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6797 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6798 last == hdev->serv_processed_cnt)
6802 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6804 struct hclge_vport *vport = hclge_get_vport(handle);
6805 struct hclge_dev *hdev = vport->back;
6808 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6810 /* Set the DOWN flag here to disable link updating */
6811 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6813 /* flush memory to make sure DOWN is seen by service task */
6814 smp_mb__before_atomic();
6815 hclge_flush_link_update(hdev);
6819 static int hclge_ae_start(struct hnae3_handle *handle)
6821 struct hclge_vport *vport = hclge_get_vport(handle);
6822 struct hclge_dev *hdev = vport->back;
6825 hclge_cfg_mac_mode(hdev, true);
6826 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6827 hdev->hw.mac.link = 0;
6829 /* reset tqp stats */
6830 hclge_reset_tqp_stats(handle);
6832 hclge_mac_start_phy(hdev);
6837 static void hclge_ae_stop(struct hnae3_handle *handle)
6839 struct hclge_vport *vport = hclge_get_vport(handle);
6840 struct hclge_dev *hdev = vport->back;
6843 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6845 hclge_clear_arfs_rules(handle);
6847 /* If it is not a PF reset, the firmware will disable the MAC,
6848 * so we only need to stop the phy here.
6850 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6851 hdev->reset_type != HNAE3_FUNC_RESET) {
6852 hclge_mac_stop_phy(hdev);
6853 hclge_update_link_status(hdev);
6857 for (i = 0; i < handle->kinfo.num_tqps; i++)
6858 hclge_reset_tqp(handle, i);
6860 hclge_config_mac_tnl_int(hdev, false);
6863 hclge_cfg_mac_mode(hdev, false);
6865 hclge_mac_stop_phy(hdev);
6867 /* reset tqp stats */
6868 hclge_reset_tqp_stats(handle);
6869 hclge_update_link_status(hdev);
6872 int hclge_vport_start(struct hclge_vport *vport)
6874 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6875 vport->last_active_jiffies = jiffies;
6879 void hclge_vport_stop(struct hclge_vport *vport)
6881 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6884 static int hclge_client_start(struct hnae3_handle *handle)
6886 struct hclge_vport *vport = hclge_get_vport(handle);
6888 return hclge_vport_start(vport);
6891 static void hclge_client_stop(struct hnae3_handle *handle)
6893 struct hclge_vport *vport = hclge_get_vport(handle);
6895 hclge_vport_stop(vport);
6898 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6899 u16 cmdq_resp, u8 resp_code,
6900 enum hclge_mac_vlan_tbl_opcode op)
6902 struct hclge_dev *hdev = vport->back;
6905 dev_err(&hdev->pdev->dev,
6906 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6911 if (op == HCLGE_MAC_VLAN_ADD) {
6912 if ((!resp_code) || (resp_code == 1)) {
6914 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6915 dev_err(&hdev->pdev->dev,
6916 "add mac addr failed for uc_overflow.\n");
6918 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6919 dev_err(&hdev->pdev->dev,
6920 "add mac addr failed for mc_overflow.\n");
6924 dev_err(&hdev->pdev->dev,
6925 "add mac addr failed for undefined, code=%u.\n",
6928 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6931 } else if (resp_code == 1) {
6932 dev_dbg(&hdev->pdev->dev,
6933 "remove mac addr failed for miss.\n");
6937 dev_err(&hdev->pdev->dev,
6938 "remove mac addr failed for undefined, code=%u.\n",
6941 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6944 } else if (resp_code == 1) {
6945 dev_dbg(&hdev->pdev->dev,
6946 "lookup mac addr failed for miss.\n");
6950 dev_err(&hdev->pdev->dev,
6951 "lookup mac addr failed for undefined, code=%u.\n",
6956 dev_err(&hdev->pdev->dev,
6957 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6962 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6964 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
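/* The 256-bit VF bitmap is split across two descriptors: vfids 0..191
 * live in desc[1] (six 32-bit data words) and vfids 192..255 in desc[2].
 * Worked example: vfid 200 selects desc[2], word (200 - 192) / 32 = 0,
 * bit 200 % 32 = 8.
 */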
6966 unsigned int word_num;
6967 unsigned int bit_num;
6969 if (vfid > 255 || vfid < 0)
6972 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6973 word_num = vfid / 32;
6974 bit_num = vfid % 32;
6976 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6978 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6980 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6981 bit_num = vfid % 32;
6983 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6985 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6991 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6993 #define HCLGE_DESC_NUMBER 3
6994 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6997 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6998 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6999 if (desc[i].data[j])
7005 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7006 const u8 *addr, bool is_mc)
7008 const unsigned char *mac_addr = addr;
7009 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7010 (mac_addr[0]) | (mac_addr[1] << 8);
7011 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
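/* Worked example of the byte packing above: for the (illustrative)
 * address 00:11:22:33:44:55, high_val = 0x33221100 (bytes 3..0 of the
 * MAC) and low_val = 0x5544 (bytes 5..4).
 */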
7013 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7015 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7016 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7019 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7020 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7023 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7024 struct hclge_mac_vlan_tbl_entry_cmd *req)
7026 struct hclge_dev *hdev = vport->back;
7027 struct hclge_desc desc;
7032 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7034 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7036 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7038 dev_err(&hdev->pdev->dev,
7039 "del mac addr failed for cmd_send, ret =%d.\n",
7043 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7044 retval = le16_to_cpu(desc.retval);
7046 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7047 HCLGE_MAC_VLAN_REMOVE);
7050 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7051 struct hclge_mac_vlan_tbl_entry_cmd *req,
7052 struct hclge_desc *desc,
7055 struct hclge_dev *hdev = vport->back;
7060 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7062 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7063 memcpy(desc[0].data,
7065 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7066 hclge_cmd_setup_basic_desc(&desc[1],
7067 HCLGE_OPC_MAC_VLAN_ADD,
7069 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7070 hclge_cmd_setup_basic_desc(&desc[2],
7071 HCLGE_OPC_MAC_VLAN_ADD,
7073 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7075 memcpy(desc[0].data,
7077 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7078 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7081 dev_err(&hdev->pdev->dev,
7082 "lookup mac addr failed for cmd_send, ret =%d.\n",
7086 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7087 retval = le16_to_cpu(desc[0].retval);
7089 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7090 HCLGE_MAC_VLAN_LKUP);
7093 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7094 struct hclge_mac_vlan_tbl_entry_cmd *req,
7095 struct hclge_desc *mc_desc)
7097 struct hclge_dev *hdev = vport->back;
7104 struct hclge_desc desc;
7106 hclge_cmd_setup_basic_desc(&desc,
7107 HCLGE_OPC_MAC_VLAN_ADD,
7109 memcpy(desc.data, req,
7110 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7111 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7112 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7113 retval = le16_to_cpu(desc.retval);
7115 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7117 HCLGE_MAC_VLAN_ADD);
7119 hclge_cmd_reuse_desc(&mc_desc[0], false);
7120 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7121 hclge_cmd_reuse_desc(&mc_desc[1], false);
7122 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7123 hclge_cmd_reuse_desc(&mc_desc[2], false);
7124 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7125 memcpy(mc_desc[0].data, req,
7126 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7127 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7128 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7129 retval = le16_to_cpu(mc_desc[0].retval);
7131 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7133 HCLGE_MAC_VLAN_ADD);
7137 dev_err(&hdev->pdev->dev,
7138 "add mac addr failed for cmd_send, ret =%d.\n",
7146 static int hclge_init_umv_space(struct hclge_dev *hdev)
7148 u16 allocated_size = 0;
7151 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7156 if (allocated_size < hdev->wanted_umv_size)
7157 dev_warn(&hdev->pdev->dev,
7158 "Alloc umv space failed, want %u, get %u\n",
7159 hdev->wanted_umv_size, allocated_size);
7161 mutex_init(&hdev->umv_mutex);
7162 hdev->max_umv_size = allocated_size;
7163 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7164 * preserve some unicast mac vlan table entries shared by pf
7167 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7168 hdev->share_umv_size = hdev->priv_umv_size +
7169 hdev->max_umv_size % (hdev->num_req_vfs + 2);
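/* Illustrative numbers: with max_umv_size = 256 and num_req_vfs = 6 the
 * divisor is 6 + 2 = 8, so priv_umv_size = 256 / 8 = 32 private entries
 * per function (the PF and each of the 6 VFs), and share_umv_size =
 * 32 + 256 % 8 = 32 entries form the pool drawn on once a function
 * exhausts its private quota.
 */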
7174 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7178 if (hdev->max_umv_size > 0) {
7179 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7183 hdev->max_umv_size = 0;
7185 mutex_destroy(&hdev->umv_mutex);
7190 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7191 u16 *allocated_size, bool is_alloc)
7193 struct hclge_umv_spc_alc_cmd *req;
7194 struct hclge_desc desc;
7197 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7198 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7200 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7202 req->space_size = cpu_to_le32(space_size);
7204 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7206 dev_err(&hdev->pdev->dev,
7207 "%s umv space failed for cmd_send, ret =%d\n",
7208 is_alloc ? "allocate" : "free", ret);
7212 if (is_alloc && allocated_size)
7213 *allocated_size = le32_to_cpu(desc.data[1]);
7218 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7220 struct hclge_vport *vport;
7223 for (i = 0; i < hdev->num_alloc_vport; i++) {
7224 vport = &hdev->vport[i];
7225 vport->used_umv_num = 0;
7228 mutex_lock(&hdev->umv_mutex);
7229 hdev->share_umv_size = hdev->priv_umv_size +
7230 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7231 mutex_unlock(&hdev->umv_mutex);
7234 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7236 struct hclge_dev *hdev = vport->back;
7239 mutex_lock(&hdev->umv_mutex);
7240 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7241 hdev->share_umv_size == 0);
7242 mutex_unlock(&hdev->umv_mutex);
7247 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7249 struct hclge_dev *hdev = vport->back;
7251 mutex_lock(&hdev->umv_mutex);
7253 if (vport->used_umv_num > hdev->priv_umv_size)
7254 hdev->share_umv_size++;
7256 if (vport->used_umv_num > 0)
7257 vport->used_umv_num--;
7259 if (vport->used_umv_num >= hdev->priv_umv_size &&
7260 hdev->share_umv_size > 0)
7261 hdev->share_umv_size--;
7262 vport->used_umv_num++;
7264 mutex_unlock(&hdev->umv_mutex);
7267 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7268 const unsigned char *addr)
7270 struct hclge_vport *vport = hclge_get_vport(handle);
7272 return hclge_add_uc_addr_common(vport, addr);
7275 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7276 const unsigned char *addr)
7278 struct hclge_dev *hdev = vport->back;
7279 struct hclge_mac_vlan_tbl_entry_cmd req;
7280 struct hclge_desc desc;
7281 u16 egress_port = 0;
7284 /* mac addr check */
7285 if (is_zero_ether_addr(addr) ||
7286 is_broadcast_ether_addr(addr) ||
7287 is_multicast_ether_addr(addr)) {
7288 dev_err(&hdev->pdev->dev,
7289 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7290 addr, is_zero_ether_addr(addr),
7291 is_broadcast_ether_addr(addr),
7292 is_multicast_ether_addr(addr));
7296 memset(&req, 0, sizeof(req));
7298 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7299 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7301 req.egress_port = cpu_to_le16(egress_port);
7303 hclge_prepare_mac_addr(&req, addr, false);
7305 /* Lookup the mac address in the mac_vlan table, and add
7306 * it if the entry does not exist. Duplicate unicast entries
7307 * are not allowed in the mac vlan table.
7309 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7310 if (ret == -ENOENT) {
7311 if (!hclge_is_umv_space_full(vport)) {
7312 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7314 hclge_update_umv_space(vport, false);
7318 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7319 hdev->priv_umv_size);
7324 /* check if we just hit the duplicate */
7326 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7327 vport->vport_id, addr);
7331 dev_err(&hdev->pdev->dev,
7332 "PF failed to add unicast entry(%pM) in the MAC table\n",
7338 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7339 const unsigned char *addr)
7341 struct hclge_vport *vport = hclge_get_vport(handle);
7343 return hclge_rm_uc_addr_common(vport, addr);
7346 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7347 const unsigned char *addr)
7349 struct hclge_dev *hdev = vport->back;
7350 struct hclge_mac_vlan_tbl_entry_cmd req;
7353 /* mac addr check */
7354 if (is_zero_ether_addr(addr) ||
7355 is_broadcast_ether_addr(addr) ||
7356 is_multicast_ether_addr(addr)) {
7357 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7362 memset(&req, 0, sizeof(req));
7363 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7364 hclge_prepare_mac_addr(&req, addr, false);
7365 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7367 hclge_update_umv_space(vport, true);
7372 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7373 const unsigned char *addr)
7375 struct hclge_vport *vport = hclge_get_vport(handle);
7377 return hclge_add_mc_addr_common(vport, addr);
7380 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7381 const unsigned char *addr)
7383 struct hclge_dev *hdev = vport->back;
7384 struct hclge_mac_vlan_tbl_entry_cmd req;
7385 struct hclge_desc desc[3];
7388 /* mac addr check */
7389 if (!is_multicast_ether_addr(addr)) {
7390 dev_err(&hdev->pdev->dev,
7391 "Add mc mac err! invalid mac:%pM.\n",
7395 memset(&req, 0, sizeof(req));
7396 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7397 hclge_prepare_mac_addr(&req, addr, true);
7398 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7400 /* This mac addr does not exist, add a new entry for it */
7401 memset(desc[0].data, 0, sizeof(desc[0].data));
7402 memset(desc[1].data, 0, sizeof(desc[0].data));
7403 memset(desc[2].data, 0, sizeof(desc[0].data));
7405 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7408 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7410 if (status == -ENOSPC)
7411 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7416 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7417 const unsigned char *addr)
7419 struct hclge_vport *vport = hclge_get_vport(handle);
7421 return hclge_rm_mc_addr_common(vport, addr);
7424 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7425 const unsigned char *addr)
7427 struct hclge_dev *hdev = vport->back;
7428 struct hclge_mac_vlan_tbl_entry_cmd req;
7429 enum hclge_cmd_status status;
7430 struct hclge_desc desc[3];
7432 /* mac addr check */
7433 if (!is_multicast_ether_addr(addr)) {
7434 dev_dbg(&hdev->pdev->dev,
7435 "Remove mc mac err! invalid mac:%pM.\n",
7440 memset(&req, 0, sizeof(req));
7441 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7442 hclge_prepare_mac_addr(&req, addr, true);
7443 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7445 /* This mac addr exists, remove this handle's VFID for it */
7446 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7450 if (hclge_is_all_function_id_zero(desc))
7451 /* All the vfid is zero, so need to delete this entry */
7452 status = hclge_remove_mac_vlan_tbl(vport, &req);
7454 /* Not all the vfid is zero, update the vfid */
7455 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7458 /* Maybe this mac address is in the mta table, but it cannot be
7459 * deleted here because an mta entry represents an address
7460 * range rather than a specific address. The delete action for
7461 * all entries will take effect in update_mta_status, called by
7462 * hns3_nic_set_rx_mode.
7470 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7471 enum HCLGE_MAC_ADDR_TYPE mac_type)
7473 struct hclge_vport_mac_addr_cfg *mac_cfg;
7474 struct list_head *list;
7476 if (!vport->vport_id)
7479 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7483 mac_cfg->hd_tbl_status = true;
7484 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7486 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7487 &vport->uc_mac_list : &vport->mc_mac_list;
7489 list_add_tail(&mac_cfg->node, list);
7492 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7494 enum HCLGE_MAC_ADDR_TYPE mac_type)
7496 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7497 struct list_head *list;
7498 bool uc_flag, mc_flag;
7500 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7501 &vport->uc_mac_list : &vport->mc_mac_list;
7503 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7504 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7506 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7507 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7508 if (uc_flag && mac_cfg->hd_tbl_status)
7509 hclge_rm_uc_addr_common(vport, mac_addr);
7511 if (mc_flag && mac_cfg->hd_tbl_status)
7512 hclge_rm_mc_addr_common(vport, mac_addr);
7514 list_del(&mac_cfg->node);
7521 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7522 enum HCLGE_MAC_ADDR_TYPE mac_type)
7524 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7525 struct list_head *list;
7527 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7528 &vport->uc_mac_list : &vport->mc_mac_list;
7530 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7531 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7532 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7534 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7535 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7537 mac_cfg->hd_tbl_status = false;
7539 list_del(&mac_cfg->node);
7545 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7547 struct hclge_vport_mac_addr_cfg *mac, *tmp;
7548 struct hclge_vport *vport;
7551 for (i = 0; i < hdev->num_alloc_vport; i++) {
7552 vport = &hdev->vport[i];
7553 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7554 list_del(&mac->node);
7558 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7559 list_del(&mac->node);
7565 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7566 u16 cmdq_resp, u8 resp_code)
7568 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7569 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
7570 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7571 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7576 dev_err(&hdev->pdev->dev,
7577 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7582 switch (resp_code) {
7583 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7584 case HCLGE_ETHERTYPE_ALREADY_ADD:
7587 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7588 dev_err(&hdev->pdev->dev,
7589 "add mac ethertype failed for manager table overflow.\n");
7590 return_status = -EIO;
7592 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7593 dev_err(&hdev->pdev->dev,
7594 "add mac ethertype failed for key conflict.\n");
7595 return_status = -EIO;
7598 dev_err(&hdev->pdev->dev,
7599 "add mac ethertype failed for undefined, code=%u.\n",
7601 return_status = -EIO;
7604 return return_status;
7607 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7610 struct hclge_mac_vlan_tbl_entry_cmd req;
7611 struct hclge_dev *hdev = vport->back;
7612 struct hclge_desc desc;
7613 u16 egress_port = 0;
7616 if (is_zero_ether_addr(mac_addr))
7619 memset(&req, 0, sizeof(req));
7620 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7621 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7622 req.egress_port = cpu_to_le16(egress_port);
7623 hclge_prepare_mac_addr(&req, mac_addr, false);
7625 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7628 vf_idx += HCLGE_VF_VPORT_START_NUM;
7629 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7631 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7637 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7640 struct hclge_vport *vport = hclge_get_vport(handle);
7641 struct hclge_dev *hdev = vport->back;
7643 vport = hclge_get_vf_vport(hdev, vf);
7647 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7648 dev_info(&hdev->pdev->dev,
7649 "Specified MAC(=%pM) is same as before, no change committed!\n",
7654 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7655 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7660 ether_addr_copy(vport->vf_info.mac, mac_addr);
7661 dev_info(&hdev->pdev->dev,
7662 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7665 return hclge_inform_reset_assert_to_vf(vport);
7668 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7669 const struct hclge_mac_mgr_tbl_entry_cmd *req)
7671 struct hclge_desc desc;
7676 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7677 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7679 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7681 dev_err(&hdev->pdev->dev,
7682 "add mac ethertype failed for cmd_send, ret =%d.\n",
7687 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7688 retval = le16_to_cpu(desc.retval);
7690 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7693 static int init_mgr_tbl(struct hclge_dev *hdev)
7698 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7699 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7701 dev_err(&hdev->pdev->dev,
7702 "add mac ethertype failed, ret =%d.\n",
7711 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7713 struct hclge_vport *vport = hclge_get_vport(handle);
7714 struct hclge_dev *hdev = vport->back;
7716 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7719 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7722 const unsigned char *new_addr = (const unsigned char *)p;
7723 struct hclge_vport *vport = hclge_get_vport(handle);
7724 struct hclge_dev *hdev = vport->back;
7727 /* mac addr check */
7728 if (is_zero_ether_addr(new_addr) ||
7729 is_broadcast_ether_addr(new_addr) ||
7730 is_multicast_ether_addr(new_addr)) {
7731 dev_err(&hdev->pdev->dev,
7732 "Change uc mac err! invalid mac:%pM.\n",
7737 if ((!is_first || is_kdump_kernel()) &&
7738 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7739 dev_warn(&hdev->pdev->dev,
7740 "remove old uc mac address fail.\n");
7742 ret = hclge_add_uc_addr(handle, new_addr);
7744 dev_err(&hdev->pdev->dev,
7745 "add uc mac address fail, ret =%d.\n",
7749 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7750 dev_err(&hdev->pdev->dev,
7751 "restore uc mac address fail.\n");
7756 ret = hclge_pause_addr_cfg(hdev, new_addr);
7758 dev_err(&hdev->pdev->dev,
7759 "configure mac pause address fail, ret =%d.\n",
7764 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7769 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7772 struct hclge_vport *vport = hclge_get_vport(handle);
7773 struct hclge_dev *hdev = vport->back;
7775 if (!hdev->hw.mac.phydev)
7778 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7781 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7782 u8 fe_type, bool filter_en, u8 vf_id)
7784 struct hclge_vlan_filter_ctrl_cmd *req;
7785 struct hclge_desc desc;
7788 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7790 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7791 req->vlan_type = vlan_type;
7792 req->vlan_fe = filter_en ? fe_type : 0;
7795 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7797 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7803 #define HCLGE_FILTER_TYPE_VF 0
7804 #define HCLGE_FILTER_TYPE_PORT 1
7805 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7806 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7807 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7808 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7809 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7810 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7811 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7812 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7813 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
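/* So, numerically: HCLGE_FILTER_FE_EGRESS = BIT(1) | BIT(3) = 0x0a and
 * HCLGE_FILTER_FE_INGRESS = BIT(0) | BIT(2) = 0x05, i.e. each direction
 * combines the NIC and RoCE filter-enable bits, which are always toggled
 * together.
 */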
7815 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7817 struct hclge_vport *vport = hclge_get_vport(handle);
7818 struct hclge_dev *hdev = vport->back;
7820 if (hdev->pdev->revision >= 0x21) {
7821 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7822 HCLGE_FILTER_FE_EGRESS, enable, 0);
7823 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7824 HCLGE_FILTER_FE_INGRESS, enable, 0);
7826 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7827 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7831 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7833 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7836 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7837 bool is_kill, u16 vlan,
7840 struct hclge_vport *vport = &hdev->vport[vfid];
7841 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7842 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7843 struct hclge_desc desc[2];
7848 /* if the vf vlan table is full, firmware will close the vf vlan filter;
7849 * adding a new vlan id to the filter is then both impossible and unnecessary.
7850 * If spoof check is enabled and the vf vlan table is full, no new vlan
7851 * should be added, because tx packets with these vlan ids will be dropped.
7853 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7854 if (vport->vf_info.spoofchk && vlan) {
7855 dev_err(&hdev->pdev->dev,
7856 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7862 hclge_cmd_setup_basic_desc(&desc[0],
7863 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7864 hclge_cmd_setup_basic_desc(&desc[1],
7865 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7867 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7869 vf_byte_off = vfid / 8;
7870 vf_byte_val = 1 << (vfid % 8);
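/* e.g. vfid 10 selects bitmap byte 10 / 8 = 1 and bit value
 * 1 << (10 % 8) = 0x04 within that byte.
 */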
7872 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7873 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7875 req0->vlan_id = cpu_to_le16(vlan);
7876 req0->vlan_cfg = is_kill;
7878 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7879 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7881 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7883 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7885 dev_err(&hdev->pdev->dev,
7886 "Send vf vlan command fail, ret =%d.\n",
7892 #define HCLGE_VF_VLAN_NO_ENTRY 2
7893 if (!req0->resp_code || req0->resp_code == 1)
7896 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7897 set_bit(vfid, hdev->vf_vlan_full);
7898 dev_warn(&hdev->pdev->dev,
7899 "vf vlan table is full, vf vlan filter is disabled\n");
7903 dev_err(&hdev->pdev->dev,
7904 "Add vf vlan filter fail, ret =%u.\n",
7907 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7908 if (!req0->resp_code)
7911 /* vf vlan filter is disabled when the vf vlan table is full,
7912 * so the new vlan id was never added to the vf vlan table.
7913 * Just return 0 without a warning, to avoid massive verbose
7914 * print logs at unload time.
7916 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7919 dev_err(&hdev->pdev->dev,
7920 "Kill vf vlan filter fail, ret =%u.\n",
7927 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7928 u16 vlan_id, bool is_kill)
7930 struct hclge_vlan_filter_pf_cfg_cmd *req;
7931 struct hclge_desc desc;
7932 u8 vlan_offset_byte_val;
7933 u8 vlan_offset_byte;
7937 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7939 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7940 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7941 HCLGE_VLAN_BYTE_SIZE;
7942 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
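/* Assuming HCLGE_VLAN_ID_OFFSET_STEP is 160 and HCLGE_VLAN_BYTE_SIZE is 8
 * (as the "offset_160" naming suggests): vlan_id 170 yields
 * vlan_offset_160 = 1, vlan_offset_byte = (170 % 160) / 8 = 1 and a byte
 * value of 1 << (170 % 8) = 0x04.
 */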
7944 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7945 req->vlan_offset = vlan_offset_160;
7946 req->vlan_cfg = is_kill;
7947 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7949 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7951 dev_err(&hdev->pdev->dev,
7952 "port vlan command, send fail, ret =%d.\n", ret);
7956 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7957 u16 vport_id, u16 vlan_id,
7960 u16 vport_idx, vport_num = 0;
7963 if (is_kill && !vlan_id)
7966 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7969 dev_err(&hdev->pdev->dev,
7970 "Set %u vport vlan filter config fail, ret =%d.\n",
7975 /* vlan 0 may be added twice when the 8021q module is enabled */
7976 if (!is_kill && !vlan_id &&
7977 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7980 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7981 dev_err(&hdev->pdev->dev,
7982 "Add port vlan failed, vport %u is already in vlan %u\n",
7988 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7989 dev_err(&hdev->pdev->dev,
7990 "Delete port vlan failed, vport %u is not in vlan %u\n",
7995 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7998 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7999 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8005 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8007 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8008 struct hclge_vport_vtag_tx_cfg_cmd *req;
8009 struct hclge_dev *hdev = vport->back;
8010 struct hclge_desc desc;
8014 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8016 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8017 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8018 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8019 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8020 vcfg->accept_tag1 ? 1 : 0);
8021 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8022 vcfg->accept_untag1 ? 1 : 0);
8023 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8024 vcfg->accept_tag2 ? 1 : 0);
8025 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8026 vcfg->accept_untag2 ? 1 : 0);
8027 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8028 vcfg->insert_tag1_en ? 1 : 0);
8029 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8030 vcfg->insert_tag2_en ? 1 : 0);
8031 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8033 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8034 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8035 HCLGE_VF_NUM_PER_BYTE;
8036 req->vf_bitmap[bmap_index] =
8037 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
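/* Assuming HCLGE_VF_NUM_PER_CMD is 64 and HCLGE_VF_NUM_PER_BYTE is 8 (per
 * the field layout in hclge_main.h): vport_id 5 gives vf_offset = 0,
 * bmap_index = 0 and a bitmap value of 1 << 5 = 0x20.
 */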
8039 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8041 dev_err(&hdev->pdev->dev,
8042 "Send port txvlan cfg command fail, ret =%d\n",
8048 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8050 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8051 struct hclge_vport_vtag_rx_cfg_cmd *req;
8052 struct hclge_dev *hdev = vport->back;
8053 struct hclge_desc desc;
8057 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8059 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8060 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8061 vcfg->strip_tag1_en ? 1 : 0);
8062 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8063 vcfg->strip_tag2_en ? 1 : 0);
8064 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8065 vcfg->vlan1_vlan_prionly ? 1 : 0);
8066 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8067 vcfg->vlan2_vlan_prionly ? 1 : 0);
8069 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8070 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8071 HCLGE_VF_NUM_PER_BYTE;
8072 req->vf_bitmap[bmap_index] =
8073 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8075 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8077 dev_err(&hdev->pdev->dev,
8078 "Send port rxvlan cfg command fail, ret =%d\n",
8084 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8085 u16 port_base_vlan_state,
8090 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8091 vport->txvlan_cfg.accept_tag1 = true;
8092 vport->txvlan_cfg.insert_tag1_en = false;
8093 vport->txvlan_cfg.default_tag1 = 0;
8095 vport->txvlan_cfg.accept_tag1 = false;
8096 vport->txvlan_cfg.insert_tag1_en = true;
8097 vport->txvlan_cfg.default_tag1 = vlan_tag;
8100 vport->txvlan_cfg.accept_untag1 = true;
8102 /* accept_tag2 and accept_untag2 are not supported on
8103 * pdev revision 0x20; newer revisions support them, but
8104 * these two fields cannot be configured by the user.
8106 vport->txvlan_cfg.accept_tag2 = true;
8107 vport->txvlan_cfg.accept_untag2 = true;
8108 vport->txvlan_cfg.insert_tag2_en = false;
8109 vport->txvlan_cfg.default_tag2 = 0;
8111 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8112 vport->rxvlan_cfg.strip_tag1_en = false;
8113 vport->rxvlan_cfg.strip_tag2_en =
8114 vport->rxvlan_cfg.rx_vlan_offload_en;
8116 vport->rxvlan_cfg.strip_tag1_en =
8117 vport->rxvlan_cfg.rx_vlan_offload_en;
8118 vport->rxvlan_cfg.strip_tag2_en = true;
8120 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8121 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8123 ret = hclge_set_vlan_tx_offload_cfg(vport);
8127 return hclge_set_vlan_rx_offload_cfg(vport);
8130 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8132 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8133 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8134 struct hclge_desc desc;
8137 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8138 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8139 rx_req->ot_fst_vlan_type =
8140 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8141 rx_req->ot_sec_vlan_type =
8142 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8143 rx_req->in_fst_vlan_type =
8144 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8145 rx_req->in_sec_vlan_type =
8146 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8148 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8150 dev_err(&hdev->pdev->dev,
8151 "Send rxvlan protocol type command fail, ret =%d\n",
8156 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8158 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8159 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8160 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8162 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8164 dev_err(&hdev->pdev->dev,
8165 "Send txvlan protocol type command fail, ret =%d\n",
8171 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8173 #define HCLGE_DEF_VLAN_TYPE 0x8100
8175 struct hnae3_handle *handle = &hdev->vport[0].nic;
8176 struct hclge_vport *vport;
8180 if (hdev->pdev->revision >= 0x21) {
8181 /* for revision 0x21, vf vlan filter is per function */
8182 for (i = 0; i < hdev->num_alloc_vport; i++) {
8183 vport = &hdev->vport[i];
8184 ret = hclge_set_vlan_filter_ctrl(hdev,
8185 HCLGE_FILTER_TYPE_VF,
8186 HCLGE_FILTER_FE_EGRESS,
8193 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8194 HCLGE_FILTER_FE_INGRESS, true,
8199 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8200 HCLGE_FILTER_FE_EGRESS_V1_B,
8206 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8208 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8209 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8210 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8211 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8212 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8213 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8215 ret = hclge_set_vlan_protocol_type(hdev);
8219 for (i = 0; i < hdev->num_alloc_vport; i++) {
8222 vport = &hdev->vport[i];
8223 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8225 ret = hclge_vlan_offload_cfg(vport,
8226 vport->port_base_vlan_cfg.state,
8232 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8235 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8238 struct hclge_vport_vlan_cfg *vlan;
8240 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8244 vlan->hd_tbl_status = writen_to_tbl;
8245 vlan->vlan_id = vlan_id;
8247 list_add_tail(&vlan->node, &vport->vlan_list);
8250 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8252 struct hclge_vport_vlan_cfg *vlan, *tmp;
8253 struct hclge_dev *hdev = vport->back;
8256 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8257 if (!vlan->hd_tbl_status) {
8258 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8260 vlan->vlan_id, false);
8262 dev_err(&hdev->pdev->dev,
8263 "restore vport vlan list failed, ret=%d\n",
8268 vlan->hd_tbl_status = true;
8274 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8277 struct hclge_vport_vlan_cfg *vlan, *tmp;
8278 struct hclge_dev *hdev = vport->back;
8280 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8281 if (vlan->vlan_id == vlan_id) {
8282 if (is_write_tbl && vlan->hd_tbl_status)
8283 hclge_set_vlan_filter_hw(hdev,
8289 list_del(&vlan->node);
8296 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8298 struct hclge_vport_vlan_cfg *vlan, *tmp;
8299 struct hclge_dev *hdev = vport->back;
8301 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8302 if (vlan->hd_tbl_status)
8303 hclge_set_vlan_filter_hw(hdev,
8309 vlan->hd_tbl_status = false;
8311 list_del(&vlan->node);
8317 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8319 struct hclge_vport_vlan_cfg *vlan, *tmp;
8320 struct hclge_vport *vport;
8323 for (i = 0; i < hdev->num_alloc_vport; i++) {
8324 vport = &hdev->vport[i];
8325 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8326 list_del(&vlan->node);
8332 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8334 struct hclge_vport *vport = hclge_get_vport(handle);
8335 struct hclge_vport_vlan_cfg *vlan, *tmp;
8336 struct hclge_dev *hdev = vport->back;
8341 for (i = 0; i < hdev->num_alloc_vport; i++) {
8342 vport = &hdev->vport[i];
8343 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8344 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8345 state = vport->port_base_vlan_cfg.state;
8347 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8348 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8349 vport->vport_id, vlan_id,
8354 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8357 if (!vlan->hd_tbl_status)
8359 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8361 vlan->vlan_id, false);
8368 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8370 struct hclge_vport *vport = hclge_get_vport(handle);
8372 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8373 vport->rxvlan_cfg.strip_tag1_en = false;
8374 vport->rxvlan_cfg.strip_tag2_en = enable;
8376 vport->rxvlan_cfg.strip_tag1_en = enable;
8377 vport->rxvlan_cfg.strip_tag2_en = true;
8379 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8380 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8381 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8383 return hclge_set_vlan_rx_offload_cfg(vport);
8386 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8387 u16 port_base_vlan_state,
8388 struct hclge_vlan_info *new_info,
8389 struct hclge_vlan_info *old_info)
8391 struct hclge_dev *hdev = vport->back;
8394 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8395 hclge_rm_vport_all_vlan_table(vport, false);
8396 return hclge_set_vlan_filter_hw(hdev,
8397 htons(new_info->vlan_proto),
8403 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8404 vport->vport_id, old_info->vlan_tag,
8409 return hclge_add_vport_all_vlan_table(vport);
8412 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8413 struct hclge_vlan_info *vlan_info)
8415 struct hnae3_handle *nic = &vport->nic;
8416 struct hclge_vlan_info *old_vlan_info;
8417 struct hclge_dev *hdev = vport->back;
8420 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8422 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8426 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8427 /* add new VLAN tag */
8428 ret = hclge_set_vlan_filter_hw(hdev,
8429 htons(vlan_info->vlan_proto),
8431 vlan_info->vlan_tag,
8436 /* remove old VLAN tag */
8437 ret = hclge_set_vlan_filter_hw(hdev,
8438 htons(old_vlan_info->vlan_proto),
8440 old_vlan_info->vlan_tag,
8448 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8453 /* update state only when disabling/enabling port based VLAN */
8454 vport->port_base_vlan_cfg.state = state;
8455 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8456 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8458 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8461 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8462 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8463 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8468 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8469 enum hnae3_port_base_vlan_state state,
8472 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8474 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8476 return HNAE3_PORT_BASE_VLAN_ENABLE;
8479 return HNAE3_PORT_BASE_VLAN_DISABLE;
8480 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8481 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8483 return HNAE3_PORT_BASE_VLAN_MODIFY;
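/* Assuming the elided conditions above test vlan == 0 (as the paired
 * returns suggest), the transitions read as a table:
 *   DISABLE + vlan 0     -> NOCHANGE    DISABLE + vlan N  -> ENABLE
 *   ENABLE  + vlan 0     -> DISABLE     ENABLE  + same tag -> NOCHANGE
 *   ENABLE  + other tag  -> MODIFY
 */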
8487 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8488 u16 vlan, u8 qos, __be16 proto)
8490 struct hclge_vport *vport = hclge_get_vport(handle);
8491 struct hclge_dev *hdev = vport->back;
8492 struct hclge_vlan_info vlan_info;
8496 if (hdev->pdev->revision == 0x20)
8499 vport = hclge_get_vf_vport(hdev, vfid);
8503 /* qos is a 3-bit value, so it can not be bigger than 7 */
8504 if (vlan > VLAN_N_VID - 1 || qos > 7)
8506 if (proto != htons(ETH_P_8021Q))
8507 return -EPROTONOSUPPORT;
8509 state = hclge_get_port_base_vlan_state(vport,
8510 vport->port_base_vlan_cfg.state,
8512 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8515 vlan_info.vlan_tag = vlan;
8516 vlan_info.qos = qos;
8517 vlan_info.vlan_proto = ntohs(proto);
8519 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8520 return hclge_update_port_base_vlan_cfg(vport, state,
8523 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8524 vport->vport_id, state,
8531 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8532 u16 vlan_id, bool is_kill)
8534 struct hclge_vport *vport = hclge_get_vport(handle);
8535 struct hclge_dev *hdev = vport->back;
8536 bool writen_to_tbl = false;
8539 /* When the device is resetting, the firmware is unable to handle the
8540 * mailbox. Just record the vlan id, and remove it after
8543 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8544 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8548 /* when port based vlan is enabled, we use the port based vlan as the
8549 * vlan filter entry. In this case, we don't update the vlan filter table
8550 * when the user adds a new vlan or removes an existing one, just the vport
8551 * vlan list. The vlan ids in the vlan list will be written to the vlan
8552 * filter table once port based vlan is disabled
8554 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8555 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8557 writen_to_tbl = true;
8562 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8564 hclge_add_vport_vlan_table(vport, vlan_id,
8566 } else if (is_kill) {
8567 /* when removing the hw vlan filter failed, record the vlan id,
8568 * and try to remove it from hw later, to stay consistent
8571 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8576 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8578 #define HCLGE_MAX_SYNC_COUNT 60
8580 int i, ret, sync_cnt = 0;
8583 /* start from vport 1 for PF is always alive */
8584 for (i = 0; i < hdev->num_alloc_vport; i++) {
8585 struct hclge_vport *vport = &hdev->vport[i];
8587 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8589 while (vlan_id != VLAN_N_VID) {
8590 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8591 vport->vport_id, vlan_id,
8593 if (ret && ret != -EINVAL)
8596 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8597 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8600 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8603 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8609 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8611 struct hclge_config_max_frm_size_cmd *req;
8612 struct hclge_desc desc;
8614 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8616 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8617 req->max_frm_size = cpu_to_le16(new_mps);
8618 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8620 return hclge_cmd_send(&hdev->hw, &desc, 1);
8623 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8625 struct hclge_vport *vport = hclge_get_vport(handle);
8627 return hclge_set_vport_mtu(vport, new_mtu);
8630 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8632 struct hclge_dev *hdev = vport->back;
8633 int i, max_frm_size, ret;
8635 /* HW supports 2 layers of vlan */
8636 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
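/* e.g. with the usual 14/4/4-byte values of ETH_HLEN/ETH_FCS_LEN/VLAN_HLEN,
 * new_mtu = 1500 gives max_frm_size = 1500 + 14 + 4 + 2 * 4 = 1526 bytes.
 */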
8637 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8638 max_frm_size > HCLGE_MAC_MAX_FRAME)
8641 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8642 mutex_lock(&hdev->vport_lock);
8643 /* VF's mps must fit within hdev->mps */
8644 if (vport->vport_id && max_frm_size > hdev->mps) {
8645 mutex_unlock(&hdev->vport_lock);
8647 } else if (vport->vport_id) {
8648 vport->mps = max_frm_size;
8649 mutex_unlock(&hdev->vport_lock);
8653 /* PF's mps must be greater than VF's mps */
8654 for (i = 1; i < hdev->num_alloc_vport; i++)
8655 if (max_frm_size < hdev->vport[i].mps) {
8656 mutex_unlock(&hdev->vport_lock);
8660 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8662 ret = hclge_set_mac_mtu(hdev, max_frm_size);
8664 dev_err(&hdev->pdev->dev,
8665 "Change mtu fail, ret =%d\n", ret);
8669 hdev->mps = max_frm_size;
8670 vport->mps = max_frm_size;
8672 ret = hclge_buffer_alloc(hdev);
8674 dev_err(&hdev->pdev->dev,
8675 "Allocate buffer fail, ret =%d\n", ret);
8678 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8679 mutex_unlock(&hdev->vport_lock);
8683 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8686 struct hclge_reset_tqp_queue_cmd *req;
8687 struct hclge_desc desc;
8690 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8692 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8693 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8695 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8697 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8699 dev_err(&hdev->pdev->dev,
8700 "Send tqp reset cmd error, status =%d\n", ret);
8707 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8709 struct hclge_reset_tqp_queue_cmd *req;
8710 struct hclge_desc desc;
8713 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8715 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8716 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8718 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8720 dev_err(&hdev->pdev->dev,
8721 "Get reset status error, status =%d\n", ret);
8725 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8728 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8730 struct hnae3_queue *queue;
8731 struct hclge_tqp *tqp;
8733 queue = handle->kinfo.tqp[queue_id];
8734 tqp = container_of(queue, struct hclge_tqp, q);
8739 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8741 struct hclge_vport *vport = hclge_get_vport(handle);
8742 struct hclge_dev *hdev = vport->back;
8743 int reset_try_times = 0;
8748 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8750 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8752 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8756 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8758 dev_err(&hdev->pdev->dev,
8759 "Send reset tqp cmd fail, ret = %d\n", ret);
8763 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8764 reset_status = hclge_get_reset_status(hdev, queue_gid);
8768 /* Wait for tqp hw reset */
8769 usleep_range(1000, 1200);
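/* Each retry sleeps 1000-1200 us; assuming HCLGE_TQP_RESET_TRY_TIMES is
 * 200 (its value in hclge_main.h, if unchanged), the hardware gets
 * roughly 200-240 ms to finish the queue reset before we report failure.
 */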
8772 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8773 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8777 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8779 dev_err(&hdev->pdev->dev,
8780 "Deassert the soft reset fail, ret = %d\n", ret);
8785 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8787 struct hclge_dev *hdev = vport->back;
8788 int reset_try_times = 0;
8793 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8795 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8797 dev_warn(&hdev->pdev->dev,
8798 "Send reset tqp cmd fail, ret = %d\n", ret);
8802 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8803 reset_status = hclge_get_reset_status(hdev, queue_gid);
8807 /* Wait for tqp hw reset */
8808 usleep_range(1000, 1200);
8811 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8812 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8816 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8818 dev_warn(&hdev->pdev->dev,
8819 "Deassert the soft reset fail, ret = %d\n", ret);
8822 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8824 struct hclge_vport *vport = hclge_get_vport(handle);
8825 struct hclge_dev *hdev = vport->back;
8827 return hdev->fw_version;
8830 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8832 struct phy_device *phydev = hdev->hw.mac.phydev;
8837 phy_set_asym_pause(phydev, rx_en, tx_en);
8840 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8844 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8847 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8849 dev_err(&hdev->pdev->dev,
8850 "configure pauseparam error, ret = %d.\n", ret);
8855 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8857 struct phy_device *phydev = hdev->hw.mac.phydev;
8858 u16 remote_advertising = 0;
8859 u16 local_advertising;
8860 u32 rx_pause, tx_pause;
8863 if (!phydev->link || !phydev->autoneg)
8866 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8869 remote_advertising = LPA_PAUSE_CAP;
8871 if (phydev->asym_pause)
8872 remote_advertising |= LPA_PAUSE_ASYM;
8874 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8875 remote_advertising);
8876 tx_pause = flowctl & FLOW_CTRL_TX;
8877 rx_pause = flowctl & FLOW_CTRL_RX;
8879 if (phydev->duplex == HCLGE_MAC_HALF) {
8884 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8887 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8888 u32 *rx_en, u32 *tx_en)
8890 struct hclge_vport *vport = hclge_get_vport(handle);
8891 struct hclge_dev *hdev = vport->back;
8892 struct phy_device *phydev = hdev->hw.mac.phydev;
8894 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8896 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8902 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8905 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8908 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8917 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8918 u32 rx_en, u32 tx_en)
8921 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8922 else if (rx_en && !tx_en)
8923 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8924 else if (!rx_en && tx_en)
8925 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8927 hdev->fc_mode_last_time = HCLGE_FC_NONE;
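/* i.e. the (rx_en, tx_en) pair maps one-to-one onto fc_mode:
 * (1,1) -> FULL, (1,0) -> RX_PAUSE, (0,1) -> TX_PAUSE, (0,0) -> NONE.
 */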
8929 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8932 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8933 u32 rx_en, u32 tx_en)
8935 struct hclge_vport *vport = hclge_get_vport(handle);
8936 struct hclge_dev *hdev = vport->back;
8937 struct phy_device *phydev = hdev->hw.mac.phydev;
8941 fc_autoneg = hclge_get_autoneg(handle);
8942 if (auto_neg != fc_autoneg) {
8943 dev_info(&hdev->pdev->dev,
8944 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8949 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8950 dev_info(&hdev->pdev->dev,
8951 "Priority flow control enabled. Cannot set link flow control.\n");
8955 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8957 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8960 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8963 return phy_start_aneg(phydev);
8968 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8969 u8 *auto_neg, u32 *speed, u8 *duplex)
8971 struct hclge_vport *vport = hclge_get_vport(handle);
8972 struct hclge_dev *hdev = vport->back;
8975 *speed = hdev->hw.mac.speed;
8977 *duplex = hdev->hw.mac.duplex;
8979 *auto_neg = hdev->hw.mac.autoneg;
8982 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8985 struct hclge_vport *vport = hclge_get_vport(handle);
8986 struct hclge_dev *hdev = vport->back;
8989 *media_type = hdev->hw.mac.media_type;
8992 *module_type = hdev->hw.mac.module_type;
8995 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8996 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8998 struct hclge_vport *vport = hclge_get_vport(handle);
8999 struct hclge_dev *hdev = vport->back;
9000 struct phy_device *phydev = hdev->hw.mac.phydev;
9001 int mdix_ctrl, mdix, is_resolved;
9002 unsigned int retval;
9005 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9006 *tp_mdix = ETH_TP_MDI_INVALID;
9010 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9012 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9013 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9014 HCLGE_PHY_MDIX_CTRL_S);
9016 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9017 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9018 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9020 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9022 switch (mdix_ctrl) {
9024 *tp_mdix_ctrl = ETH_TP_MDI;
9027 *tp_mdix_ctrl = ETH_TP_MDI_X;
9030 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9033 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9038 *tp_mdix = ETH_TP_MDI_INVALID;
9040 *tp_mdix = ETH_TP_MDI_X;
9042 *tp_mdix = ETH_TP_MDI;
9045 static void hclge_info_show(struct hclge_dev *hdev)
9047 struct device *dev = &hdev->pdev->dev;
9049 dev_info(dev, "PF info begin:\n");
9051 dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
9052 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9053 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9054 dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
9055 dev_info(dev, "Number of VMDq vports: %u\n", hdev->num_vmdq_vport);
9056 dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
9057 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9058 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9059 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9060 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9061 dev_info(dev, "This is %s PF\n",
9062 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9063 dev_info(dev, "DCB %s\n",
9064 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9065 dev_info(dev, "MQPRIO %s\n",
9066 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9068 dev_info(dev, "PF info end.\n");
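/* NIC client init races with resets: the reset counter is sampled before
 * init_instance() and re-checked afterwards; if a reset started or
 * completed in between, the instance is rolled back (registered bit
 * cleared, reset completion awaited, instance uninitialized).
 */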
9071 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9072 struct hclge_vport *vport)
9074 struct hnae3_client *client = vport->nic.client;
9075 struct hclge_dev *hdev = ae_dev->priv;
9076 int rst_cnt = hdev->rst_stats.reset_cnt;
9079 ret = client->ops->init_instance(&vport->nic);
9083 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9084 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9085 rst_cnt != hdev->rst_stats.reset_cnt) {
9090 /* Enable nic hw error interrupts */
9091 ret = hclge_config_nic_hw_error(hdev, true);
9093 dev_err(&ae_dev->pdev->dev,
9094 "fail(%d) to enable hw error interrupts\n", ret);
9098 hnae3_set_client_init_flag(client, ae_dev, 1);
9100 if (netif_msg_drv(&hdev->vport->nic))
9101 hclge_info_show(hdev);
9106 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9107 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9108 msleep(HCLGE_WAIT_RESET_DONE);
9110 client->ops->uninit_instance(&vport->nic, 0);
9115 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9116 struct hclge_vport *vport)
9118 struct hnae3_client *client = vport->roce.client;
9119 struct hclge_dev *hdev = ae_dev->priv;
9123 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9127 client = hdev->roce_client;
9128 ret = hclge_init_roce_base_info(vport);
9132 rst_cnt = hdev->rst_stats.reset_cnt;
9133 ret = client->ops->init_instance(&vport->roce);
9137 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9138 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9139 rst_cnt != hdev->rst_stats.reset_cnt) {
9144 /* Enable roce ras interrupts */
9145 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9147 dev_err(&ae_dev->pdev->dev,
9148 "fail(%d) to enable roce ras interrupts\n", ret);
9152 hnae3_set_client_init_flag(client, ae_dev, 1);
9157 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9158 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9159 msleep(HCLGE_WAIT_RESET_DONE);
9161 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
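/* Client registration dispatch: a KNIC client is initialized first and, if
 * a RoCE client was already registered, its instance is brought up on the
 * same vport; a RoCE client is only accepted when the device supports
 * RoCE. On failure the client pointers are cleared again.
 */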
9166 static int hclge_init_client_instance(struct hnae3_client *client,
9167 struct hnae3_ae_dev *ae_dev)
9169 struct hclge_dev *hdev = ae_dev->priv;
9170 struct hclge_vport *vport;
9173 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9174 vport = &hdev->vport[i];
9176 switch (client->type) {
9177 case HNAE3_CLIENT_KNIC:
9178 hdev->nic_client = client;
9179 vport->nic.client = client;
9180 ret = hclge_init_nic_client_instance(ae_dev, vport);
9184 ret = hclge_init_roce_client_instance(ae_dev, vport);
9189 case HNAE3_CLIENT_ROCE:
9190 if (hnae3_dev_roce_supported(hdev)) {
9191 hdev->roce_client = client;
9192 vport->roce.client = client;
9195 ret = hclge_init_roce_client_instance(ae_dev, vport);
9208 hdev->nic_client = NULL;
9209 vport->nic.client = NULL;
9212 hdev->roce_client = NULL;
9213 vport->roce.client = NULL;
9217 static void hclge_uninit_client_instance(struct hnae3_client *client,
9218 struct hnae3_ae_dev *ae_dev)
9220 struct hclge_dev *hdev = ae_dev->priv;
9221 struct hclge_vport *vport;
9224 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9225 vport = &hdev->vport[i];
9226 if (hdev->roce_client) {
9227 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9228 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9229 msleep(HCLGE_WAIT_RESET_DONE);
9231 hdev->roce_client->ops->uninit_instance(&vport->roce,
9233 hdev->roce_client = NULL;
9234 vport->roce.client = NULL;
9236 if (client->type == HNAE3_CLIENT_ROCE)
9238 if (hdev->nic_client && client->ops->uninit_instance) {
9239 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9240 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9241 msleep(HCLGE_WAIT_RESET_DONE);
9243 client->ops->uninit_instance(&vport->nic, 0);
9244 hdev->nic_client = NULL;
9245 vport->nic.client = NULL;
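/* PCI bring-up below follows the usual order: enable the device, try a
 * 64-bit DMA mask and fall back to 32-bit, request the BAR regions, set
 * bus mastering, then iomap BAR 2 for the command/configuration registers.
 */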
9250 static int hclge_pci_init(struct hclge_dev *hdev)
9252 struct pci_dev *pdev = hdev->pdev;
9253 struct hclge_hw *hw;
9256 ret = pci_enable_device(pdev);
9258 dev_err(&pdev->dev, "failed to enable PCI device\n");
9262 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9264 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9267 "can't set consistent PCI DMA");
9268 goto err_disable_device;
9270 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9273 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9275 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9276 goto err_disable_device;
9279 pci_set_master(pdev);
9281 hw->io_base = pcim_iomap(pdev, 2, 0);
9283 dev_err(&pdev->dev, "Can't map configuration register space\n");
9285 goto err_clr_master;
9288 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9292 pci_clear_master(pdev);
9293 pci_release_regions(pdev);
9295 pci_disable_device(pdev);
9300 static void hclge_pci_uninit(struct hclge_dev *hdev)
9302 struct pci_dev *pdev = hdev->pdev;
9304 pcim_iounmap(pdev, hdev->hw.io_base);
9305 pci_free_irq_vectors(pdev);
9306 pci_clear_master(pdev);
9307 pci_release_mem_regions(pdev);
9308 pci_disable_device(pdev);
9311 static void hclge_state_init(struct hclge_dev *hdev)
9313 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9314 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9315 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9316 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9317 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9318 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9319 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9322 static void hclge_state_uninit(struct hclge_dev *hdev)
9324 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9325 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9327 if (hdev->reset_timer.function)
9328 del_timer_sync(&hdev->reset_timer);
9329 if (hdev->service_task.work.func)
9330 cancel_delayed_work_sync(&hdev->service_task);
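/* FLR handshake: hclge_flr_prepare() requests an HNAE3_FLR_RESET and then
 * polls for the FLR_DOWN flag up to HCLGE_FLR_WAIT_CNT (50) times at
 * HCLGE_FLR_WAIT_MS (100 ms) intervals, i.e. roughly 5 seconds, before
 * giving up; hclge_flr_done() later signals completion via HNAE3_FLR_DONE.
 */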
9333 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9335 #define HCLGE_FLR_WAIT_MS 100
9336 #define HCLGE_FLR_WAIT_CNT 50
9337 struct hclge_dev *hdev = ae_dev->priv;
9340 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9341 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9342 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9343 hclge_reset_event(hdev->pdev, NULL);
9345 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9346 cnt++ < HCLGE_FLR_WAIT_CNT)
9347 msleep(HCLGE_FLR_WAIT_MS);
9349 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9350 dev_err(&hdev->pdev->dev,
9351 "flr wait down timeout: %d\n", cnt);
9354 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9356 struct hclge_dev *hdev = ae_dev->priv;
9358 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9361 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9365 for (i = 0; i < hdev->num_alloc_vport; i++) {
9366 struct hclge_vport *vport = &hdev->vport[i];
9369 /* Send cmd to clear VF's FUNC_RST_ING */
9370 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9372 dev_warn(&hdev->pdev->dev,
9373 "clear vf(%u) rst failed %d!\n",
9374 vport->vport_id, ret);
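/* Rough init order in hclge_init_ae_dev() (as visible below): PCI setup,
 * command queue and firmware command init, capability query, device
 * configuration, MSI/MSI-X and misc IRQ init, TQP/vport allocation and
 * mapping, MDIO (copper only), UMV space, MAC, TSO/GRO, VLAN, TM
 * scheduling, RSS, manager table and flow director config.
 */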
9378 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9380 struct pci_dev *pdev = ae_dev->pdev;
9381 struct hclge_dev *hdev;
9384 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9391 hdev->ae_dev = ae_dev;
9392 hdev->reset_type = HNAE3_NONE_RESET;
9393 hdev->reset_level = HNAE3_FUNC_RESET;
9394 ae_dev->priv = hdev;
9396 /* HW supports 2-layer VLAN tags */
9397 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9399 mutex_init(&hdev->vport_lock);
9400 spin_lock_init(&hdev->fd_rule_lock);
9402 ret = hclge_pci_init(hdev);
9404 dev_err(&pdev->dev, "PCI init failed\n");
9408 /* Firmware command queue initialize */
9409 ret = hclge_cmd_queue_init(hdev);
9411 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9412 goto err_pci_uninit;
9415 /* Firmware command initialize */
9416 ret = hclge_cmd_init(hdev);
9418 goto err_cmd_uninit;
9420 ret = hclge_get_cap(hdev);
9422 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9424 goto err_cmd_uninit;
9427 ret = hclge_configure(hdev);
9429 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9430 goto err_cmd_uninit;
9433 ret = hclge_init_msi(hdev);
9435 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9436 goto err_cmd_uninit;
9439 ret = hclge_misc_irq_init(hdev);
9442 "Misc IRQ(vector0) init error, ret = %d.\n",
9444 goto err_msi_uninit;
9447 ret = hclge_alloc_tqps(hdev);
9449 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9450 goto err_msi_irq_uninit;
9453 ret = hclge_alloc_vport(hdev);
9455 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9456 goto err_msi_irq_uninit;
9459 ret = hclge_map_tqp(hdev);
9461 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9462 goto err_msi_irq_uninit;
9465 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9466 ret = hclge_mac_mdio_config(hdev);
9468 dev_err(&hdev->pdev->dev,
9469 "mdio config fail ret=%d\n", ret);
9470 goto err_msi_irq_uninit;
9474 ret = hclge_init_umv_space(hdev);
9476 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9477 goto err_mdiobus_unreg;
9480 ret = hclge_mac_init(hdev);
9482 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9483 goto err_mdiobus_unreg;
9486 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9488 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9489 goto err_mdiobus_unreg;
9492 ret = hclge_config_gro(hdev, true);
9494 goto err_mdiobus_unreg;
9496 ret = hclge_init_vlan_config(hdev);
9498 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9499 goto err_mdiobus_unreg;
9502 ret = hclge_tm_schd_init(hdev);
9504 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9505 goto err_mdiobus_unreg;
9508 hclge_rss_init_cfg(hdev);
9509 ret = hclge_rss_init_hw(hdev);
9511 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9512 goto err_mdiobus_unreg;
9515 ret = init_mgr_tbl(hdev);
9517 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9518 goto err_mdiobus_unreg;
9521 ret = hclge_init_fd_config(hdev);
9524 "fd table init fail, ret=%d\n", ret);
9525 goto err_mdiobus_unreg;
9528 INIT_KFIFO(hdev->mac_tnl_log);
9530 hclge_dcb_ops_set(hdev);
9532 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9533 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9535 /* Set up affinity after the service timer setup because add_timer_on
9536 * is called from the affinity notify callback.
9538 hclge_misc_affinity_setup(hdev);
9540 hclge_clear_all_event_cause(hdev);
9541 hclge_clear_resetting_state(hdev);
9543 /* Log and clear the hw errors that have already occurred */
9544 hclge_handle_all_hns_hw_errors(ae_dev);
9546 /* Request a delayed reset for error recovery, because an immediate
9547 * global reset on this PF would disturb the pending initialization of other PFs
9549 if (ae_dev->hw_err_reset_req) {
9550 enum hnae3_reset_type reset_level;
9552 reset_level = hclge_get_reset_level(ae_dev,
9553 &ae_dev->hw_err_reset_req);
9554 hclge_set_def_reset_request(ae_dev, reset_level);
9555 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9558 /* Enable MISC vector(vector0) */
9559 hclge_enable_vector(&hdev->misc_vector, true);
9561 hclge_state_init(hdev);
9562 hdev->last_reset_time = jiffies;
9564 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9567 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9572 if (hdev->hw.mac.phydev)
9573 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9575 hclge_misc_irq_uninit(hdev);
9577 pci_free_irq_vectors(pdev);
9579 hclge_cmd_uninit(hdev);
9581 pcim_iounmap(pdev, hdev->hw.io_base);
9582 pci_clear_master(pdev);
9583 pci_release_regions(pdev);
9584 pci_disable_device(pdev);
9589 static void hclge_stats_clear(struct hclge_dev *hdev)
9591 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
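/* VF spoof checking combines two knobs: the MAC anti-spoof switch
 * parameter and the per-VF ingress VLAN filter; both are toggled together
 * by hclge_set_vf_spoofchk_hw() below.
 */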
9594 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9596 return hclge_config_switch_param(hdev, vf, enable,
9597 HCLGE_SWITCH_ANTI_SPOOF_MASK);
9600 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9602 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9603 HCLGE_FILTER_FE_NIC_INGRESS_B,
9607 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9611 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9613 dev_err(&hdev->pdev->dev,
9614 "Set vf %d mac spoof check %s failed, ret=%d\n",
9615 vf, enable ? "on" : "off", ret);
9619 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9621 dev_err(&hdev->pdev->dev,
9622 "Set vf %d vlan spoof check %s failed, ret=%d\n",
9623 vf, enable ? "on" : "off", ret);
9628 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9631 struct hclge_vport *vport = hclge_get_vport(handle);
9632 struct hclge_dev *hdev = vport->back;
9633 u32 new_spoofchk = enable ? 1 : 0;
9636 if (hdev->pdev->revision == 0x20)
9639 vport = hclge_get_vf_vport(hdev, vf);
9643 if (vport->vf_info.spoofchk == new_spoofchk)
9646 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9647 dev_warn(&hdev->pdev->dev,
9648 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9650 else if (enable && hclge_is_umv_space_full(vport))
9651 dev_warn(&hdev->pdev->dev,
9652 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9655 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9659 vport->vf_info.spoofchk = new_spoofchk;
9663 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9665 struct hclge_vport *vport = hdev->vport;
9669 if (hdev->pdev->revision == 0x20)
9672 /* resume the vf spoof check state after reset */
9673 for (i = 0; i < hdev->num_alloc_vport; i++) {
9674 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9675 vport->vf_info.spoofchk);
9685 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9687 struct hclge_vport *vport = hclge_get_vport(handle);
9688 struct hclge_dev *hdev = vport->back;
9689 u32 new_trusted = enable ? 1 : 0;
9693 vport = hclge_get_vf_vport(hdev, vf);
9697 if (vport->vf_info.trusted == new_trusted)
9700 /* Disable promisc mode for VF if it is not trusted any more. */
9701 if (!enable && vport->vf_info.promisc_enable) {
9702 en_bc_pmc = hdev->pdev->revision != 0x20;
9703 ret = hclge_set_vport_promisc_mode(vport, false, false,
9707 vport->vf_info.promisc_enable = 0;
9708 hclge_inform_vf_promisc_info(vport);
9711 vport->vf_info.trusted = new_trusted;
9716 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9721 /* reset vf rate to default value */
9722 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9723 struct hclge_vport *vport = &hdev->vport[vf];
9725 vport->vf_info.max_tx_rate = 0;
9726 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9728 dev_err(&hdev->pdev->dev,
9729 "vf%d failed to reset to default, ret=%d\n",
9730 vf - HCLGE_VF_VPORT_START_NUM, ret);
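/* Rate-limiting notes: only a maximum TX rate is supported, so
 * min_tx_rate must be 0 and max_tx_rate must lie in [0, mac.max_speed];
 * 0 means "unlimited". Illustrative usage (hypothetical device name):
 *
 *   ip link set eth0 vf 0 max_tx_rate 1000
 */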
9734 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9735 int min_tx_rate, int max_tx_rate)
9737 if (min_tx_rate != 0 ||
9738 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9739 dev_err(&hdev->pdev->dev,
9740 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9741 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9748 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9749 int min_tx_rate, int max_tx_rate, bool force)
9751 struct hclge_vport *vport = hclge_get_vport(handle);
9752 struct hclge_dev *hdev = vport->back;
9755 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9759 vport = hclge_get_vf_vport(hdev, vf);
9763 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9766 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9770 vport->vf_info.max_tx_rate = max_tx_rate;
9775 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9777 struct hnae3_handle *handle = &hdev->vport->nic;
9778 struct hclge_vport *vport;
9782 /* resume the vf max_tx_rate after reset */
9783 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9784 vport = hclge_get_vf_vport(hdev, vf);
9788 /* Zero means max rate; after reset, the firmware has already set it
9789 * to max rate, so just continue.
9791 if (!vport->vf_info.max_tx_rate)
9794 ret = hclge_set_vf_rate(handle, vf, 0,
9795 vport->vf_info.max_tx_rate, true);
9797 dev_err(&hdev->pdev->dev,
9798 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9799 vf, vport->vf_info.max_tx_rate, ret);
9807 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9809 struct hclge_vport *vport = hdev->vport;
9812 for (i = 0; i < hdev->num_alloc_vport; i++) {
9813 hclge_vport_stop(vport);
9818 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9820 struct hclge_dev *hdev = ae_dev->priv;
9821 struct pci_dev *pdev = ae_dev->pdev;
9824 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9826 hclge_stats_clear(hdev);
9827 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9828 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9830 ret = hclge_cmd_init(hdev);
9832 dev_err(&pdev->dev, "Cmd queue init failed\n");
9836 ret = hclge_map_tqp(hdev);
9838 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9842 hclge_reset_umv_space(hdev);
9844 ret = hclge_mac_init(hdev);
9846 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9850 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9852 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9856 ret = hclge_config_gro(hdev, true);
9860 ret = hclge_init_vlan_config(hdev);
9862 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9866 ret = hclge_tm_init_hw(hdev, true);
9868 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9872 ret = hclge_rss_init_hw(hdev);
9874 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9878 ret = hclge_init_fd_config(hdev);
9880 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9884 /* Log and clear the hw errors that have already occurred */
9885 hclge_handle_all_hns_hw_errors(ae_dev);
9887 /* Re-enable the hw error interrupts because
9888 * the interrupts get disabled on global reset.
9890 ret = hclge_config_nic_hw_error(hdev, true);
9893 "fail(%d) to re-enable NIC hw error interrupts\n",
9898 if (hdev->roce_client) {
9899 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9902 "fail(%d) to re-enable roce ras interrupts\n",
9908 hclge_reset_vport_state(hdev);
9909 ret = hclge_reset_vport_spoofchk(hdev);
9913 ret = hclge_resume_vf_rate(hdev);
9917 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9923 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9925 struct hclge_dev *hdev = ae_dev->priv;
9926 struct hclge_mac *mac = &hdev->hw.mac;
9928 hclge_reset_vf_rate(hdev);
9929 hclge_misc_affinity_teardown(hdev);
9930 hclge_state_uninit(hdev);
9933 mdiobus_unregister(mac->mdio_bus);
9935 hclge_uninit_umv_space(hdev);
9937 /* Disable MISC vector(vector0) */
9938 hclge_enable_vector(&hdev->misc_vector, false);
9939 synchronize_irq(hdev->misc_vector.vector_irq);
9941 /* Disable all hw interrupts */
9942 hclge_config_mac_tnl_int(hdev, false);
9943 hclge_config_nic_hw_error(hdev, false);
9944 hclge_config_rocee_ras_interrupt(hdev, false);
9946 hclge_cmd_uninit(hdev);
9947 hclge_misc_irq_uninit(hdev);
9948 hclge_pci_uninit(hdev);
9949 mutex_destroy(&hdev->vport_lock);
9950 hclge_uninit_vport_mac_table(hdev);
9951 hclge_uninit_vport_vlan_table(hdev);
9952 ae_dev->priv = NULL;
9955 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9957 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9958 struct hclge_vport *vport = hclge_get_vport(handle);
9959 struct hclge_dev *hdev = vport->back;
9961 return min_t(u32, hdev->rss_size_max,
9962 vport->alloc_tqps / kinfo->num_tc);
9965 static void hclge_get_channels(struct hnae3_handle *handle,
9966 struct ethtool_channels *ch)
9968 ch->max_combined = hclge_get_max_channels(handle);
9969 ch->other_count = 1;
9971 ch->combined_count = handle->kinfo.rss_size;
9974 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9975 u16 *alloc_tqps, u16 *max_rss_size)
9977 struct hclge_vport *vport = hclge_get_vport(handle);
9978 struct hclge_dev *hdev = vport->back;
9980 *alloc_tqps = vport->alloc_tqps;
9981 *max_rss_size = hdev->rss_size_max;
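/* hclge_set_channels() re-derives the RSS TC mode from the new rss_size:
 * tc_size holds ilog2(roundup_pow_of_two(rss_size)) and tc_offset is
 * rss_size * tc. Worked example (illustrative): rss_size = 12 rounds up
 * to 16, so tc_size = 4 and TC i starts at queue 12 * i. Unless the user
 * has configured the RSS indirection table, it is rebuilt as
 * rss_indir[i] = i % rss_size.
 */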
9984 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9985 bool rxfh_configured)
9987 struct hclge_vport *vport = hclge_get_vport(handle);
9988 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9989 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9990 struct hclge_dev *hdev = vport->back;
9991 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9992 u16 cur_rss_size = kinfo->rss_size;
9993 u16 cur_tqps = kinfo->num_tqps;
9994 u16 tc_valid[HCLGE_MAX_TC_NUM];
10000 kinfo->req_rss_size = new_tqps_num;
10002 ret = hclge_tm_vport_map_update(hdev);
10004 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10008 roundup_size = roundup_pow_of_two(kinfo->rss_size);
10009 roundup_size = ilog2(roundup_size);
10010 /* Set the RSS TC mode according to the new RSS size */
10011 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10014 if (!(hdev->hw_tc_map & BIT(i)))
10018 tc_size[i] = roundup_size;
10019 tc_offset[i] = kinfo->rss_size * i;
10021 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10025 /* RSS indirection table has been configured by the user */
10026 if (rxfh_configured)
10029 /* Reinitialize the RSS indirection table according to the new RSS size */
10030 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10034 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10035 rss_indir[i] = i % kinfo->rss_size;
10037 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10039 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10046 dev_info(&hdev->pdev->dev,
10047 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10048 cur_rss_size, kinfo->rss_size,
10049 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10054 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10055 u32 *regs_num_64_bit)
10057 struct hclge_desc desc;
10061 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10062 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10064 dev_err(&hdev->pdev->dev,
10065 "Query register number cmd failed, ret = %d.\n", ret);
10069 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10070 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10072 total_num = *regs_num_32_bit + *regs_num_64_bit;
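/* Register dump descriptor math (derived from the loops below): the first
 * descriptor carries HCLGE_32_BIT_REG_RTN_DATANUM - nodata values (its
 * leading words hold no register data), each continuation descriptor
 * carries the full DATANUM, hence
 * cmd_num = DIV_ROUND_UP(regs_num + nodata, DATANUM). For example, 14
 * 32-bit registers need DIV_ROUND_UP(14 + 2, 8) = 2 descriptors, which
 * provide exactly 6 + 8 = 14 slots.
 */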
10079 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10082 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10083 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10085 struct hclge_desc *desc;
10086 u32 *reg_val = data;
10096 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10097 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10098 HCLGE_32_BIT_REG_RTN_DATANUM);
10099 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10103 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10104 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10106 dev_err(&hdev->pdev->dev,
10107 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10112 for (i = 0; i < cmd_num; i++) {
10114 desc_data = (__le32 *)(&desc[i].data[0]);
10115 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10117 desc_data = (__le32 *)(&desc[i]);
10118 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10120 for (k = 0; k < n; k++) {
10121 *reg_val++ = le32_to_cpu(*desc_data++);
10133 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10136 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10137 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10139 struct hclge_desc *desc;
10140 u64 *reg_val = data;
10150 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10151 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10152 HCLGE_64_BIT_REG_RTN_DATANUM);
10153 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10157 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10158 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10160 dev_err(&hdev->pdev->dev,
10161 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10166 for (i = 0; i < cmd_num; i++) {
10168 desc_data = (__le64 *)(&desc[i].data[0]);
10169 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10171 desc_data = (__le64 *)(&desc[i]);
10172 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10174 for (k = 0; k < n; k++) {
10175 *reg_val++ = le64_to_cpu(*desc_data++);
10187 #define MAX_SEPARATE_NUM 4
10188 #define SEPARATOR_VALUE 0xFDFCFBFA
10189 #define REG_NUM_PER_LINE 4
10190 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
10191 #define REG_SEPARATOR_LINE 1
10192 #define REG_NUM_REMAIN_MASK 3
10193 #define BD_LIST_MAX_NUM 30
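/* Dump layout convention used below: each register group is padded with
 * SEPARATOR_VALUE (0xFDFCFBFA) words up to a REG_NUM_PER_LINE (4-word,
 * 16-byte) line boundary, which keeps group boundaries visible in the
 * raw dump.
 */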
10195 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10197 /* prepare 4 commands to query DFX BD number */
10198 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10199 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10200 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10201 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10202 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10203 desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10204 hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10206 return hclge_cmd_send(&hdev->hw, desc, 4);
10209 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10213 #define HCLGE_DFX_REG_BD_NUM 4
10215 u32 entries_per_desc, desc_index, index, offset, i;
10216 struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
10219 ret = hclge_query_bd_num_cmd_send(hdev, desc);
10221 dev_err(&hdev->pdev->dev,
10222 "Get dfx bd num fail, status is %d.\n", ret);
10226 entries_per_desc = ARRAY_SIZE(desc[0].data);
10227 for (i = 0; i < type_num; i++) {
10228 offset = hclge_dfx_bd_offset_list[i];
10229 index = offset % entries_per_desc;
10230 desc_index = offset / entries_per_desc;
10231 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10237 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10238 struct hclge_desc *desc_src, int bd_num,
10239 enum hclge_opcode_type cmd)
10241 struct hclge_desc *desc = desc_src;
10244 hclge_cmd_setup_basic_desc(desc, cmd, true);
10245 for (i = 0; i < bd_num - 1; i++) {
10246 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10248 hclge_cmd_setup_basic_desc(desc, cmd, true);
10252 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10254 dev_err(&hdev->pdev->dev,
10255 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10261 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10264 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10265 struct hclge_desc *desc = desc_src;
10268 entries_per_desc = ARRAY_SIZE(desc->data);
10269 reg_num = entries_per_desc * bd_num;
10270 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10271 for (i = 0; i < reg_num; i++) {
10272 index = i % entries_per_desc;
10273 desc_index = i / entries_per_desc;
10274 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10276 for (i = 0; i < separator_num; i++)
10277 *reg++ = SEPARATOR_VALUE;
10279 return reg_num + separator_num;
10282 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10284 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10285 int data_len_per_desc, data_len, bd_num, i;
10286 int bd_num_list[BD_LIST_MAX_NUM];
10289 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10291 dev_err(&hdev->pdev->dev,
10292 "Get dfx reg bd num fail, status is %d.\n", ret);
10296 data_len_per_desc = sizeof_field(struct hclge_desc, data);
10298 for (i = 0; i < dfx_reg_type_num; i++) {
10299 bd_num = bd_num_list[i];
10300 data_len = data_len_per_desc * bd_num;
10301 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10307 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10309 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10310 int bd_num, bd_num_max, buf_len, i;
10311 int bd_num_list[BD_LIST_MAX_NUM];
10312 struct hclge_desc *desc_src;
10316 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10318 dev_err(&hdev->pdev->dev,
10319 "Get dfx reg bd num fail, status is %d.\n", ret);
10323 bd_num_max = bd_num_list[0];
10324 for (i = 1; i < dfx_reg_type_num; i++)
10325 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10327 buf_len = sizeof(*desc_src) * bd_num_max;
10328 desc_src = kzalloc(buf_len, GFP_KERNEL);
10330 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
10334 for (i = 0; i < dfx_reg_type_num; i++) {
10335 bd_num = bd_num_list[i];
10336 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10337 hclge_dfx_reg_opcode_list[i]);
10339 dev_err(&hdev->pdev->dev,
10340 "Get dfx reg fail, status is %d.\n", ret);
10344 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10351 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10352 struct hnae3_knic_private_info *kinfo)
10354 #define HCLGE_RING_REG_OFFSET 0x200
10355 #define HCLGE_RING_INT_REG_OFFSET 0x4
10357 int i, j, reg_num, separator_num;
10361 /* fetch per-PF register values from the PF PCIe register space */
10362 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10363 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10364 for (i = 0; i < reg_num; i++)
10365 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10366 for (i = 0; i < separator_num; i++)
10367 *reg++ = SEPARATOR_VALUE;
10368 data_num_sum = reg_num + separator_num;
10370 reg_num = ARRAY_SIZE(common_reg_addr_list);
10371 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10372 for (i = 0; i < reg_num; i++)
10373 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10374 for (i = 0; i < separator_num; i++)
10375 *reg++ = SEPARATOR_VALUE;
10376 data_num_sum += reg_num + separator_num;
10378 reg_num = ARRAY_SIZE(ring_reg_addr_list);
10379 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10380 for (j = 0; j < kinfo->num_tqps; j++) {
10381 for (i = 0; i < reg_num; i++)
10382 *reg++ = hclge_read_dev(&hdev->hw,
10383 ring_reg_addr_list[i] +
10384 HCLGE_RING_REG_OFFSET * j);
10385 for (i = 0; i < separator_num; i++)
10386 *reg++ = SEPARATOR_VALUE;
10388 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10390 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10391 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10392 for (j = 0; j < hdev->num_msi_used - 1; j++) {
10393 for (i = 0; i < reg_num; i++)
10394 *reg++ = hclge_read_dev(&hdev->hw,
10395 tqp_intr_reg_addr_list[i] +
10396 HCLGE_RING_INT_REG_OFFSET * j);
10397 for (i = 0; i < separator_num; i++)
10398 *reg++ = SEPARATOR_VALUE;
10400 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10402 return data_num_sum;
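/* Length accounting in hclge_get_regs_len() mirrors the padding above:
 * each group is counted in whole REG_LEN_PER_LINE (16-byte) lines plus
 * one separator line. Worked example (illustrative): a group of 14 u32
 * registers is 56 bytes -> 56 / 16 = 3 lines, plus REG_SEPARATOR_LINE =
 * 4 lines total, i.e. 64 bytes reserved.
 */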
10405 static int hclge_get_regs_len(struct hnae3_handle *handle)
10407 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10408 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10409 struct hclge_vport *vport = hclge_get_vport(handle);
10410 struct hclge_dev *hdev = vport->back;
10411 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10412 int regs_lines_32_bit, regs_lines_64_bit;
10415 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10417 dev_err(&hdev->pdev->dev,
10418 "Get register number failed, ret = %d.\n", ret);
10422 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10424 dev_err(&hdev->pdev->dev,
10425 "Get dfx reg len failed, ret = %d.\n", ret);
10429 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10430 REG_SEPARATOR_LINE;
10431 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10432 REG_SEPARATOR_LINE;
10433 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10434 REG_SEPARATOR_LINE;
10435 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10436 REG_SEPARATOR_LINE;
10437 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10438 REG_SEPARATOR_LINE;
10439 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10440 REG_SEPARATOR_LINE;
10442 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10443 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10444 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10447 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10450 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10451 struct hclge_vport *vport = hclge_get_vport(handle);
10452 struct hclge_dev *hdev = vport->back;
10453 u32 regs_num_32_bit, regs_num_64_bit;
10454 int i, reg_num, separator_num, ret;
10457 *version = hdev->fw_version;
10459 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10461 dev_err(&hdev->pdev->dev,
10462 "Get register number failed, ret = %d.\n", ret);
10466 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10468 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10470 dev_err(&hdev->pdev->dev,
10471 "Get 32 bit register failed, ret = %d.\n", ret);
10474 reg_num = regs_num_32_bit;
10476 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10477 for (i = 0; i < separator_num; i++)
10478 *reg++ = SEPARATOR_VALUE;
10480 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10482 dev_err(&hdev->pdev->dev,
10483 "Get 64 bit register failed, ret = %d.\n", ret);
10486 reg_num = regs_num_64_bit * 2;
10488 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10489 for (i = 0; i < separator_num; i++)
10490 *reg++ = SEPARATOR_VALUE;
10492 ret = hclge_get_dfx_reg(hdev, reg);
10494 dev_err(&hdev->pdev->dev,
10495 "Get dfx register failed, ret = %d.\n", ret);
10498 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10500 struct hclge_set_led_state_cmd *req;
10501 struct hclge_desc desc;
10504 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10506 req = (struct hclge_set_led_state_cmd *)desc.data;
10507 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10508 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10510 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10512 dev_err(&hdev->pdev->dev,
10513 "Send set led state cmd error, ret =%d\n", ret);
10518 enum hclge_led_status {
10521 HCLGE_LED_NO_CHANGE = 0xFF,
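/* Backs "ethtool -p <dev>": ETHTOOL_ID_ACTIVE turns the locate LED on and
 * ETHTOOL_ID_INACTIVE turns it off, both through the
 * HCLGE_OPC_LED_STATUS_CFG firmware command above.
 */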
10524 static int hclge_set_led_id(struct hnae3_handle *handle,
10525 enum ethtool_phys_id_state status)
10527 struct hclge_vport *vport = hclge_get_vport(handle);
10528 struct hclge_dev *hdev = vport->back;
10531 case ETHTOOL_ID_ACTIVE:
10532 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10533 case ETHTOOL_ID_INACTIVE:
10534 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10540 static void hclge_get_link_mode(struct hnae3_handle *handle,
10541 unsigned long *supported,
10542 unsigned long *advertising)
10544 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10545 struct hclge_vport *vport = hclge_get_vport(handle);
10546 struct hclge_dev *hdev = vport->back;
10547 unsigned int idx = 0;
10549 for (; idx < size; idx++) {
10550 supported[idx] = hdev->hw.mac.supported[idx];
10551 advertising[idx] = hdev->hw.mac.advertising[idx];
10555 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10557 struct hclge_vport *vport = hclge_get_vport(handle);
10558 struct hclge_dev *hdev = vport->back;
10560 return hclge_config_gro(hdev, enable);
10563 static const struct hnae3_ae_ops hclge_ops = {
10564 .init_ae_dev = hclge_init_ae_dev,
10565 .uninit_ae_dev = hclge_uninit_ae_dev,
10566 .flr_prepare = hclge_flr_prepare,
10567 .flr_done = hclge_flr_done,
10568 .init_client_instance = hclge_init_client_instance,
10569 .uninit_client_instance = hclge_uninit_client_instance,
10570 .map_ring_to_vector = hclge_map_ring_to_vector,
10571 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10572 .get_vector = hclge_get_vector,
10573 .put_vector = hclge_put_vector,
10574 .set_promisc_mode = hclge_set_promisc_mode,
10575 .set_loopback = hclge_set_loopback,
10576 .start = hclge_ae_start,
10577 .stop = hclge_ae_stop,
10578 .client_start = hclge_client_start,
10579 .client_stop = hclge_client_stop,
10580 .get_status = hclge_get_status,
10581 .get_ksettings_an_result = hclge_get_ksettings_an_result,
10582 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10583 .get_media_type = hclge_get_media_type,
10584 .check_port_speed = hclge_check_port_speed,
10585 .get_fec = hclge_get_fec,
10586 .set_fec = hclge_set_fec,
10587 .get_rss_key_size = hclge_get_rss_key_size,
10588 .get_rss_indir_size = hclge_get_rss_indir_size,
10589 .get_rss = hclge_get_rss,
10590 .set_rss = hclge_set_rss,
10591 .set_rss_tuple = hclge_set_rss_tuple,
10592 .get_rss_tuple = hclge_get_rss_tuple,
10593 .get_tc_size = hclge_get_tc_size,
10594 .get_mac_addr = hclge_get_mac_addr,
10595 .set_mac_addr = hclge_set_mac_addr,
10596 .do_ioctl = hclge_do_ioctl,
10597 .add_uc_addr = hclge_add_uc_addr,
10598 .rm_uc_addr = hclge_rm_uc_addr,
10599 .add_mc_addr = hclge_add_mc_addr,
10600 .rm_mc_addr = hclge_rm_mc_addr,
10601 .set_autoneg = hclge_set_autoneg,
10602 .get_autoneg = hclge_get_autoneg,
10603 .restart_autoneg = hclge_restart_autoneg,
10604 .halt_autoneg = hclge_halt_autoneg,
10605 .get_pauseparam = hclge_get_pauseparam,
10606 .set_pauseparam = hclge_set_pauseparam,
10607 .set_mtu = hclge_set_mtu,
10608 .reset_queue = hclge_reset_tqp,
10609 .get_stats = hclge_get_stats,
10610 .get_mac_stats = hclge_get_mac_stat,
10611 .update_stats = hclge_update_stats,
10612 .get_strings = hclge_get_strings,
10613 .get_sset_count = hclge_get_sset_count,
10614 .get_fw_version = hclge_get_fw_version,
10615 .get_mdix_mode = hclge_get_mdix_mode,
10616 .enable_vlan_filter = hclge_enable_vlan_filter,
10617 .set_vlan_filter = hclge_set_vlan_filter,
10618 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10619 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10620 .reset_event = hclge_reset_event,
10621 .get_reset_level = hclge_get_reset_level,
10622 .set_default_reset_request = hclge_set_def_reset_request,
10623 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10624 .set_channels = hclge_set_channels,
10625 .get_channels = hclge_get_channels,
10626 .get_regs_len = hclge_get_regs_len,
10627 .get_regs = hclge_get_regs,
10628 .set_led_id = hclge_set_led_id,
10629 .get_link_mode = hclge_get_link_mode,
10630 .add_fd_entry = hclge_add_fd_entry,
10631 .del_fd_entry = hclge_del_fd_entry,
10632 .del_all_fd_entries = hclge_del_all_fd_entries,
10633 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10634 .get_fd_rule_info = hclge_get_fd_rule_info,
10635 .get_fd_all_rules = hclge_get_all_rules,
10636 .restore_fd_rules = hclge_restore_fd_entries,
10637 .enable_fd = hclge_enable_fd,
10638 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10639 .dbg_run_cmd = hclge_dbg_run_cmd,
10640 .handle_hw_ras_error = hclge_handle_hw_ras_error,
10641 .get_hw_reset_stat = hclge_get_hw_reset_stat,
10642 .ae_dev_resetting = hclge_ae_dev_resetting,
10643 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10644 .set_gro_en = hclge_gro_en,
10645 .get_global_queue_id = hclge_covert_handle_qid_global,
10646 .set_timer_task = hclge_set_timer_task,
10647 .mac_connect_phy = hclge_mac_connect_phy,
10648 .mac_disconnect_phy = hclge_mac_disconnect_phy,
10649 .restore_vlan_table = hclge_restore_vlan_table,
10650 .get_vf_config = hclge_get_vf_config,
10651 .set_vf_link_state = hclge_set_vf_link_state,
10652 .set_vf_spoofchk = hclge_set_vf_spoofchk,
10653 .set_vf_trust = hclge_set_vf_trust,
10654 .set_vf_rate = hclge_set_vf_rate,
10655 .set_vf_mac = hclge_set_vf_mac,
10658 static struct hnae3_ae_algo ae_algo = {
10660 .pdev_id_table = ae_algo_pci_tbl,
10663 static int hclge_init(void)
10665 pr_info("%s is initializing\n", HCLGE_NAME);
10667 hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
10669 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10673 hnae3_register_ae_algo(&ae_algo);
10678 static void hclge_exit(void)
10680 hnae3_unregister_ae_algo(&ae_algo);
10681 destroy_workqueue(hclge_wq);
10683 module_init(hclge_init);
10684 module_exit(hclge_exit);
10686 MODULE_LICENSE("GPL");
10687 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10688 MODULE_DESCRIPTION("HCLGE Driver");
10689 MODULE_VERSION(HCLGE_MOD_VERSION);