net: hns3: remove an unnecessary check in hclge_set_umv_space()
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME                      "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT     256U
#define HCLGE_BUF_MUL_BY        2
#define HCLGE_BUF_DIV_BY        2
#define NEED_RESERVE_TC_NUM     2
#define BUF_MAX_PERCENT         100
#define BUF_RESERVE_PERCENT     90

#define HCLGE_RESET_MAX_FAIL_CNT        5
#define HCLGE_RESET_SYNC_TIME           100
#define HCLGE_PF_RESET_SYNC_TIME        20
#define HCLGE_PF_RESET_SYNC_CNT         1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET        1
#define HCLGE_DFX_SSU_0_BD_OFFSET       2
#define HCLGE_DFX_SSU_1_BD_OFFSET       3
#define HCLGE_DFX_IGU_BD_OFFSET         4
#define HCLGE_DFX_RPU_0_BD_OFFSET       5
#define HCLGE_DFX_RPU_1_BD_OFFSET       6
#define HCLGE_DFX_NCSI_BD_OFFSET        7
#define HCLGE_DFX_RTC_BD_OFFSET         8
#define HCLGE_DFX_PPP_BD_OFFSET         9
#define HCLGE_DFX_RCB_BD_OFFSET         10
#define HCLGE_DFX_TQP_BD_OFFSET         11
#define HCLGE_DFX_SSU_2_BD_OFFSET       12

#define HCLGE_LINK_STATUS_MS    10

#define HCLGE_VF_VPORT_START_NUM        1

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
                                                   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
                                         HCLGE_CMDQ_TX_ADDR_H_REG,
                                         HCLGE_CMDQ_TX_DEPTH_REG,
                                         HCLGE_CMDQ_TX_TAIL_REG,
                                         HCLGE_CMDQ_TX_HEAD_REG,
                                         HCLGE_CMDQ_RX_ADDR_L_REG,
                                         HCLGE_CMDQ_RX_ADDR_H_REG,
                                         HCLGE_CMDQ_RX_DEPTH_REG,
                                         HCLGE_CMDQ_RX_TAIL_REG,
                                         HCLGE_CMDQ_RX_HEAD_REG,
                                         HCLGE_VECTOR0_CMDQ_SRC_REG,
                                         HCLGE_CMDQ_INTR_STS_REG,
                                         HCLGE_CMDQ_INTR_EN_REG,
                                         HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
                                           HCLGE_VECTOR0_OTER_EN_REG,
                                           HCLGE_MISC_RESET_STS_REG,
                                           HCLGE_MISC_VECTOR_INT_STS,
                                           HCLGE_GLOBAL_RESET_REG,
                                           HCLGE_FUN_RST_ING,
                                           HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
                                         HCLGE_RING_RX_ADDR_H_REG,
                                         HCLGE_RING_RX_BD_NUM_REG,
                                         HCLGE_RING_RX_BD_LENGTH_REG,
                                         HCLGE_RING_RX_MERGE_EN_REG,
                                         HCLGE_RING_RX_TAIL_REG,
                                         HCLGE_RING_RX_HEAD_REG,
                                         HCLGE_RING_RX_FBD_NUM_REG,
                                         HCLGE_RING_RX_OFFSET_REG,
                                         HCLGE_RING_RX_FBD_OFFSET_REG,
                                         HCLGE_RING_RX_STASH_REG,
                                         HCLGE_RING_RX_BD_ERR_REG,
                                         HCLGE_RING_TX_ADDR_L_REG,
                                         HCLGE_RING_TX_ADDR_H_REG,
                                         HCLGE_RING_TX_BD_NUM_REG,
                                         HCLGE_RING_TX_PRIORITY_REG,
                                         HCLGE_RING_TX_TC_REG,
                                         HCLGE_RING_TX_MERGE_EN_REG,
                                         HCLGE_RING_TX_TAIL_REG,
                                         HCLGE_RING_TX_HEAD_REG,
                                         HCLGE_RING_TX_FBD_NUM_REG,
                                         HCLGE_RING_TX_OFFSET_REG,
                                         HCLGE_RING_TX_EBD_NUM_REG,
                                         HCLGE_RING_TX_EBD_OFFSET_REG,
                                         HCLGE_RING_TX_BD_ERR_REG,
                                         HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
                                             HCLGE_TQP_INTR_GL0_REG,
                                             HCLGE_TQP_INTR_GL1_REG,
                                             HCLGE_TQP_INTR_GL2_REG,
                                             HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
        "App    Loopback test",
        "Serdes serial Loopback test",
        "Serdes parallel Loopback test",
        "Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
        {"mac_tx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
        {"mac_rx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
        {"mac_tx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
        {"mac_rx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
        {"mac_tx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
        {"mac_tx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
        {"mac_tx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
        {"mac_tx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
        {"mac_tx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
        {"mac_tx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
        {"mac_tx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
        {"mac_tx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
        {"mac_tx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
        {"mac_rx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
        {"mac_rx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
        {"mac_rx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
        {"mac_rx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
        {"mac_rx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
        {"mac_rx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
        {"mac_rx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
        {"mac_rx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
        {"mac_rx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
        {"mac_tx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
        {"mac_tx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
        {"mac_tx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
        {"mac_tx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
        {"mac_tx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
        {"mac_tx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
        {"mac_tx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
        {"mac_tx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
        {"mac_tx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
        {"mac_tx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
        {"mac_tx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
        {"mac_tx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
        {"mac_tx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
        {"mac_tx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
        {"mac_tx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
        {"mac_tx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
        {"mac_tx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
        {"mac_tx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
        {"mac_tx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
        {"mac_tx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
        {"mac_tx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
        {"mac_tx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
        {"mac_tx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
        {"mac_tx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
        {"mac_tx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
        {"mac_rx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
        {"mac_rx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
        {"mac_rx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
        {"mac_rx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
        {"mac_rx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
        {"mac_rx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
        {"mac_rx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
        {"mac_rx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
        {"mac_rx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
        {"mac_rx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
        {"mac_rx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
        {"mac_rx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
        {"mac_rx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
        {"mac_rx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
        {"mac_rx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
        {"mac_rx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
        {"mac_rx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
        {"mac_rx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
        {"mac_rx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
        {"mac_rx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
        {"mac_rx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
        {"mac_rx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
        {"mac_rx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
        {"mac_rx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
        {"mac_rx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

        {"mac_tx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
        {"mac_tx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
        {"mac_tx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
        {"mac_tx_err_all_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
        {"mac_tx_from_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
        {"mac_tx_from_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
        {"mac_rx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
        {"mac_rx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
        {"mac_rx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
        {"mac_rx_fcs_err_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
        {"mac_rx_send_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
        {"mac_rx_send_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
        {
                .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
                .ethter_type = cpu_to_le16(ETH_P_LLDP),
                .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
                .i_port_bitmap = 0x1,
        },
};

static const u8 hclge_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
        HCLGE_DFX_BIOS_BD_OFFSET,
        HCLGE_DFX_SSU_0_BD_OFFSET,
        HCLGE_DFX_SSU_1_BD_OFFSET,
        HCLGE_DFX_IGU_BD_OFFSET,
        HCLGE_DFX_RPU_0_BD_OFFSET,
        HCLGE_DFX_RPU_1_BD_OFFSET,
        HCLGE_DFX_NCSI_BD_OFFSET,
        HCLGE_DFX_RTC_BD_OFFSET,
        HCLGE_DFX_PPP_BD_OFFSET,
        HCLGE_DFX_RCB_BD_OFFSET,
        HCLGE_DFX_TQP_BD_OFFSET,
        HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
        HCLGE_OPC_DFX_BIOS_COMMON_REG,
        HCLGE_OPC_DFX_SSU_REG_0,
        HCLGE_OPC_DFX_SSU_REG_1,
        HCLGE_OPC_DFX_IGU_EGU_REG,
        HCLGE_OPC_DFX_RPU_REG_0,
        HCLGE_OPC_DFX_RPU_REG_1,
        HCLGE_OPC_DFX_NCSI_REG,
        HCLGE_OPC_DFX_RTC_REG,
        HCLGE_OPC_DFX_PPP_REG,
        HCLGE_OPC_DFX_RCB_REG,
        HCLGE_OPC_DFX_TQP_REG,
        HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
        { PACKET_TYPE_ID, 6},
        { IP_FRAGEMENT, 1},
        { ROCE_TYPE, 1},
        { NEXT_KEY, 5},
        { VLAN_NUMBER, 2},
        { SRC_VPORT, 12},
        { DST_VPORT, 12},
        { TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
        { OUTER_DST_MAC, 48},
        { OUTER_SRC_MAC, 48},
        { OUTER_VLAN_TAG_FST, 16},
        { OUTER_VLAN_TAG_SEC, 16},
        { OUTER_ETH_TYPE, 16},
        { OUTER_L2_RSV, 16},
        { OUTER_IP_TOS, 8},
        { OUTER_IP_PROTO, 8},
        { OUTER_SRC_IP, 32},
        { OUTER_DST_IP, 32},
        { OUTER_L3_RSV, 16},
        { OUTER_SRC_PORT, 16},
        { OUTER_DST_PORT, 16},
        { OUTER_L4_RSV, 32},
        { OUTER_TUN_VNI, 24},
        { OUTER_TUN_FLOW_ID, 8},
        { INNER_DST_MAC, 48},
        { INNER_SRC_MAC, 48},
        { INNER_VLAN_TAG_FST, 16},
        { INNER_VLAN_TAG_SEC, 16},
        { INNER_ETH_TYPE, 16},
        { INNER_L2_RSV, 16},
        { INNER_IP_TOS, 8},
        { INNER_IP_PROTO, 8},
        { INNER_SRC_IP, 32},
        { INNER_DST_IP, 32},
        { INNER_L3_RSV, 16},
        { INNER_SRC_PORT, 16},
        { INNER_DST_PORT, 16},
        { INNER_L4_RSV, 32},
};

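/* Query MAC statistics with the fixed 21-descriptor command and accumulate
 * the results into hdev->mac_stats. Used when the firmware cannot report
 * how many statistics registers it has.
 */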
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

        u64 *data = (u64 *)(&hdev->mac_stats);
        struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
        __le64 *desc_data;
        int i, k, n;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get MAC pkt stats fail, status = %d.\n", ret);

                return ret;
        }

        for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
                /* for special opcode 0032, only the first desc has the head */
                if (unlikely(i == 0)) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        return 0;
}

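/* Query all MAC statistics using the descriptor number reported by the
 * firmware and accumulate the results into hdev->mac_stats.
 */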
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
        u64 *data = (u64 *)(&hdev->mac_stats);
        struct hclge_desc *desc;
        __le64 *desc_data;
        u16 i, k, n;
        int ret;

        /* This may be called inside atomic sections,
         * so GFP_ATOMIC is more suitable here
         */
        desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
        if (!desc)
                return -ENOMEM;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
        ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
        if (ret) {
                kfree(desc);
                return ret;
        }

        for (i = 0; i < desc_num; i++) {
                /* for special opcode 0034, only the first desc has the head */
                if (i == 0) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        kfree(desc);

        return 0;
}

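/* Query how many MAC statistics registers the firmware exposes and convert
 * that count into the number of descriptors needed to read them all.
 */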
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
        struct hclge_desc desc;
        __le32 *desc_data;
        u32 reg_num;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        desc_data = (__le32 *)(&desc.data[0]);
        reg_num = le32_to_cpu(*desc_data);

        *desc_num = 1 + ((reg_num - 3) >> 2) +
                    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

        return 0;
}

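/* Update MAC statistics, preferring the complete query method and falling
 * back to the defective one when the firmware returns -EOPNOTSUPP.
 */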
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
        u32 desc_num;
        int ret;

        ret = hclge_mac_query_reg_num(hdev, &desc_num);

        /* The firmware supports the new statistics acquisition method */
        if (!ret)
                ret = hclge_mac_update_stats_complete(hdev, desc_num);
        else if (ret == -EOPNOTSUPP)
                ret = hclge_mac_update_stats_defective(hdev);
        else
                dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

        return ret;
}

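/* Read the RX and TX packet counters of every TQP owned by this handle and
 * accumulate them into the per-queue software statistics.
 */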
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct hnae3_queue *queue;
        struct hclge_desc desc[1];
        struct hclge_tqp *tqp;
        int ret, i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_IGU_STAT */
                hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
                                           true);

                desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d,queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_IGU_STAT */
                hclge_cmd_setup_basic_desc(&desc[0],
                                           HCLGE_OPC_QUERY_TX_STATUS,
                                           true);

                desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d,queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        return 0;
}

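/* Copy the accumulated per-queue TX and RX packet counters into the ethtool
 * statistics buffer and return the next free position.
 */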
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_tqp *tqp;
        u64 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
        }

        return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;

        /* each tqp has both a TX and an RX queue */
        return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        u8 *buff = data;
        int i = 0;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        return buff;
}

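/* Copy each statistic described by @strs from @comm_stats into @data and
 * return the position just after the last value written.
 */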
static u64 *hclge_comm_get_stats(const void *comm_stats,
                                 const struct hclge_comm_stats_str strs[],
                                 int size, u64 *data)
{
        u64 *buf = data;
        u32 i;

        for (i = 0; i < size; i++)
                buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

        return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
                                  const struct hclge_comm_stats_str strs[],
                                  int size, u8 *data)
{
        char *buff = (char *)data;
        u32 i;

        if (stringset != ETH_SS_STATS)
                return buff;

        for (i = 0; i < size; i++) {
                snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
                buff = buff + ETH_GSTRING_LEN;
        }

        return (u8 *)buff;
}

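/* Refresh the per-queue and MAC statistics of the PF. */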
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
        struct hnae3_handle *handle;
        int status;

        handle = &hdev->vport[0].nic;
        if (handle->client) {
                status = hclge_tqps_update_stats(handle);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Update TQPS stats fail, status = %d.\n",
                                status);
                }
        }

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
                               struct net_device_stats *net_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int status;

        if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
                return;

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n",
                        status);

        status = hclge_tqps_update_stats(handle);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update TQPS stats fail, status = %d.\n",
                        status);

        clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
                HNAE3_SUPPORT_PHY_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int count = 0;

        /* Loopback test support rules:
         * mac: only GE mode supports it
         * serdes: all mac modes support it, including GE/XGE/LGE/CGE
         * phy: only supported when a phy device exists on the board
         */
        if (stringset == ETH_SS_TEST) {
                /* clear loopback bit flags at first */
                handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
                if (hdev->pdev->revision >= 0x21 ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
                }

                count += 2;
                handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
                handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

                if (hdev->hw.mac.phydev) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
                }

        } else if (stringset == ETH_SS_STATS) {
                count = ARRAY_SIZE(g_mac_stats_string) +
                        hclge_tqps_get_sset_count(handle, stringset);
        }

        return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
                              u8 *data)
{
        u8 *p = (char *)data;
        int size;

        if (stringset == ETH_SS_STATS) {
                size = ARRAY_SIZE(g_mac_stats_string);
                p = hclge_comm_get_strings(stringset, g_mac_stats_string,
                                           size, p);
                p = hclge_tqps_get_strings(handle, p);
        } else if (stringset == ETH_SS_TEST) {
                if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
        }
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u64 *p;

        p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
                                 ARRAY_SIZE(g_mac_stats_string), data);
        p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
                               struct hns3_mac_stats *mac_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        hclge_update_stats(handle, NULL);

        mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
        mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

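/* Record whether this PF is the main PF and extract its MAC id from the
 * function status reported by the firmware.
 */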
static int hclge_parse_func_status(struct hclge_dev *hdev,
                                   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK       0xF

        if (!(status->pf_state & HCLGE_PF_STATE_DONE))
                return -EINVAL;

        /* Set the pf to main pf */
        if (status->pf_state & HCLGE_PF_STATE_MAIN)
                hdev->flag |= HCLGE_FLAG_MAIN;
        else
                hdev->flag &= ~HCLGE_FLAG_MAIN;

        hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
        return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT     5

        struct hclge_func_status_cmd *req;
        struct hclge_desc desc;
        int timeout = 0;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
        req = (struct hclge_func_status_cmd *)desc.data;

        do {
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "query function status failed %d.\n", ret);
                        return ret;
                }

                /* Check pf reset is done */
                if (req->pf_state)
                        break;
                usleep_range(1000, 2000);
        } while (timeout++ < HCLGE_QUERY_MAX_CNT);

        return hclge_parse_func_status(hdev, req);
}

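/* Query the TQP, buffer and MSI-X resources assigned to this PF and derive
 * the NIC/RoCE vector layout from them.
 */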
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
        struct hclge_pf_res_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query pf resource failed %d.\n", ret);
                return ret;
        }

        req = (struct hclge_pf_res_cmd *)desc.data;
        hdev->num_tqps = le16_to_cpu(req->tqp_num);
        hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

        if (req->tx_buf_size)
                hdev->tx_buf_size =
                        le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

        hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

        if (req->dv_buf_size)
                hdev->dv_buf_size =
                        le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->dv_buf_size = HCLGE_DEFAULT_DV;

        hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

        if (hnae3_dev_roce_supported(hdev)) {
                hdev->roce_base_msix_offset =
                hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
                                HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
                hdev->num_roce_msi =
                hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

                /* nic's msix number is always equal to the roce's. */
                hdev->num_nic_msi = hdev->num_roce_msi;

                /* PF should have NIC vectors and Roce vectors,
                 * NIC vectors are queued before Roce vectors.
                 */
                hdev->num_msi = hdev->num_roce_msi +
                                hdev->roce_base_msix_offset;
        } else {
                hdev->num_msi =
                hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

                hdev->num_nic_msi = hdev->num_msi;
        }

        if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
                dev_err(&hdev->pdev->dev,
                        "Just %u msi resources, not enough for pf(min:2).\n",
                        hdev->num_nic_msi);
                return -EINVAL;
        }

        return 0;
}

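/* Translate the speed code from the firmware configuration into the
 * corresponding HCLGE_MAC_SPEED_* value.
 */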
static int hclge_parse_speed(int speed_cmd, int *speed)
{
        switch (speed_cmd) {
        case 6:
                *speed = HCLGE_MAC_SPEED_10M;
                break;
        case 7:
                *speed = HCLGE_MAC_SPEED_100M;
                break;
        case 0:
                *speed = HCLGE_MAC_SPEED_1G;
                break;
        case 1:
                *speed = HCLGE_MAC_SPEED_10G;
                break;
        case 2:
                *speed = HCLGE_MAC_SPEED_25G;
                break;
        case 3:
                *speed = HCLGE_MAC_SPEED_40G;
                break;
        case 4:
                *speed = HCLGE_MAC_SPEED_50G;
                break;
        case 5:
                *speed = HCLGE_MAC_SPEED_100G;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

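/* Return 0 if the requested speed is covered by the port's speed ability,
 * otherwise -EINVAL.
 */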
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 speed_ability = hdev->hw.mac.speed_ability;
        u32 speed_bit = 0;

        switch (speed) {
        case HCLGE_MAC_SPEED_10M:
                speed_bit = HCLGE_SUPPORT_10M_BIT;
                break;
        case HCLGE_MAC_SPEED_100M:
                speed_bit = HCLGE_SUPPORT_100M_BIT;
                break;
        case HCLGE_MAC_SPEED_1G:
                speed_bit = HCLGE_SUPPORT_1G_BIT;
                break;
        case HCLGE_MAC_SPEED_10G:
                speed_bit = HCLGE_SUPPORT_10G_BIT;
                break;
        case HCLGE_MAC_SPEED_25G:
                speed_bit = HCLGE_SUPPORT_25G_BIT;
                break;
        case HCLGE_MAC_SPEED_40G:
                speed_bit = HCLGE_SUPPORT_40G_BIT;
                break;
        case HCLGE_MAC_SPEED_50G:
                speed_bit = HCLGE_SUPPORT_50G_BIT;
                break;
        case HCLGE_MAC_SPEED_100G:
                speed_bit = HCLGE_SUPPORT_100G_BIT;
                break;
        default:
                return -EINVAL;
        }

        if (speed_bit & speed_ability)
                return 0;

        return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
                                 mac->supported);
}

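/* Set the supported FEC link modes and fec_ability according to the current
 * MAC speed.
 */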
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

        switch (mac->speed) {
        case HCLGE_MAC_SPEED_10G:
        case HCLGE_MAC_SPEED_40G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_25G:
        case HCLGE_MAC_SPEED_50G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
                        BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_100G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
                mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
                break;
        default:
                mac->fec_ability = 0;
                break;
        }
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
                                        u8 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
                                 mac->supported);

        hclge_convert_setting_sr(mac, speed_ability);
        hclge_convert_setting_lr(mac, speed_ability);
        hclge_convert_setting_cr(mac, speed_ability);
        if (hdev->pdev->revision >= 0x21)
                hclge_convert_setting_fec(mac);

        linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
                                            u8 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        hclge_convert_setting_kr(mac, speed_ability);
        if (hdev->pdev->revision >= 0x21)
                hclge_convert_setting_fec(mac);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
                                         u8 speed_ability)
{
        unsigned long *supported = hdev->hw.mac.supported;

        /* default to support all speeds for GE port */
        if (!speed_ability)
                speed_ability = HCLGE_SUPPORT_GE;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
                                 supported);

        if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
                                 supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
                                 supported);
        }

        if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
        }

        linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
        u8 media_type = hdev->hw.mac.media_type;

        if (media_type == HNAE3_MEDIA_TYPE_FIBER)
                hclge_parse_fiber_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
                hclge_parse_copper_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
                hclge_parse_backplane_link_mode(hdev, speed_ability);
}

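/* Return the highest speed present in speed_ability, defaulting to 1G. */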
static u32 hclge_get_max_speed(u8 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                return HCLGE_MAC_SPEED_100G;

        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                return HCLGE_MAC_SPEED_50G;

        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                return HCLGE_MAC_SPEED_40G;

        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                return HCLGE_MAC_SPEED_25G;

        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                return HCLGE_MAC_SPEED_10G;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                return HCLGE_MAC_SPEED_1G;

        if (speed_ability & HCLGE_SUPPORT_100M_BIT)
                return HCLGE_MAC_SPEED_100M;

        if (speed_ability & HCLGE_SUPPORT_10M_BIT)
                return HCLGE_MAC_SPEED_10M;

        return HCLGE_MAC_SPEED_1G;
}

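/* Unpack the configuration parameters returned by the firmware into @cfg,
 * including MAC address, default speed and UMV space.
 */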
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
        struct hclge_cfg_param_cmd *req;
        u64 mac_addr_tmp_high;
        u64 mac_addr_tmp;
        unsigned int i;

        req = (struct hclge_cfg_param_cmd *)desc[0].data;

        /* get the configuration */
        cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                              HCLGE_CFG_VMDQ_M,
                                              HCLGE_CFG_VMDQ_S);
        cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
        cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                            HCLGE_CFG_TQP_DESC_N_M,
                                            HCLGE_CFG_TQP_DESC_N_S);

        cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                        HCLGE_CFG_PHY_ADDR_M,
                                        HCLGE_CFG_PHY_ADDR_S);
        cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_MEDIA_TP_M,
                                          HCLGE_CFG_MEDIA_TP_S);
        cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_RX_BUF_LEN_M,
                                          HCLGE_CFG_RX_BUF_LEN_S);
        /* get mac_address */
        mac_addr_tmp = __le32_to_cpu(req->param[2]);
        mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_MAC_ADDR_H_M,
                                            HCLGE_CFG_MAC_ADDR_H_S);

        mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

        cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                             HCLGE_CFG_DEFAULT_SPEED_M,
                                             HCLGE_CFG_DEFAULT_SPEED_S);
        cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_RSS_SIZE_M,
                                            HCLGE_CFG_RSS_SIZE_S);

        for (i = 0; i < ETH_ALEN; i++)
                cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

        req = (struct hclge_cfg_param_cmd *)desc[1].data;
        cfg->numa_node_map = __le32_to_cpu(req->param[0]);

        cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                             HCLGE_CFG_SPEED_ABILITY_M,
                                             HCLGE_CFG_SPEED_ABILITY_S);
        cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                         HCLGE_CFG_UMV_TBL_SPACE_M,
                                         HCLGE_CFG_UMV_TBL_SPACE_S);
        if (!cfg->umv_space)
                cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
        struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
        struct hclge_cfg_param_cmd *req;
        unsigned int i;
        int ret;

        for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
                u32 offset = 0;

                req = (struct hclge_cfg_param_cmd *)desc[i].data;
                hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
                                           true);
                hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
                                HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1310                 /* Len is sent to hardware in units of 4 bytes */
1311                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1312                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1313                 req->offset = cpu_to_le32(offset);
1314         }
1315
1316         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1317         if (ret) {
1318                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1319                 return ret;
1320         }
1321
1322         hclge_parse_cfg(hcfg, desc);
1323
1324         return 0;
1325 }
1326
1327 static int hclge_get_cap(struct hclge_dev *hdev)
1328 {
1329         int ret;
1330
1331         ret = hclge_query_function_status(hdev);
1332         if (ret) {
1333                 dev_err(&hdev->pdev->dev,
1334                         "query function status error %d.\n", ret);
1335                 return ret;
1336         }
1337
1338         /* get pf resource */
1339         return hclge_query_pf_resource(hdev);
1340 }
1341
1342 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1343 {
1344 #define HCLGE_MIN_TX_DESC       64
1345 #define HCLGE_MIN_RX_DESC       64
1346
1347         if (!is_kdump_kernel())
1348                 return;
1349
1350         dev_info(&hdev->pdev->dev,
1351                  "Running kdump kernel. Using minimal resources\n");
1352
1353         /* the minimal number of queue pairs equals the number of vports */
1354         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1355         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1356         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1357 }
1358
1359 static int hclge_configure(struct hclge_dev *hdev)
1360 {
1361         struct hclge_cfg cfg;
1362         unsigned int i;
1363         int ret;
1364
1365         ret = hclge_get_cfg(hdev, &cfg);
1366         if (ret) {
1367                 dev_err(&hdev->pdev->dev, "failed to get config, ret = %d.\n", ret);
1368                 return ret;
1369         }
1370
1371         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1372         hdev->base_tqp_pid = 0;
1373         hdev->rss_size_max = cfg.rss_size_max;
1374         hdev->rx_buf_len = cfg.rx_buf_len;
1375         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1376         hdev->hw.mac.media_type = cfg.media_type;
1377         hdev->hw.mac.phy_addr = cfg.phy_addr;
1378         hdev->num_tx_desc = cfg.tqp_desc_num;
1379         hdev->num_rx_desc = cfg.tqp_desc_num;
1380         hdev->tm_info.num_pg = 1;
1381         hdev->tc_max = cfg.tc_num;
1382         hdev->tm_info.hw_pfc_map = 0;
1383         hdev->wanted_umv_size = cfg.umv_space;
1384
1385         if (hnae3_dev_fd_supported(hdev)) {
1386                 hdev->fd_en = true;
1387                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1388         }
1389
1390         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1391         if (ret) {
1392                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1393                 return ret;
1394         }
1395
1396         hclge_parse_link_mode(hdev, cfg.speed_ability);
1397
1398         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1399
1400         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1401             (hdev->tc_max < 1)) {
1402                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1403                          hdev->tc_max);
1404                 hdev->tc_max = 1;
1405         }
1406
1407         /* Dev does not support DCB */
1408         if (!hnae3_dev_dcb_supported(hdev)) {
1409                 hdev->tc_max = 1;
1410                 hdev->pfc_max = 0;
1411         } else {
1412                 hdev->pfc_max = hdev->tc_max;
1413         }
1414
1415         hdev->tm_info.num_tc = 1;
1416
1417         /* Currently discontinuous TCs are not supported */
1418         for (i = 0; i < hdev->tm_info.num_tc; i++)
1419                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1420
1421         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1422
1423         hclge_init_kdump_kernel_config(hdev);
1424
1425         /* Set the init affinity based on pci func number */
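        /* i.e. spread PFs over the CPUs of the device's NUMA node by taking
         * the PCI function number modulo the node's CPU count.
         */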
1426         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1427         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1428         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1429                         &hdev->affinity_mask);
1430
1431         return ret;
1432 }
1433
1434 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1435                             unsigned int tso_mss_max)
1436 {
1437         struct hclge_cfg_tso_status_cmd *req;
1438         struct hclge_desc desc;
1439         u16 tso_mss;
1440
1441         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1442
1443         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1444
1445         tso_mss = 0;
1446         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1447                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1448         req->tso_mss_min = cpu_to_le16(tso_mss);
1449
1450         tso_mss = 0;
1451         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1452                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1453         req->tso_mss_max = cpu_to_le16(tso_mss);
1454
1455         return hclge_cmd_send(&hdev->hw, &desc, 1);
1456 }
1457
1458 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1459 {
1460         struct hclge_cfg_gro_status_cmd *req;
1461         struct hclge_desc desc;
1462         int ret;
1463
1464         if (!hnae3_dev_gro_supported(hdev))
1465                 return 0;
1466
1467         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1468         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1469
1470         req->gro_en = cpu_to_le16(en ? 1 : 0);
1471
1472         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1473         if (ret)
1474                 dev_err(&hdev->pdev->dev,
1475                         "GRO hardware config cmd failed, ret = %d\n", ret);
1476
1477         return ret;
1478 }
1479
1480 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1481 {
1482         struct hclge_tqp *tqp;
1483         int i;
1484
1485         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1486                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1487         if (!hdev->htqp)
1488                 return -ENOMEM;
1489
1490         tqp = hdev->htqp;
1491
1492         for (i = 0; i < hdev->num_tqps; i++) {
1493                 tqp->dev = &hdev->pdev->dev;
1494                 tqp->index = i;
1495
1496                 tqp->q.ae_algo = &ae_algo;
1497                 tqp->q.buf_size = hdev->rx_buf_len;
1498                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1499                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1500                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1501                         i * HCLGE_TQP_REG_SIZE;
1502
1503                 tqp++;
1504         }
1505
1506         return 0;
1507 }
1508
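/* Map the physical queue pair tqp_pid to function func_id as its virtual
 * queue tqp_vid; the map-type bit marks the mapping as belonging to a VF.
 */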
1509 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1510                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1511 {
1512         struct hclge_tqp_map_cmd *req;
1513         struct hclge_desc desc;
1514         int ret;
1515
1516         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1517
1518         req = (struct hclge_tqp_map_cmd *)desc.data;
1519         req->tqp_id = cpu_to_le16(tqp_pid);
1520         req->tqp_vf = func_id;
1521         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1522         if (!is_pf)
1523                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1524         req->tqp_vid = cpu_to_le16(tqp_vid);
1525
1526         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1527         if (ret)
1528                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1529
1530         return ret;
1531 }
1532
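/* Assign up to num_tqps unused hardware queue pairs to the vport, then
 * derive its rss_size, bounded by rss_size_max and by the number of NIC
 * MSI vectors available per TC.
 */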
1533 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1534 {
1535         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1536         struct hclge_dev *hdev = vport->back;
1537         int i, alloced;
1538
1539         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1540              alloced < num_tqps; i++) {
1541                 if (!hdev->htqp[i].alloced) {
1542                         hdev->htqp[i].q.handle = &vport->nic;
1543                         hdev->htqp[i].q.tqp_index = alloced;
1544                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1545                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1546                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1547                         hdev->htqp[i].alloced = true;
1548                         alloced++;
1549                 }
1550         }
1551         vport->alloc_tqps = alloced;
1552         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1553                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1554
1555         /* ensure a one-to-one mapping between irq and queue by default */
1556         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1557                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1558
1559         return 0;
1560 }
1561
1562 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1563                             u16 num_tx_desc, u16 num_rx_desc)
1565 {
1566         struct hnae3_handle *nic = &vport->nic;
1567         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1568         struct hclge_dev *hdev = vport->back;
1569         int ret;
1570
1571         kinfo->num_tx_desc = num_tx_desc;
1572         kinfo->num_rx_desc = num_rx_desc;
1573
1574         kinfo->rx_buf_len = hdev->rx_buf_len;
1575
1576         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1577                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1578         if (!kinfo->tqp)
1579                 return -ENOMEM;
1580
1581         ret = hclge_assign_tqp(vport, num_tqps);
1582         if (ret)
1583                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1584
1585         return ret;
1586 }
1587
1588 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1589                                   struct hclge_vport *vport)
1590 {
1591         struct hnae3_handle *nic = &vport->nic;
1592         struct hnae3_knic_private_info *kinfo;
1593         u16 i;
1594
1595         kinfo = &nic->kinfo;
1596         for (i = 0; i < vport->alloc_tqps; i++) {
1597                 struct hclge_tqp *q =
1598                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1599                 bool is_pf;
1600                 int ret;
1601
1602                 is_pf = !(vport->vport_id);
1603                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1604                                              i, is_pf);
1605                 if (ret)
1606                         return ret;
1607         }
1608
1609         return 0;
1610 }
1611
1612 static int hclge_map_tqp(struct hclge_dev *hdev)
1613 {
1614         struct hclge_vport *vport = hdev->vport;
1615         u16 i, num_vport;
1616
1617         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1618         for (i = 0; i < num_vport; i++) {
1619                 int ret;
1620
1621                 ret = hclge_map_tqp_to_vport(hdev, vport);
1622                 if (ret)
1623                         return ret;
1624
1625                 vport++;
1626         }
1627
1628         return 0;
1629 }
1630
1631 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1632 {
1633         struct hnae3_handle *nic = &vport->nic;
1634         struct hclge_dev *hdev = vport->back;
1635         int ret;
1636
1637         nic->pdev = hdev->pdev;
1638         nic->ae_algo = &ae_algo;
1639         nic->numa_node_mask = hdev->numa_node_mask;
1640
1641         ret = hclge_knic_setup(vport, num_tqps,
1642                                hdev->num_tx_desc, hdev->num_rx_desc);
1643         if (ret)
1644                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1645
1646         return ret;
1647 }
1648
1649 static int hclge_alloc_vport(struct hclge_dev *hdev)
1650 {
1651         struct pci_dev *pdev = hdev->pdev;
1652         struct hclge_vport *vport;
1653         u32 tqp_main_vport;
1654         u32 tqp_per_vport;
1655         int num_vport, i;
1656         int ret;
1657
1658         /* We need to alloc a vport for the main NIC of the PF */
1659         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1660
1661         if (hdev->num_tqps < num_vport) {
1662                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1663                         hdev->num_tqps, num_vport);
1664                 return -EINVAL;
1665         }
1666
1667         /* Alloc the same number of TQPs for every vport */
1668         tqp_per_vport = hdev->num_tqps / num_vport;
1669         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1670
1671         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1672                              GFP_KERNEL);
1673         if (!vport)
1674                 return -ENOMEM;
1675
1676         hdev->vport = vport;
1677         hdev->num_alloc_vport = num_vport;
1678
1679         if (IS_ENABLED(CONFIG_PCI_IOV))
1680                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1681
1682         for (i = 0; i < num_vport; i++) {
1683                 vport->back = hdev;
1684                 vport->vport_id = i;
1685                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1686                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1687                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1688                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1689                 INIT_LIST_HEAD(&vport->vlan_list);
1690                 INIT_LIST_HEAD(&vport->uc_mac_list);
1691                 INIT_LIST_HEAD(&vport->mc_mac_list);
1692                 spin_lock_init(&vport->mac_list_lock);
1693
1694                 if (i == 0)
1695                         ret = hclge_vport_setup(vport, tqp_main_vport);
1696                 else
1697                         ret = hclge_vport_setup(vport, tqp_per_vport);
1698                 if (ret) {
1699                         dev_err(&pdev->dev,
1700                                 "vport setup failed for vport %d, %d\n",
1701                                 i, ret);
1702                         return ret;
1703                 }
1704
1705                 vport++;
1706         }
1707
1708         return 0;
1709 }
1710
1711 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1712                                     struct hclge_pkt_buf_alloc *buf_alloc)
1713 {
1714 /* TX buffer size is in units of 128 bytes */
1715 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1716 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1717         struct hclge_tx_buff_alloc_cmd *req;
1718         struct hclge_desc desc;
1719         int ret;
1720         u8 i;
1721
1722         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1723
1724         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1725         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1726                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1727
1728                 req->tx_pkt_buff[i] =
1729                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1730                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1731         }
1732
1733         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1734         if (ret)
1735                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1736                         ret);
1737
1738         return ret;
1739 }
1740
1741 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1742                                  struct hclge_pkt_buf_alloc *buf_alloc)
1743 {
1744         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1745
1746         if (ret)
1747                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1748
1749         return ret;
1750 }
1751
1752 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1753 {
1754         unsigned int i;
1755         u32 cnt = 0;
1756
1757         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1758                 if (hdev->hw_tc_map & BIT(i))
1759                         cnt++;
1760         return cnt;
1761 }
1762
1763 /* Get the number of PFC-enabled TCs that have a private buffer */
1764 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1765                                   struct hclge_pkt_buf_alloc *buf_alloc)
1766 {
1767         struct hclge_priv_buf *priv;
1768         unsigned int i;
1769         int cnt = 0;
1770
1771         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1772                 priv = &buf_alloc->priv_buf[i];
1773                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1774                     priv->enable)
1775                         cnt++;
1776         }
1777
1778         return cnt;
1779 }
1780
1781 /* Get the number of PFC-disabled TCs that have a private buffer */
1782 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1783                                      struct hclge_pkt_buf_alloc *buf_alloc)
1784 {
1785         struct hclge_priv_buf *priv;
1786         unsigned int i;
1787         int cnt = 0;
1788
1789         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1790                 priv = &buf_alloc->priv_buf[i];
1791                 if (hdev->hw_tc_map & BIT(i) &&
1792                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1793                     priv->enable)
1794                         cnt++;
1795         }
1796
1797         return cnt;
1798 }
1799
1800 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1801 {
1802         struct hclge_priv_buf *priv;
1803         u32 rx_priv = 0;
1804         int i;
1805
1806         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1807                 priv = &buf_alloc->priv_buf[i];
1808                 if (priv->enable)
1809                         rx_priv += priv->buf_size;
1810         }
1811         return rx_priv;
1812 }
1813
1814 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1815 {
1816         u32 i, total_tx_size = 0;
1817
1818         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1819                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1820
1821         return total_tx_size;
1822 }
1823
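/* Check whether rx_all can hold all private rx buffers plus a minimum
 * shared buffer; if so, size the shared buffer and fill in its common
 * and per-TC waterline thresholds.
 */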
1824 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1825                                 struct hclge_pkt_buf_alloc *buf_alloc,
1826                                 u32 rx_all)
1827 {
1828         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1829         u32 tc_num = hclge_get_tc_num(hdev);
1830         u32 shared_buf, aligned_mps;
1831         u32 rx_priv;
1832         int i;
1833
1834         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1835
1836         if (hnae3_dev_dcb_supported(hdev))
1837                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1838                                         hdev->dv_buf_size;
1839         else
1840                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1841                                         + hdev->dv_buf_size;
1842
1843         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1844         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1845                              HCLGE_BUF_SIZE_UNIT);
1846
1847         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1848         if (rx_all < rx_priv + shared_std)
1849                 return false;
1850
1851         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1852         buf_alloc->s_buf.buf_size = shared_buf;
1853         if (hnae3_dev_dcb_supported(hdev)) {
1854                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1855                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1856                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1857                                   HCLGE_BUF_SIZE_UNIT);
1858         } else {
1859                 buf_alloc->s_buf.self.high = aligned_mps +
1860                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1861                 buf_alloc->s_buf.self.low = aligned_mps;
1862         }
1863
1864         if (hnae3_dev_dcb_supported(hdev)) {
1865                 hi_thrd = shared_buf - hdev->dv_buf_size;
1866
1867                 if (tc_num <= NEED_RESERVE_TC_NUM)
1868                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1869                                         / BUF_MAX_PERCENT;
1870
1871                 if (tc_num)
1872                         hi_thrd = hi_thrd / tc_num;
1873
1874                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1875                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1876                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1877         } else {
1878                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1879                 lo_thrd = aligned_mps;
1880         }
1881
1882         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1883                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1884                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1885         }
1886
1887         return true;
1888 }
1889
1890 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1891                                 struct hclge_pkt_buf_alloc *buf_alloc)
1892 {
1893         u32 i, total_size;
1894
1895         total_size = hdev->pkt_buf_size;
1896
1897         /* alloc tx buffer for all enabled tc */
1898         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1899                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1900
1901                 if (hdev->hw_tc_map & BIT(i)) {
1902                         if (total_size < hdev->tx_buf_size)
1903                                 return -ENOMEM;
1904
1905                         priv->tx_buf_size = hdev->tx_buf_size;
1906                 } else {
1907                         priv->tx_buf_size = 0;
1908                 }
1909
1910                 total_size -= priv->tx_buf_size;
1911         }
1912
1913         return 0;
1914 }
1915
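/* Size the private rx buffer and waterlines of every enabled TC, using
 * larger waterlines when 'max' is set, then check that the total still
 * fits in the remaining packet buffer.
 */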
1916 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1917                                   struct hclge_pkt_buf_alloc *buf_alloc)
1918 {
1919         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1920         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1921         unsigned int i;
1922
1923         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1924                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1925
1926                 priv->enable = 0;
1927                 priv->wl.low = 0;
1928                 priv->wl.high = 0;
1929                 priv->buf_size = 0;
1930
1931                 if (!(hdev->hw_tc_map & BIT(i)))
1932                         continue;
1933
1934                 priv->enable = 1;
1935
1936                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1937                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1938                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1939                                                 HCLGE_BUF_SIZE_UNIT);
1940                 } else {
1941                         priv->wl.low = 0;
1942                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1943                                         aligned_mps;
1944                 }
1945
1946                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1947         }
1948
1949         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1950 }
1951
1952 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1953                                           struct hclge_pkt_buf_alloc *buf_alloc)
1954 {
1955         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1956         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1957         int i;
1958
1959         /* let the last TC be cleared first */
1960         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1961                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1962                 unsigned int mask = BIT((unsigned int)i);
1963
1964                 if (hdev->hw_tc_map & mask &&
1965                     !(hdev->tm_info.hw_pfc_map & mask)) {
1966                         /* Clear the no pfc TC private buffer */
1967                         priv->wl.low = 0;
1968                         priv->wl.high = 0;
1969                         priv->buf_size = 0;
1970                         priv->enable = 0;
1971                         no_pfc_priv_num--;
1972                 }
1973
1974                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1975                     no_pfc_priv_num == 0)
1976                         break;
1977         }
1978
1979         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1980 }
1981
1982 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1983                                         struct hclge_pkt_buf_alloc *buf_alloc)
1984 {
1985         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1986         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1987         int i;
1988
1989         /* let the last TC be cleared first */
1990         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1991                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1992                 unsigned int mask = BIT((unsigned int)i);
1993
1994                 if (hdev->hw_tc_map & mask &&
1995                     hdev->tm_info.hw_pfc_map & mask) {
1996                         /* Reduce the number of pfc TC with private buffer */
1997                         priv->wl.low = 0;
1998                         priv->enable = 0;
1999                         priv->wl.high = 0;
2000                         priv->buf_size = 0;
2001                         pfc_priv_num--;
2002                 }
2003
2004                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2005                     pfc_priv_num == 0)
2006                         break;
2007         }
2008
2009         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2010 }
2011
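/* Try to split the whole remaining rx buffer evenly among the enabled TCs
 * as private buffers with no shared buffer; returns false when the per-TC
 * share would fall below the minimum required size.
 */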
2012 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2013                                       struct hclge_pkt_buf_alloc *buf_alloc)
2014 {
2015 #define COMPENSATE_BUFFER       0x3C00
2016 #define COMPENSATE_HALF_MPS_NUM 5
2017 #define PRIV_WL_GAP             0x1800
2018
2019         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2020         u32 tc_num = hclge_get_tc_num(hdev);
2021         u32 half_mps = hdev->mps >> 1;
2022         u32 min_rx_priv;
2023         unsigned int i;
2024
2025         if (tc_num)
2026                 rx_priv = rx_priv / tc_num;
2027
2028         if (tc_num <= NEED_RESERVE_TC_NUM)
2029                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2030
2031         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2032                         COMPENSATE_HALF_MPS_NUM * half_mps;
2033         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2034         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2035
2036         if (rx_priv < min_rx_priv)
2037                 return false;
2038
2039         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2040                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2041
2042                 priv->enable = 0;
2043                 priv->wl.low = 0;
2044                 priv->wl.high = 0;
2045                 priv->buf_size = 0;
2046
2047                 if (!(hdev->hw_tc_map & BIT(i)))
2048                         continue;
2049
2050                 priv->enable = 1;
2051                 priv->buf_size = rx_priv;
2052                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2053                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2054         }
2055
2056         buf_alloc->s_buf.buf_size = 0;
2057
2058         return true;
2059 }
2060
2061 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2062  * @hdev: pointer to struct hclge_dev
2063  * @buf_alloc: pointer to buffer calculation data
2064  * @return: 0: calculation successful, negative: fail
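 * Allocation strategies are tried in order: private-buffer-only, full
 * waterlines for all TCs, reduced waterlines, then dropping the private
 * buffers of non-PFC TCs and finally of PFC TCs until the total fits.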
2065  */
2066 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2067                                 struct hclge_pkt_buf_alloc *buf_alloc)
2068 {
2069         /* When DCB is not supported, rx private buffer is not allocated. */
2070         if (!hnae3_dev_dcb_supported(hdev)) {
2071                 u32 rx_all = hdev->pkt_buf_size;
2072
2073                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2074                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2075                         return -ENOMEM;
2076
2077                 return 0;
2078         }
2079
2080         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2081                 return 0;
2082
2083         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2084                 return 0;
2085
2086         /* try to decrease the buffer size */
2087         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2088                 return 0;
2089
2090         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2091                 return 0;
2092
2093         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2094                 return 0;
2095
2096         return -ENOMEM;
2097 }
2098
2099 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2100                                    struct hclge_pkt_buf_alloc *buf_alloc)
2101 {
2102         struct hclge_rx_priv_buff_cmd *req;
2103         struct hclge_desc desc;
2104         int ret;
2105         int i;
2106
2107         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2108         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2109
2110         /* Alloc private buffer TCs */
2111         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2112                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2113
2114                 req->buf_num[i] =
2115                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2116                 req->buf_num[i] |=
2117                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2118         }
2119
2120         req->shared_buf =
2121                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2122                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2123
2124         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2125         if (ret)
2126                 dev_err(&hdev->pdev->dev,
2127                         "rx private buffer alloc cmd failed %d\n", ret);
2128
2129         return ret;
2130 }
2131
2132 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2133                                    struct hclge_pkt_buf_alloc *buf_alloc)
2134 {
2135         struct hclge_rx_priv_wl_buf *req;
2136         struct hclge_priv_buf *priv;
2137         struct hclge_desc desc[2];
2138         int i, j;
2139         int ret;
2140
2141         for (i = 0; i < 2; i++) {
2142                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2143                                            false);
2144                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2145
2146                 /* The first descriptor sets the NEXT bit to 1 */
2147                 if (i == 0)
2148                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2149                 else
2150                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2151
2152                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2153                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2154
2155                         priv = &buf_alloc->priv_buf[idx];
2156                         req->tc_wl[j].high =
2157                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2158                         req->tc_wl[j].high |=
2159                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2160                         req->tc_wl[j].low =
2161                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2162                         req->tc_wl[j].low |=
2163                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2164                 }
2165         }
2166
2167         /* Send 2 descriptors at one time */
2168         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2169         if (ret)
2170                 dev_err(&hdev->pdev->dev,
2171                         "rx private waterline config cmd failed %d\n",
2172                         ret);
2173         return ret;
2174 }
2175
2176 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2177                                     struct hclge_pkt_buf_alloc *buf_alloc)
2178 {
2179         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2180         struct hclge_rx_com_thrd *req;
2181         struct hclge_desc desc[2];
2182         struct hclge_tc_thrd *tc;
2183         int i, j;
2184         int ret;
2185
2186         for (i = 0; i < 2; i++) {
2187                 hclge_cmd_setup_basic_desc(&desc[i],
2188                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2189                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2190
2191                 /* The first descriptor sets the NEXT bit to 1 */
2192                 if (i == 0)
2193                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2194                 else
2195                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2196
2197                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2198                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2199
2200                         req->com_thrd[j].high =
2201                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2202                         req->com_thrd[j].high |=
2203                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2204                         req->com_thrd[j].low =
2205                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2206                         req->com_thrd[j].low |=
2207                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2208                 }
2209         }
2210
2211         /* Send 2 descriptors at one time */
2212         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2213         if (ret)
2214                 dev_err(&hdev->pdev->dev,
2215                         "common threshold config cmd failed %d\n", ret);
2216         return ret;
2217 }
2218
2219 static int hclge_common_wl_config(struct hclge_dev *hdev,
2220                                   struct hclge_pkt_buf_alloc *buf_alloc)
2221 {
2222         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2223         struct hclge_rx_com_wl *req;
2224         struct hclge_desc desc;
2225         int ret;
2226
2227         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2228
2229         req = (struct hclge_rx_com_wl *)desc.data;
2230         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2231         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2232
2233         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2234         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2235
2236         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2237         if (ret)
2238                 dev_err(&hdev->pdev->dev,
2239                         "common waterline config cmd failed %d\n", ret);
2240
2241         return ret;
2242 }
2243
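/* Calculate and program the whole tx/rx packet buffer layout: tx buffers,
 * rx private buffers, private and common waterlines and thresholds.
 */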
2244 int hclge_buffer_alloc(struct hclge_dev *hdev)
2245 {
2246         struct hclge_pkt_buf_alloc *pkt_buf;
2247         int ret;
2248
2249         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2250         if (!pkt_buf)
2251                 return -ENOMEM;
2252
2253         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2254         if (ret) {
2255                 dev_err(&hdev->pdev->dev,
2256                         "could not calc tx buffer size for all TCs %d\n", ret);
2257                 goto out;
2258         }
2259
2260         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2261         if (ret) {
2262                 dev_err(&hdev->pdev->dev,
2263                         "could not alloc tx buffers %d\n", ret);
2264                 goto out;
2265         }
2266
2267         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2268         if (ret) {
2269                 dev_err(&hdev->pdev->dev,
2270                         "could not calc rx priv buffer size for all TCs %d\n",
2271                         ret);
2272                 goto out;
2273         }
2274
2275         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2276         if (ret) {
2277                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2278                         ret);
2279                 goto out;
2280         }
2281
2282         if (hnae3_dev_dcb_supported(hdev)) {
2283                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2284                 if (ret) {
2285                         dev_err(&hdev->pdev->dev,
2286                                 "could not configure rx private waterline %d\n",
2287                                 ret);
2288                         goto out;
2289                 }
2290
2291                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2292                 if (ret) {
2293                         dev_err(&hdev->pdev->dev,
2294                                 "could not configure common threshold %d\n",
2295                                 ret);
2296                         goto out;
2297                 }
2298         }
2299
2300         ret = hclge_common_wl_config(hdev, pkt_buf);
2301         if (ret)
2302                 dev_err(&hdev->pdev->dev,
2303                         "could not configure common waterline %d\n", ret);
2304
2305 out:
2306         kfree(pkt_buf);
2307         return ret;
2308 }
2309
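/* Fill the RoCE handle with vector, netdev and io_base information taken
 * from the PF; fails when not enough MSI vectors are left for RoCE.
 */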
2310 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2311 {
2312         struct hnae3_handle *roce = &vport->roce;
2313         struct hnae3_handle *nic = &vport->nic;
2314
2315         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2316
2317         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2318             vport->back->num_msi_left == 0)
2319                 return -EINVAL;
2320
2321         roce->rinfo.base_vector = vport->back->roce_base_vector;
2322
2323         roce->rinfo.netdev = nic->kinfo.netdev;
2324         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2325
2326         roce->pdev = nic->pdev;
2327         roce->ae_algo = nic->ae_algo;
2328         roce->numa_node_mask = nic->numa_node_mask;
2329
2330         return 0;
2331 }
2332
2333 static int hclge_init_msi(struct hclge_dev *hdev)
2334 {
2335         struct pci_dev *pdev = hdev->pdev;
2336         int vectors;
2337         int i;
2338
2339         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2340                                         hdev->num_msi,
2341                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2342         if (vectors < 0) {
2343                 dev_err(&pdev->dev,
2344                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2345                         vectors);
2346                 return vectors;
2347         }
2348         if (vectors < hdev->num_msi)
2349                 dev_warn(&hdev->pdev->dev,
2350                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2351                          hdev->num_msi, vectors);
2352
2353         hdev->num_msi = vectors;
2354         hdev->num_msi_left = vectors;
2355
2356         hdev->base_msi_vector = pdev->irq;
2357         hdev->roce_base_vector = hdev->base_msi_vector +
2358                                 hdev->roce_base_msix_offset;
2359
2360         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361                                            sizeof(u16), GFP_KERNEL);
2362         if (!hdev->vector_status) {
2363                 pci_free_irq_vectors(pdev);
2364                 return -ENOMEM;
2365         }
2366
2367         for (i = 0; i < hdev->num_msi; i++)
2368                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2369
2370         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2371                                         sizeof(int), GFP_KERNEL);
2372         if (!hdev->vector_irq) {
2373                 pci_free_irq_vectors(pdev);
2374                 return -ENOMEM;
2375         }
2376
2377         return 0;
2378 }
2379
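/* Only 10M and 100M links support half duplex; force full duplex otherwise. */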
2380 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2381 {
2382         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2383                 duplex = HCLGE_MAC_FULL;
2384
2385         return duplex;
2386 }
2387
2388 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2389                                       u8 duplex)
2390 {
2391         struct hclge_config_mac_speed_dup_cmd *req;
2392         struct hclge_desc desc;
2393         int ret;
2394
2395         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2396
2397         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2398
2399         if (duplex)
2400                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2401
2402         switch (speed) {
2403         case HCLGE_MAC_SPEED_10M:
2404                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2405                                 HCLGE_CFG_SPEED_S, 6);
2406                 break;
2407         case HCLGE_MAC_SPEED_100M:
2408                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2409                                 HCLGE_CFG_SPEED_S, 7);
2410                 break;
2411         case HCLGE_MAC_SPEED_1G:
2412                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2413                                 HCLGE_CFG_SPEED_S, 0);
2414                 break;
2415         case HCLGE_MAC_SPEED_10G:
2416                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2417                                 HCLGE_CFG_SPEED_S, 1);
2418                 break;
2419         case HCLGE_MAC_SPEED_25G:
2420                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2421                                 HCLGE_CFG_SPEED_S, 2);
2422                 break;
2423         case HCLGE_MAC_SPEED_40G:
2424                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2425                                 HCLGE_CFG_SPEED_S, 3);
2426                 break;
2427         case HCLGE_MAC_SPEED_50G:
2428                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2429                                 HCLGE_CFG_SPEED_S, 4);
2430                 break;
2431         case HCLGE_MAC_SPEED_100G:
2432                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2433                                 HCLGE_CFG_SPEED_S, 5);
2434                 break;
2435         default:
2436                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2437                 return -EINVAL;
2438         }
2439
2440         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2441                       1);
2442
2443         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2444         if (ret) {
2445                 dev_err(&hdev->pdev->dev,
2446                         "mac speed/duplex config cmd failed %d.\n", ret);
2447                 return ret;
2448         }
2449
2450         return 0;
2451 }
2452
2453 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2454 {
2455         struct hclge_mac *mac = &hdev->hw.mac;
2456         int ret;
2457
2458         duplex = hclge_check_speed_dup(duplex, speed);
2459         if (!mac->support_autoneg && mac->speed == speed &&
2460             mac->duplex == duplex)
2461                 return 0;
2462
2463         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2464         if (ret)
2465                 return ret;
2466
2467         hdev->hw.mac.speed = speed;
2468         hdev->hw.mac.duplex = duplex;
2469
2470         return 0;
2471 }
2472
2473 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2474                                      u8 duplex)
2475 {
2476         struct hclge_vport *vport = hclge_get_vport(handle);
2477         struct hclge_dev *hdev = vport->back;
2478
2479         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2480 }
2481
2482 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2483 {
2484         struct hclge_config_auto_neg_cmd *req;
2485         struct hclge_desc desc;
2486         u32 flag = 0;
2487         int ret;
2488
2489         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2490
2491         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2492         if (enable)
2493                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2494         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2495
2496         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2497         if (ret)
2498                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2499                         ret);
2500
2501         return ret;
2502 }
2503
2504 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2505 {
2506         struct hclge_vport *vport = hclge_get_vport(handle);
2507         struct hclge_dev *hdev = vport->back;
2508
2509         if (!hdev->hw.mac.support_autoneg) {
2510                 if (enable) {
2511                         dev_err(&hdev->pdev->dev,
2512                                 "autoneg is not supported by current port\n");
2513                         return -EOPNOTSUPP;
2514                 } else {
2515                         return 0;
2516                 }
2517         }
2518
2519         return hclge_set_autoneg_en(hdev, enable);
2520 }
2521
2522 static int hclge_get_autoneg(struct hnae3_handle *handle)
2523 {
2524         struct hclge_vport *vport = hclge_get_vport(handle);
2525         struct hclge_dev *hdev = vport->back;
2526         struct phy_device *phydev = hdev->hw.mac.phydev;
2527
2528         if (phydev)
2529                 return phydev->autoneg;
2530
2531         return hdev->hw.mac.autoneg;
2532 }
2533
2534 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2535 {
2536         struct hclge_vport *vport = hclge_get_vport(handle);
2537         struct hclge_dev *hdev = vport->back;
2538         int ret;
2539
2540         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2541
2542         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2543         if (ret)
2544                 return ret;
2545         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2546 }
2547
2548 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2549 {
2550         struct hclge_vport *vport = hclge_get_vport(handle);
2551         struct hclge_dev *hdev = vport->back;
2552
2553         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2554                 return hclge_set_autoneg_en(hdev, !halt);
2555
2556         return 0;
2557 }
2558
2559 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2560 {
2561         struct hclge_config_fec_cmd *req;
2562         struct hclge_desc desc;
2563         int ret;
2564
2565         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2566
2567         req = (struct hclge_config_fec_cmd *)desc.data;
2568         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2569                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2570         if (fec_mode & BIT(HNAE3_FEC_RS))
2571                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2572                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2573         if (fec_mode & BIT(HNAE3_FEC_BASER))
2574                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2575                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2576
2577         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2578         if (ret)
2579                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2580
2581         return ret;
2582 }
2583
2584 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2585 {
2586         struct hclge_vport *vport = hclge_get_vport(handle);
2587         struct hclge_dev *hdev = vport->back;
2588         struct hclge_mac *mac = &hdev->hw.mac;
2589         int ret;
2590
2591         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2592                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2593                 return -EINVAL;
2594         }
2595
2596         ret = hclge_set_fec_hw(hdev, fec_mode);
2597         if (ret)
2598                 return ret;
2599
2600         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2601         return 0;
2602 }
2603
2604 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2605                           u8 *fec_mode)
2606 {
2607         struct hclge_vport *vport = hclge_get_vport(handle);
2608         struct hclge_dev *hdev = vport->back;
2609         struct hclge_mac *mac = &hdev->hw.mac;
2610
2611         if (fec_ability)
2612                 *fec_ability = mac->fec_ability;
2613         if (fec_mode)
2614                 *fec_mode = mac->fec_mode;
2615 }
2616
2617 static int hclge_mac_init(struct hclge_dev *hdev)
2618 {
2619         struct hclge_mac *mac = &hdev->hw.mac;
2620         int ret;
2621
2622         hdev->support_sfp_query = true;
2623         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2624         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2625                                          hdev->hw.mac.duplex);
2626         if (ret)
2627                 return ret;
2628
2629         if (hdev->hw.mac.support_autoneg) {
2630                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2631                 if (ret)
2632                         return ret;
2633         }
2634
2635         mac->link = 0;
2636
2637         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2638                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2639                 if (ret)
2640                         return ret;
2641         }
2642
2643         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2644         if (ret) {
2645                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2646                 return ret;
2647         }
2648
2649         ret = hclge_set_default_loopback(hdev);
2650         if (ret)
2651                 return ret;
2652
2653         ret = hclge_buffer_alloc(hdev);
2654         if (ret)
2655                 dev_err(&hdev->pdev->dev,
2656                         "allocate buffer fail, ret=%d\n", ret);
2657
2658         return ret;
2659 }
2660
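/* The mailbox, reset and periodic service requests below all funnel into the
 * same delayed work, queued on a CPU taken from the affinity mask; the
 * MBX/RST state bits prevent a request from being scheduled twice.
 */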
2661 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2662 {
2663         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2664             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2665                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2666                                     hclge_wq, &hdev->service_task, 0);
2667 }
2668
2669 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2670 {
2671         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2672             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2673                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2674                                     hclge_wq, &hdev->service_task, 0);
2675 }
2676
2677 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2678 {
2679         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2680             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2681                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2682                                     hclge_wq, &hdev->service_task,
2683                                     delay_time);
2684 }
2685
2686 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2687 {
2688         struct hclge_link_status_cmd *req;
2689         struct hclge_desc desc;
2690         int link_status;
2691         int ret;
2692
2693         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2694         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2695         if (ret) {
2696                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2697                         ret);
2698                 return ret;
2699         }
2700
2701         req = (struct hclge_link_status_cmd *)desc.data;
2702         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2703
2704         return !!link_status;
2705 }
2706
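/* Report link up only when the MAC reports link and, if a PHY is attached
 * and running, the PHY also reports link.
 */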
2707 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2708 {
2709         unsigned int mac_state;
2710         int link_stat;
2711
2712         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2713                 return 0;
2714
2715         mac_state = hclge_get_mac_link_status(hdev);
2716
2717         if (hdev->hw.mac.phydev) {
2718                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2719                         link_stat = mac_state &
2720                                 hdev->hw.mac.phydev->link;
2721                 else
2722                         link_stat = 0;
2723
2724         } else {
2725                 link_stat = mac_state;
2726         }
2727
2728         return !!link_stat;
2729 }
2730
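/* Propagate MAC/PHY link state changes to the NIC and RoCE clients of
 * every vport.
 */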
2731 static void hclge_update_link_status(struct hclge_dev *hdev)
2732 {
2733         struct hnae3_client *rclient = hdev->roce_client;
2734         struct hnae3_client *client = hdev->nic_client;
2735         struct hnae3_handle *rhandle;
2736         struct hnae3_handle *handle;
2737         int state;
2738         int i;
2739
2740         if (!client)
2741                 return;
2742
2743         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2744                 return;
2745
2746         state = hclge_get_mac_phy_link(hdev);
2747         if (state != hdev->hw.mac.link) {
2748                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2749                         handle = &hdev->vport[i].nic;
2750                         client->ops->link_status_change(handle, state);
2751                         hclge_config_mac_tnl_int(hdev, state);
2752                         rhandle = &hdev->vport[i].roce;
2753                         if (rclient && rclient->ops->link_status_change)
2754                                 rclient->ops->link_status_change(rhandle,
2755                                                                  state);
2756                 }
2757                 hdev->hw.mac.link = state;
2758         }
2759
2760         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2761 }
2762
2763 static void hclge_update_port_capability(struct hclge_mac *mac)
2764 {
2765         /* update fec ability by speed */
2766         hclge_convert_setting_fec(mac);
2767
2768         /* firmware cannot identify backplane type; the media type
2769          * read from configuration can help deal with it
2770          */
2771         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2772             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2773                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2774         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2775                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2776
2777         if (mac->support_autoneg) {
2778                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2779                 linkmode_copy(mac->advertising, mac->supported);
2780         } else {
2781                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2782                                    mac->supported);
2783                 linkmode_zero(mac->advertising);
2784         }
2785 }
2786
2787 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2788 {
2789         struct hclge_sfp_info_cmd *resp;
2790         struct hclge_desc desc;
2791         int ret;
2792
2793         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2794         resp = (struct hclge_sfp_info_cmd *)desc.data;
2795         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2796         if (ret == -EOPNOTSUPP) {
2797                 dev_warn(&hdev->pdev->dev,
2798                          "IMP does not support get SFP speed %d\n", ret);
2799                 return ret;
2800         } else if (ret) {
2801                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2802                 return ret;
2803         }
2804
2805         *speed = le32_to_cpu(resp->speed);
2806
2807         return 0;
2808 }
2809
2810 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2811 {
2812         struct hclge_sfp_info_cmd *resp;
2813         struct hclge_desc desc;
2814         int ret;
2815
2816         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2817         resp = (struct hclge_sfp_info_cmd *)desc.data;
2818
2819         resp->query_type = QUERY_ACTIVE_SPEED;
2820
2821         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2822         if (ret == -EOPNOTSUPP) {
2823                 dev_warn(&hdev->pdev->dev,
2824                          "IMP does not support get SFP info %d\n", ret);
2825                 return ret;
2826         } else if (ret) {
2827                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2828                 return ret;
2829         }
2830
2831         /* In some cases, the mac speed got from IMP may be 0, and it should
2832          * not be set to mac->speed.
2833          */
2834         if (!le32_to_cpu(resp->speed))
2835                 return 0;
2836
2837         mac->speed = le32_to_cpu(resp->speed);
2838         /* if resp->speed_ability is 0, it means it is an old version of
2839          * firmware, so do not update these params
2840          */
2841         if (resp->speed_ability) {
2842                 mac->module_type = le32_to_cpu(resp->module_type);
2843                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2844                 mac->autoneg = resp->autoneg;
2845                 mac->support_autoneg = resp->autoneg_ability;
2846                 mac->speed_type = QUERY_ACTIVE_SPEED;
2847                 if (!resp->active_fec)
2848                         mac->fec_mode = 0;
2849                 else
2850                         mac->fec_mode = BIT(resp->active_fec);
2851         } else {
2852                 mac->speed_type = QUERY_SFP_SPEED;
2853         }
2854
2855         return 0;
2856 }
2857
2858 static int hclge_update_port_info(struct hclge_dev *hdev)
2859 {
2860         struct hclge_mac *mac = &hdev->hw.mac;
2861         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2862         int ret;
2863
2864         /* get the port info from SFP cmd if not copper port */
2865         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2866                 return 0;
2867
2868         /* if IMP does not support get SFP/qSFP info, return directly */
2869         if (!hdev->support_sfp_query)
2870                 return 0;
2871
2872         if (hdev->pdev->revision >= 0x21)
2873                 ret = hclge_get_sfp_info(hdev, mac);
2874         else
2875                 ret = hclge_get_sfp_speed(hdev, &speed);
2876
2877         if (ret == -EOPNOTSUPP) {
2878                 hdev->support_sfp_query = false;
2879                 return ret;
2880         } else if (ret) {
2881                 return ret;
2882         }
2883
2884         if (hdev->pdev->revision >= 0x21) {
2885                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2886                         hclge_update_port_capability(mac);
2887                         return 0;
2888                 }
2889                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2890                                                HCLGE_MAC_FULL);
2891         } else {
2892                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2893                         return 0; /* do nothing if no SFP */
2894
2895                 /* must config full duplex for SFP */
2896                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2897         }
2898 }
2899
2900 static int hclge_get_status(struct hnae3_handle *handle)
2901 {
2902         struct hclge_vport *vport = hclge_get_vport(handle);
2903         struct hclge_dev *hdev = vport->back;
2904
2905         hclge_update_link_status(hdev);
2906
2907         return hdev->hw.mac.link;
2908 }
2909
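/* Map a 0-based VF index to its vport entry. Returns NULL when SRIOV is
 * disabled or the index is out of range; VF vports follow the PF vport,
 * starting at HCLGE_VF_VPORT_START_NUM.
 */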
2910 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2911 {
2912         if (!pci_num_vf(hdev->pdev)) {
2913                 dev_err(&hdev->pdev->dev,
2914                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2915                 return NULL;
2916         }
2917
2918         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2919                 dev_err(&hdev->pdev->dev,
2920                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
2921                         vf, pci_num_vf(hdev->pdev));
2922                 return NULL;
2923         }
2924
2925         /* VF index starts from 1 in vport */
2926         vf += HCLGE_VF_VPORT_START_NUM;
2927         return &hdev->vport[vf];
2928 }
2929
2930 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2931                                struct ifla_vf_info *ivf)
2932 {
2933         struct hclge_vport *vport = hclge_get_vport(handle);
2934         struct hclge_dev *hdev = vport->back;
2935
2936         vport = hclge_get_vf_vport(hdev, vf);
2937         if (!vport)
2938                 return -EINVAL;
2939
2940         ivf->vf = vf;
2941         ivf->linkstate = vport->vf_info.link_state;
2942         ivf->spoofchk = vport->vf_info.spoofchk;
2943         ivf->trusted = vport->vf_info.trusted;
2944         ivf->min_tx_rate = 0;
2945         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2946         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2947         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2948         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2949         ether_addr_copy(ivf->mac, vport->vf_info.mac);
2950
2951         return 0;
2952 }
2953
2954 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2955                                    int link_state)
2956 {
2957         struct hclge_vport *vport = hclge_get_vport(handle);
2958         struct hclge_dev *hdev = vport->back;
2959
2960         vport = hclge_get_vf_vport(hdev, vf);
2961         if (!vport)
2962                 return -EINVAL;
2963
2964         vport->vf_info.link_state = link_state;
2965
2966         return 0;
2967 }
2968
2969 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2970 {
2971         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2972
2973         /* fetch the events from their corresponding regs */
2974         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2975         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2976         msix_src_reg = hclge_read_dev(&hdev->hw,
2977                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2978
2979         /* Assumption: If by any chance reset and mailbox events are reported
2980          * together then we will only process reset event in this go and will
2981          * defer the processing of the mailbox events. Since we have not
2982          * cleared the RX CMDQ event this time, we will receive another
2983          * interrupt from H/W just for the mailbox.
2984          *
2985          * check for vector0 reset event sources
2986          */
2987         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2988                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2989                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2990                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2991                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2992                 hdev->rst_stats.imp_rst_cnt++;
2993                 return HCLGE_VECTOR0_EVENT_RST;
2994         }
2995
2996         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2997                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2998                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2999                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3000                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3001                 hdev->rst_stats.global_rst_cnt++;
3002                 return HCLGE_VECTOR0_EVENT_RST;
3003         }
3004
3005         /* check for vector0 msix event source */
3006         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3007                 *clearval = msix_src_reg;
3008                 return HCLGE_VECTOR0_EVENT_ERR;
3009         }
3010
3011         /* check for vector0 mailbox(=CMDQ RX) event source */
3012         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3013                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3014                 *clearval = cmdq_src_reg;
3015                 return HCLGE_VECTOR0_EVENT_MBX;
3016         }
3017
3018         /* print other vector0 event source */
3019         dev_info(&hdev->pdev->dev,
3020                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3021                  cmdq_src_reg, msix_src_reg);
3022         *clearval = msix_src_reg;
3023
3024         return HCLGE_VECTOR0_EVENT_OTHER;
3025 }
3026
3027 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3028                                     u32 regclr)
3029 {
3030         switch (event_type) {
3031         case HCLGE_VECTOR0_EVENT_RST:
3032                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3033                 break;
3034         case HCLGE_VECTOR0_EVENT_MBX:
3035                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3036                 break;
3037         default:
3038                 break;
3039         }
3040 }
3041
3042 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3043 {
3044         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3045                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3046                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3047                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3048         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3049 }
3050
3051 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3052 {
3053         writel(enable ? 1 : 0, vector->addr);
3054 }
3055
3056 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3057 {
3058         struct hclge_dev *hdev = data;
3059         u32 clearval = 0;
3060         u32 event_cause;
3061
3062         hclge_enable_vector(&hdev->misc_vector, false);
3063         event_cause = hclge_check_event_cause(hdev, &clearval);
3064
3065         /* vector 0 interrupt is shared with reset and mailbox source events. */
3066         switch (event_cause) {
3067         case HCLGE_VECTOR0_EVENT_ERR:
3068                 /* we do not know what type of reset is required now. This could
3069                  * only be decided after we fetch the type of errors which
3070                  * caused this event. Therefore, we will do below for now:
3071                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3072                  *    have deferred the type of reset to be used.
3073                  * 2. Schedule the reset service task.
3074                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3075                  *    will fetch the correct type of reset. This would be done
3076                  *    by first decoding the types of errors.
3077                  */
3078                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3079                 /* fall through */
3080         case HCLGE_VECTOR0_EVENT_RST:
3081                 hclge_reset_task_schedule(hdev);
3082                 break;
3083         case HCLGE_VECTOR0_EVENT_MBX:
3084                 /* If we are here then,
3085                  * 1. Either we are not handling any mbx task and we are not
3086                  *    scheduled as well
3087                  *                        OR
3088                  * 2. We could be handling an mbx task but nothing more is
3089                  *    scheduled.
3090                  * In both cases, we should schedule mbx task as there are more
3091                  * mbx messages reported by this interrupt.
3092                  */
3093                 hclge_mbx_task_schedule(hdev);
3094                 break;
3095         default:
3096                 dev_warn(&hdev->pdev->dev,
3097                          "received unknown or unhandled event of vector0\n");
3098                 break;
3099         }
3100
3101         hclge_clear_event_cause(hdev, event_cause, clearval);
3102
3103         /* Enable interrupt if it is not caused by reset. And when
3104          * clearval is equal to 0, it means the interrupt status may be
3105          * cleared by hardware before the driver reads the status register.
3106          * For this case, vector0 interrupt should also be enabled.
3107          */
3108         if (!clearval ||
3109             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3110                 hclge_enable_vector(&hdev->misc_vector, true);
3111         }
3112
3113         return IRQ_HANDLED;
3114 }
3115
3116 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3117 {
3118         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3119                 dev_warn(&hdev->pdev->dev,
3120                          "vector(vector_id %d) has been freed.\n", vector_id);
3121                 return;
3122         }
3123
3124         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3125         hdev->num_msi_left += 1;
3126         hdev->num_msi_used -= 1;
3127 }
3128
3129 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3130 {
3131         struct hclge_misc_vector *vector = &hdev->misc_vector;
3132
3133         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3134
3135         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3136         hdev->vector_status[0] = 0;
3137
3138         hdev->num_msi_left -= 1;
3139         hdev->num_msi_used += 1;
3140 }
3141
3142 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3143                                       const cpumask_t *mask)
3144 {
3145         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3146                                               affinity_notify);
3147
3148         cpumask_copy(&hdev->affinity_mask, mask);
3149 }
3150
3151 static void hclge_irq_affinity_release(struct kref *ref)
3152 {
3153 }
3154
3155 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3156 {
3157         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3158                               &hdev->affinity_mask);
3159
3160         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3161         hdev->affinity_notify.release = hclge_irq_affinity_release;
3162         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3163                                   &hdev->affinity_notify);
3164 }
3165
3166 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3167 {
3168         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3169         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3170 }
3171
3172 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3173 {
3174         int ret;
3175
3176         hclge_get_misc_vector(hdev);
3177
3178         /* this would be explicitly freed in the end */
3179         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3180                  HCLGE_NAME, pci_name(hdev->pdev));
3181         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3182                           0, hdev->misc_vector.name, hdev);
3183         if (ret) {
3184                 hclge_free_vector(hdev, 0);
3185                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3186                         hdev->misc_vector.vector_irq);
3187         }
3188
3189         return ret;
3190 }
3191
3192 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3193 {
3194         free_irq(hdev->misc_vector.vector_irq, hdev);
3195         hclge_free_vector(hdev, 0);
3196 }
3197
3198 int hclge_notify_client(struct hclge_dev *hdev,
3199                         enum hnae3_reset_notify_type type)
3200 {
3201         struct hnae3_client *client = hdev->nic_client;
3202         u16 i;
3203
3204         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3205                 return 0;
3206
3207         if (!client->ops->reset_notify)
3208                 return -EOPNOTSUPP;
3209
3210         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3211                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3212                 int ret;
3213
3214                 ret = client->ops->reset_notify(handle, type);
3215                 if (ret) {
3216                         dev_err(&hdev->pdev->dev,
3217                                 "notify nic client failed %d(%d)\n", type, ret);
3218                         return ret;
3219                 }
3220         }
3221
3222         return 0;
3223 }
3224
3225 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3226                                     enum hnae3_reset_notify_type type)
3227 {
3228         struct hnae3_client *client = hdev->roce_client;
3229         int ret = 0;
3230         u16 i;
3231
3232         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3233                 return 0;
3234
3235         if (!client->ops->reset_notify)
3236                 return -EOPNOTSUPP;
3237
3238         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3239                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3240
3241                 ret = client->ops->reset_notify(handle, type);
3242                 if (ret) {
3243                         dev_err(&hdev->pdev->dev,
3244                                 "notify roce client failed %d(%d)",
3245                                 type, ret);
3246                         return ret;
3247                 }
3248         }
3249
3250         return ret;
3251 }
3252
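/* Poll the hardware reset status register for the current reset type
 * until the reset-in-progress bit clears, and give up with -EBUSY after
 * HCLGE_RESET_WAIT_CNT polls.
 */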
3253 static int hclge_reset_wait(struct hclge_dev *hdev)
3254 {
3255 #define HCLGE_RESET_WAIT_MS     100
3256 #define HCLGE_RESET_WAIT_CNT    350
3257
3258         u32 val, reg, reg_bit;
3259         u32 cnt = 0;
3260
3261         switch (hdev->reset_type) {
3262         case HNAE3_IMP_RESET:
3263                 reg = HCLGE_GLOBAL_RESET_REG;
3264                 reg_bit = HCLGE_IMP_RESET_BIT;
3265                 break;
3266         case HNAE3_GLOBAL_RESET:
3267                 reg = HCLGE_GLOBAL_RESET_REG;
3268                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3269                 break;
3270         case HNAE3_FUNC_RESET:
3271                 reg = HCLGE_FUN_RST_ING;
3272                 reg_bit = HCLGE_FUN_RST_ING_B;
3273                 break;
3274         default:
3275                 dev_err(&hdev->pdev->dev,
3276                         "Wait for unsupported reset type: %d\n",
3277                         hdev->reset_type);
3278                 return -EINVAL;
3279         }
3280
3281         val = hclge_read_dev(&hdev->hw, reg);
3282         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3283                 msleep(HCLGE_RESET_WAIT_MS);
3284                 val = hclge_read_dev(&hdev->hw, reg);
3285                 cnt++;
3286         }
3287
3288         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3289                 dev_warn(&hdev->pdev->dev,
3290                          "Wait for reset timeout: %d\n", hdev->reset_type);
3291                 return -EBUSY;
3292         }
3293
3294         return 0;
3295 }
3296
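/* Tell the firmware to set or clear the FUNC_RST_ING state for the given
 * VF, so the VF can detect that a function level reset is in progress.
 */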
3297 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3298 {
3299         struct hclge_vf_rst_cmd *req;
3300         struct hclge_desc desc;
3301
3302         req = (struct hclge_vf_rst_cmd *)desc.data;
3303         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3304         req->dest_vfid = func_id;
3305
3306         if (reset)
3307                 req->vf_rst = 0x1;
3308
3309         return hclge_cmd_send(&hdev->hw, &desc, 1);
3310 }
3311
3312 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3313 {
3314         int i;
3315
3316         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3317                 struct hclge_vport *vport = &hdev->vport[i];
3318                 int ret;
3319
3320                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3321                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3322                 if (ret) {
3323                         dev_err(&hdev->pdev->dev,
3324                                 "set vf(%u) rst failed %d!\n",
3325                                 vport->vport_id, ret);
3326                         return ret;
3327                 }
3328
3329                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3330                         continue;
3331
3332                 /* Inform VF to process the reset.
3333                  * hclge_inform_reset_assert_to_vf may fail if VF
3334                  * driver is not loaded.
3335                  */
3336                 ret = hclge_inform_reset_assert_to_vf(vport);
3337                 if (ret)
3338                         dev_warn(&hdev->pdev->dev,
3339                                  "inform reset to vf(%u) failed %d!\n",
3340                                  vport->vport_id, ret);
3341         }
3342
3343         return 0;
3344 }
3345
3346 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3347 {
3348         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3349             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3350             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3351                 return;
3352
3353         hclge_mbx_handler(hdev);
3354
3355         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3356 }
3357
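/* Wait for all VFs to report that they have stopped IO before the PF
 * reset proceeds: keep servicing mailbox requests and re-sending the
 * query until all_vf_ready is set, the firmware reports an error, or
 * HCLGE_PF_RESET_SYNC_CNT retries have elapsed.
 */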
3358 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3359 {
3360         struct hclge_pf_rst_sync_cmd *req;
3361         struct hclge_desc desc;
3362         int cnt = 0;
3363         int ret;
3364
3365         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3366         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3367
3368         do {
3369                 /* vf needs to down its netdev by mbx during PF or FLR reset */
3370                 hclge_mailbox_service_task(hdev);
3371
3372                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3373                 /* for compatibility with old firmware, wait
3374                  * 100 ms for VF to stop IO
3375                  */
3376                 if (ret == -EOPNOTSUPP) {
3377                         msleep(HCLGE_RESET_SYNC_TIME);
3378                         return;
3379                 } else if (ret) {
3380                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3381                                  ret);
3382                         return;
3383                 } else if (req->all_vf_ready) {
3384                         return;
3385                 }
3386                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3387                 hclge_cmd_reuse_desc(&desc, true);
3388         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3389
3390         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3391 }
3392
3393 void hclge_report_hw_error(struct hclge_dev *hdev,
3394                            enum hnae3_hw_error_type type)
3395 {
3396         struct hnae3_client *client = hdev->nic_client;
3397         u16 i;
3398
3399         if (!client || !client->ops->process_hw_error ||
3400             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3401                 return;
3402
3403         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3404                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3405 }
3406
3407 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3408 {
3409         u32 reg_val;
3410
3411         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3412         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3413                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3414                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3415                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3416         }
3417
3418         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3419                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3420                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3421                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3422         }
3423 }
3424
3425 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3426 {
3427         struct hclge_desc desc;
3428         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3429         int ret;
3430
3431         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3432         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3433         req->fun_reset_vfid = func_id;
3434
3435         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3436         if (ret)
3437                 dev_err(&hdev->pdev->dev,
3438                         "send function reset cmd fail, status =%d\n", ret);
3439
3440         return ret;
3441 }
3442
3443 static void hclge_do_reset(struct hclge_dev *hdev)
3444 {
3445         struct hnae3_handle *handle = &hdev->vport[0].nic;
3446         struct pci_dev *pdev = hdev->pdev;
3447         u32 val;
3448
3449         if (hclge_get_hw_reset_stat(handle)) {
3450                 dev_info(&pdev->dev, "hardware reset not finish\n");
3451                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3452                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3453                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3454                 return;
3455         }
3456
3457         switch (hdev->reset_type) {
3458         case HNAE3_GLOBAL_RESET:
3459                 dev_info(&pdev->dev, "global reset requested\n");
3460                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3461                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3462                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3463                 break;
3464         case HNAE3_FUNC_RESET:
3465                 dev_info(&pdev->dev, "PF reset requested\n");
3466                 /* schedule again to check later */
3467                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3468                 hclge_reset_task_schedule(hdev);
3469                 break;
3470         default:
3471                 dev_warn(&pdev->dev,
3472                          "unsupported reset type: %d\n", hdev->reset_type);
3473                 break;
3474         }
3475 }
3476
3477 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3478                                                    unsigned long *addr)
3479 {
3480         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3481         struct hclge_dev *hdev = ae_dev->priv;
3482
3483         /* first, resolve any unknown reset type to the known type(s) */
3484         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3485                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3486                                         HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3487                 /* we will intentionally ignore any errors from this function
3488                  * as we will end up in *some* reset request in any case
3489                  */
3490                 if (hclge_handle_hw_msix_error(hdev, addr))
3491                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3492                                  msix_sts_reg);
3493
3494                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3495                 /* We deferred the clearing of the error event which caused
3496                  * the interrupt since it was not possible to do that in
3497                  * interrupt context (and this is the reason we introduced the
3498                  * new UNKNOWN reset type). Now that the errors have been
3499                  * handled and cleared in hardware, we can safely enable
3500                  * interrupts. This is an exception to the norm.
3501                  */
3502                 hclge_enable_vector(&hdev->misc_vector, true);
3503         }
3504
3505         /* return the highest priority reset level amongst all */
3506         if (test_bit(HNAE3_IMP_RESET, addr)) {
3507                 rst_level = HNAE3_IMP_RESET;
3508                 clear_bit(HNAE3_IMP_RESET, addr);
3509                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3510                 clear_bit(HNAE3_FUNC_RESET, addr);
3511         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3512                 rst_level = HNAE3_GLOBAL_RESET;
3513                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3514                 clear_bit(HNAE3_FUNC_RESET, addr);
3515         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3516                 rst_level = HNAE3_FUNC_RESET;
3517                 clear_bit(HNAE3_FUNC_RESET, addr);
3518         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3519                 rst_level = HNAE3_FLR_RESET;
3520                 clear_bit(HNAE3_FLR_RESET, addr);
3521         }
3522
3523         if (hdev->reset_type != HNAE3_NONE_RESET &&
3524             rst_level < hdev->reset_type)
3525                 return HNAE3_NONE_RESET;
3526
3527         return rst_level;
3528 }
3529
3530 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3531 {
3532         u32 clearval = 0;
3533
3534         switch (hdev->reset_type) {
3535         case HNAE3_IMP_RESET:
3536                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3537                 break;
3538         case HNAE3_GLOBAL_RESET:
3539                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3540                 break;
3541         default:
3542                 break;
3543         }
3544
3545         if (!clearval)
3546                 return;
3547
3548         /* For revision 0x20, the reset interrupt source
3549          * can only be cleared after the hardware reset is done
3550          */
3551         if (hdev->pdev->revision == 0x20)
3552                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3553                                 clearval);
3554
3555         hclge_enable_vector(&hdev->misc_vector, true);
3556 }
3557
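/* Set or clear the software-ready handshake bit so the firmware knows
 * whether the driver has finished its preparatory work for the reset.
 */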
3558 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3559 {
3560         u32 reg_val;
3561
3562         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3563         if (enable)
3564                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3565         else
3566                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3567
3568         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3569 }
3570
3571 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3572 {
3573         int ret;
3574
3575         ret = hclge_set_all_vf_rst(hdev, true);
3576         if (ret)
3577                 return ret;
3578
3579         hclge_func_reset_sync_vf(hdev);
3580
3581         return 0;
3582 }
3583
3584 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3585 {
3586         u32 reg_val;
3587         int ret = 0;
3588
3589         switch (hdev->reset_type) {
3590         case HNAE3_FUNC_RESET:
3591                 ret = hclge_func_reset_notify_vf(hdev);
3592                 if (ret)
3593                         return ret;
3594
3595                 ret = hclge_func_reset_cmd(hdev, 0);
3596                 if (ret) {
3597                         dev_err(&hdev->pdev->dev,
3598                                 "asserting function reset fail %d!\n", ret);
3599                         return ret;
3600                 }
3601
3602                 /* After performing PF reset, it is not necessary to do the
3603                  * mailbox handling or send any command to firmware, because
3604                  * any mailbox handling or command to firmware is only valid
3605                  * after hclge_cmd_init is called.
3606                  */
3607                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3608                 hdev->rst_stats.pf_rst_cnt++;
3609                 break;
3610         case HNAE3_FLR_RESET:
3611                 ret = hclge_func_reset_notify_vf(hdev);
3612                 if (ret)
3613                         return ret;
3614                 break;
3615         case HNAE3_IMP_RESET:
3616                 hclge_handle_imp_error(hdev);
3617                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3618                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3619                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3620                 break;
3621         default:
3622                 break;
3623         }
3624
3625         /* inform hardware that preparatory work is done */
3626         msleep(HCLGE_RESET_SYNC_TIME);
3627         hclge_reset_handshake(hdev, true);
3628         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3629
3630         return ret;
3631 }
3632
3633 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3634 {
3635 #define MAX_RESET_FAIL_CNT 5
3636
3637         if (hdev->reset_pending) {
3638                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3639                          hdev->reset_pending);
3640                 return true;
3641         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3642                    HCLGE_RESET_INT_M) {
3643                 dev_info(&hdev->pdev->dev,
3644                          "reset failed because new reset interrupt\n");
3645                 hclge_clear_reset_cause(hdev);
3646                 return false;
3647         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3648                 hdev->rst_stats.reset_fail_cnt++;
3649                 set_bit(hdev->reset_type, &hdev->reset_pending);
3650                 dev_info(&hdev->pdev->dev,
3651                          "re-schedule reset task(%u)\n",
3652                          hdev->rst_stats.reset_fail_cnt);
3653                 return true;
3654         }
3655
3656         hclge_clear_reset_cause(hdev);
3657
3658         /* recover the handshake status when reset fails */
3659         hclge_reset_handshake(hdev, true);
3660
3661         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3662
3663         hclge_dbg_dump_rst_info(hdev);
3664
3665         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3666
3667         return false;
3668 }
3669
3670 static int hclge_set_rst_done(struct hclge_dev *hdev)
3671 {
3672         struct hclge_pf_rst_done_cmd *req;
3673         struct hclge_desc desc;
3674         int ret;
3675
3676         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3677         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3678         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3679
3680         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3681         /* To be compatible with the old firmware, which does not support
3682          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3683          * return success
3684          */
3685         if (ret == -EOPNOTSUPP) {
3686                 dev_warn(&hdev->pdev->dev,
3687                          "current firmware does not support command(0x%x)!\n",
3688                          HCLGE_OPC_PF_RST_DONE);
3689                 return 0;
3690         } else if (ret) {
3691                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3692                         ret);
3693         }
3694
3695         return ret;
3696 }
3697
3698 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3699 {
3700         int ret = 0;
3701
3702         switch (hdev->reset_type) {
3703         case HNAE3_FUNC_RESET:
3704                 /* fall through */
3705         case HNAE3_FLR_RESET:
3706                 ret = hclge_set_all_vf_rst(hdev, false);
3707                 break;
3708         case HNAE3_GLOBAL_RESET:
3709                 /* fall through */
3710         case HNAE3_IMP_RESET:
3711                 ret = hclge_set_rst_done(hdev);
3712                 break;
3713         default:
3714                 break;
3715         }
3716
3717         /* clear up the handshake status after re-initialization is done */
3718         hclge_reset_handshake(hdev, false);
3719
3720         return ret;
3721 }
3722
3723 static int hclge_reset_stack(struct hclge_dev *hdev)
3724 {
3725         int ret;
3726
3727         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3728         if (ret)
3729                 return ret;
3730
3731         ret = hclge_reset_ae_dev(hdev->ae_dev);
3732         if (ret)
3733                 return ret;
3734
3735         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3736 }
3737
3738 static int hclge_reset_prepare(struct hclge_dev *hdev)
3739 {
3740         int ret;
3741
3742         hdev->rst_stats.reset_cnt++;
3743         /* perform reset of the stack & ae device for a client */
3744         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3745         if (ret)
3746                 return ret;
3747
3748         rtnl_lock();
3749         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3750         rtnl_unlock();
3751         if (ret)
3752                 return ret;
3753
3754         return hclge_reset_prepare_wait(hdev);
3755 }
3756
3757 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3758 {
3759         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3760         enum hnae3_reset_type reset_level;
3761         int ret;
3762
3763         hdev->rst_stats.hw_reset_done_cnt++;
3764
3765         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3766         if (ret)
3767                 return ret;
3768
3769         rtnl_lock();
3770         ret = hclge_reset_stack(hdev);
3771         rtnl_unlock();
3772         if (ret)
3773                 return ret;
3774
3775         hclge_clear_reset_cause(hdev);
3776
3777         ret = hclge_reset_prepare_up(hdev);
3778         if (ret)
3779                 return ret;
3780
3781
3782         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3783         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3784          * times
3785          */
3786         if (ret &&
3787             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3788                 return ret;
3789
3790         rtnl_lock();
3791         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3792         rtnl_unlock();
3793         if (ret)
3794                 return ret;
3795
3796         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3797         if (ret)
3798                 return ret;
3799
3800         hdev->last_reset_time = jiffies;
3801         hdev->rst_stats.reset_fail_cnt = 0;
3802         hdev->rst_stats.reset_done_cnt++;
3803         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3804
3805         /* if default_reset_request has a higher level reset request,
3806          * it should be handled as soon as possible, since some errors
3807          * need this kind of reset to be fixed.
3808          */
3809         reset_level = hclge_get_reset_level(ae_dev,
3810                                             &hdev->default_reset_request);
3811         if (reset_level != HNAE3_NONE_RESET)
3812                 set_bit(reset_level, &hdev->reset_request);
3813
3814         return 0;
3815 }
3816
3817 static void hclge_reset(struct hclge_dev *hdev)
3818 {
3819         if (hclge_reset_prepare(hdev))
3820                 goto err_reset;
3821
3822         if (hclge_reset_wait(hdev))
3823                 goto err_reset;
3824
3825         if (hclge_reset_rebuild(hdev))
3826                 goto err_reset;
3827
3828         return;
3829
3830 err_reset:
3831         if (hclge_reset_err_handle(hdev))
3832                 hclge_reset_task_schedule(hdev);
3833 }
3834
3835 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3836 {
3837         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3838         struct hclge_dev *hdev = ae_dev->priv;
3839
3840         /* We might end up getting called broadly because of 2 below cases:
3841          * 1. Recoverable error was conveyed through APEI and only way to bring
3842          *    normalcy is to reset.
3843          * 2. A new reset request from the stack due to timeout
3844          *
3845          * For the first case, error event might not have ae handle available.
3846          * Check if this is a new reset request and we are not here just because
3847          * last reset attempt did not succeed and watchdog hit us again. We will
3848          * know this if last reset request did not occur very recently (watchdog
3849          * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ).
3850          * In case of a new request we reset the "reset level" to PF reset.
3851          * And if it is a repeat reset request of the most recent one then we
3852          * want to make sure we throttle the reset request. Therefore, we will
3853          * not allow it again before 3*HZ times.
3854          */
3855         if (!handle)
3856                 handle = &hdev->vport[0].nic;
3857
3858         if (time_before(jiffies, (hdev->last_reset_time +
3859                                   HCLGE_RESET_INTERVAL))) {
3860                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3861                 return;
3862         } else if (hdev->default_reset_request) {
3863                 hdev->reset_level =
3864                         hclge_get_reset_level(ae_dev,
3865                                               &hdev->default_reset_request);
3866         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3867                 hdev->reset_level = HNAE3_FUNC_RESET;
3868         }
3869
3870         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3871                  hdev->reset_level);
3872
3873         /* request reset & schedule reset task */
3874         set_bit(hdev->reset_level, &hdev->reset_request);
3875         hclge_reset_task_schedule(hdev);
3876
3877         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3878                 hdev->reset_level++;
3879 }
3880
3881 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3882                                         enum hnae3_reset_type rst_type)
3883 {
3884         struct hclge_dev *hdev = ae_dev->priv;
3885
3886         set_bit(rst_type, &hdev->default_reset_request);
3887 }
3888
3889 static void hclge_reset_timer(struct timer_list *t)
3890 {
3891         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3892
3893         /* if default_reset_request has no value, it means that this reset
3894          * request has already been handled, so just return here
3895          */
3896         if (!hdev->default_reset_request)
3897                 return;
3898
3899         dev_info(&hdev->pdev->dev,
3900                  "triggering reset in reset timer\n");
3901         hclge_reset_event(hdev->pdev, NULL);
3902 }
3903
3904 static void hclge_reset_subtask(struct hclge_dev *hdev)
3905 {
3906         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3907
3908         /* check if there is any ongoing reset in the hardware. This status can
3909          * be checked from reset_pending. If there is, we need to wait for the
3910          * hardware to complete the reset.
3911          *    a. If we are able to figure out in reasonable time that the
3912          *       hardware has fully reset, we can proceed with the driver and
3913          *       client reset.
3914          *    b. else, we can come back later to check this status, so
3915          *       re-schedule now.
3916          */
3917         hdev->last_reset_time = jiffies;
3918         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3919         if (hdev->reset_type != HNAE3_NONE_RESET)
3920                 hclge_reset(hdev);
3921
3922         /* check if we got any *new* reset requests to be honored */
3923         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3924         if (hdev->reset_type != HNAE3_NONE_RESET)
3925                 hclge_do_reset(hdev);
3926
3927         hdev->reset_type = HNAE3_NONE_RESET;
3928 }
3929
3930 static void hclge_reset_service_task(struct hclge_dev *hdev)
3931 {
3932         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3933                 return;
3934
3935         down(&hdev->reset_sem);
3936         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3937
3938         hclge_reset_subtask(hdev);
3939
3940         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3941         up(&hdev->reset_sem);
3942 }
3943
3944 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3945 {
3946         int i;
3947
3948         /* start from vport 1, since PF (vport 0) is always alive */
3949         for (i = 1; i < hdev->num_alloc_vport; i++) {
3950                 struct hclge_vport *vport = &hdev->vport[i];
3951
3952                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3953                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3954
3955                 /* If vf is not alive, set to default value */
3956                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3957                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3958         }
3959 }
3960
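/* Periodic service work: link state, MAC table and promisc mode are
 * refreshed on every run, while the heavier updates (statistics, port
 * info, VLAN and aRFS housekeeping) are throttled to roughly once per
 * second via last_serv_processed.
 */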
3961 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3962 {
3963         unsigned long delta = round_jiffies_relative(HZ);
3964
3965         /* Always handle the link updating to make sure link state is
3966          * updated when it is triggered by mbx.
3967          */
3968         hclge_update_link_status(hdev);
3969         hclge_sync_mac_table(hdev);
3970         hclge_sync_promisc_mode(hdev);
3971
3972         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3973                 delta = jiffies - hdev->last_serv_processed;
3974
3975                 if (delta < round_jiffies_relative(HZ)) {
3976                         delta = round_jiffies_relative(HZ) - delta;
3977                         goto out;
3978                 }
3979         }
3980
3981         hdev->serv_processed_cnt++;
3982         hclge_update_vport_alive(hdev);
3983
3984         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3985                 hdev->last_serv_processed = jiffies;
3986                 goto out;
3987         }
3988
3989         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3990                 hclge_update_stats_for_all(hdev);
3991
3992         hclge_update_port_info(hdev);
3993         hclge_sync_vlan_filter(hdev);
3994
3995         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3996                 hclge_rfs_filter_expire(hdev);
3997
3998         hdev->last_serv_processed = jiffies;
3999
4000 out:
4001         hclge_task_schedule(hdev, delta);
4002 }
4003
4004 static void hclge_service_task(struct work_struct *work)
4005 {
4006         struct hclge_dev *hdev =
4007                 container_of(work, struct hclge_dev, service_task.work);
4008
4009         hclge_reset_service_task(hdev);
4010         hclge_mailbox_service_task(hdev);
4011         hclge_periodic_service_task(hdev);
4012
4013         /* Handle reset and mbx again in case periodical task delays the
4014          * handling by calling hclge_task_schedule() in
4015          * hclge_periodic_service_task().
4016          */
4017         hclge_reset_service_task(hdev);
4018         hclge_mailbox_service_task(hdev);
4019 }
4020
4021 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4022 {
4023         /* VF handle has no client */
4024         if (!handle->client)
4025                 return container_of(handle, struct hclge_vport, nic);
4026         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4027                 return container_of(handle, struct hclge_vport, roce);
4028         else
4029                 return container_of(handle, struct hclge_vport, nic);
4030 }
4031
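/* Allocate up to vector_num MSI-X vectors for the vport: scan
 * vector_status for free entries (vector 0 is reserved for misc), fill
 * in the IRQ number and the per-vport vector register address for each
 * one, and return how many were actually allocated.
 */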
4032 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4033                             struct hnae3_vector_info *vector_info)
4034 {
4035         struct hclge_vport *vport = hclge_get_vport(handle);
4036         struct hnae3_vector_info *vector = vector_info;
4037         struct hclge_dev *hdev = vport->back;
4038         int alloc = 0;
4039         int i, j;
4040
4041         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4042         vector_num = min(hdev->num_msi_left, vector_num);
4043
4044         for (j = 0; j < vector_num; j++) {
4045                 for (i = 1; i < hdev->num_msi; i++) {
4046                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4047                                 vector->vector = pci_irq_vector(hdev->pdev, i);
4048                                 vector->io_addr = hdev->hw.io_base +
4049                                         HCLGE_VECTOR_REG_BASE +
4050                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4051                                         vport->vport_id *
4052                                         HCLGE_VECTOR_VF_OFFSET;
4053                                 hdev->vector_status[i] = vport->vport_id;
4054                                 hdev->vector_irq[i] = vector->vector;
4055
4056                                 vector++;
4057                                 alloc++;
4058
4059                                 break;
4060                         }
4061                 }
4062         }
4063         hdev->num_msi_left -= alloc;
4064         hdev->num_msi_used += alloc;
4065
4066         return alloc;
4067 }
4068
4069 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4070 {
4071         int i;
4072
4073         for (i = 0; i < hdev->num_msi; i++)
4074                 if (vector == hdev->vector_irq[i])
4075                         return i;
4076
4077         return -EINVAL;
4078 }
4079
4080 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4081 {
4082         struct hclge_vport *vport = hclge_get_vport(handle);
4083         struct hclge_dev *hdev = vport->back;
4084         int vector_id;
4085
4086         vector_id = hclge_get_vector_index(hdev, vector);
4087         if (vector_id < 0) {
4088                 dev_err(&hdev->pdev->dev,
4089                         "Get vector index fail. vector = %d\n", vector);
4090                 return vector_id;
4091         }
4092
4093         hclge_free_vector(hdev, vector_id);
4094
4095         return 0;
4096 }
4097
4098 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4099 {
4100         return HCLGE_RSS_KEY_SIZE;
4101 }
4102
4103 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4104 {
4105         return HCLGE_RSS_IND_TBL_SIZE;
4106 }
4107
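/* Program the RSS hash algorithm and hash key into hardware. The key is
 * written HCLGE_RSS_HASH_KEY_NUM bytes at a time, with the chunk index
 * carried in the hash_config field of each descriptor.
 */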
4108 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4109                                   const u8 hfunc, const u8 *key)
4110 {
4111         struct hclge_rss_config_cmd *req;
4112         unsigned int key_offset = 0;
4113         struct hclge_desc desc;
4114         int key_counts;
4115         int key_size;
4116         int ret;
4117
4118         key_counts = HCLGE_RSS_KEY_SIZE;
4119         req = (struct hclge_rss_config_cmd *)desc.data;
4120
4121         while (key_counts) {
4122                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4123                                            false);
4124
4125                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4126                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4127
4128                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4129                 memcpy(req->hash_key,
4130                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4131
4132                 key_counts -= key_size;
4133                 key_offset++;
4134                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4135                 if (ret) {
4136                         dev_err(&hdev->pdev->dev,
4137                                 "Configure RSS config fail, status = %d\n",
4138                                 ret);
4139                         return ret;
4140                 }
4141         }
4142         return 0;
4143 }
4144
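/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE
 * entries per descriptor across HCLGE_RSS_CFG_TBL_NUM commands.
 */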
4145 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4146 {
4147         struct hclge_rss_indirection_table_cmd *req;
4148         struct hclge_desc desc;
4149         int i, j;
4150         int ret;
4151
4152         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4153
4154         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4155                 hclge_cmd_setup_basic_desc
4156                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4157
4158                 req->start_table_index =
4159                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4160                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4161
4162                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4163                         req->rss_result[j] =
4164                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4165
4166                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4167                 if (ret) {
4168                         dev_err(&hdev->pdev->dev,
4169                                 "Configure rss indir table fail,status = %d\n",
4170                                 ret);
4171                         return ret;
4172                 }
4173         }
4174         return 0;
4175 }
4176
4177 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4178                                  u16 *tc_size, u16 *tc_offset)
4179 {
4180         struct hclge_rss_tc_mode_cmd *req;
4181         struct hclge_desc desc;
4182         int ret;
4183         int i;
4184
4185         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4186         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4187
4188         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4189                 u16 mode = 0;
4190
4191                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4192                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4193                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4194                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4195                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4196
4197                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4198         }
4199
4200         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4201         if (ret)
4202                 dev_err(&hdev->pdev->dev,
4203                         "Configure rss tc mode fail, status = %d\n", ret);
4204
4205         return ret;
4206 }
4207
4208 static void hclge_get_rss_type(struct hclge_vport *vport)
4209 {
4210         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4211             vport->rss_tuple_sets.ipv4_udp_en ||
4212             vport->rss_tuple_sets.ipv4_sctp_en ||
4213             vport->rss_tuple_sets.ipv6_tcp_en ||
4214             vport->rss_tuple_sets.ipv6_udp_en ||
4215             vport->rss_tuple_sets.ipv6_sctp_en)
4216                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4217         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4218                  vport->rss_tuple_sets.ipv6_fragment_en)
4219                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4220         else
4221                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4222 }
4223
4224 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4225 {
4226         struct hclge_rss_input_tuple_cmd *req;
4227         struct hclge_desc desc;
4228         int ret;
4229
4230         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4231
4232         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4233
4234         /* Get the tuple cfg from pf */
4235         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4236         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4237         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4238         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4239         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4240         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4241         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4242         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4243         hclge_get_rss_type(&hdev->vport[0]);
4244         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4245         if (ret)
4246                 dev_err(&hdev->pdev->dev,
4247                         "Configure rss input fail, status = %d\n", ret);
4248         return ret;
4249 }
4250
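/* Report the RSS hash algorithm, hash key and indirection table from the
 * vport's shadow copies (no hardware read-back); this backs the ethtool
 * get_rxfh path.
 */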
4251 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4252                          u8 *key, u8 *hfunc)
4253 {
4254         struct hclge_vport *vport = hclge_get_vport(handle);
4255         int i;
4256
4257         /* Get hash algorithm */
4258         if (hfunc) {
4259                 switch (vport->rss_algo) {
4260                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4261                         *hfunc = ETH_RSS_HASH_TOP;
4262                         break;
4263                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4264                         *hfunc = ETH_RSS_HASH_XOR;
4265                         break;
4266                 default:
4267                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4268                         break;
4269                 }
4270         }
4271
4272         /* Get the RSS Key required by the user */
4273         if (key)
4274                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4275
4276         /* Get indirect table */
4277         if (indir)
4278                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4279                         indir[i] = vport->rss_indirection_tbl[i];
4280
4281         return 0;
4282 }
4283
4284 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4285                          const u8 *key, const u8 hfunc)
4286 {
4287         struct hclge_vport *vport = hclge_get_vport(handle);
4288         struct hclge_dev *hdev = vport->back;
4289         u8 hash_algo;
4290         int ret, i;
4291
4292         /* Set the RSS Hash Key if specified by the user */
4293         if (key) {
4294                 switch (hfunc) {
4295                 case ETH_RSS_HASH_TOP:
4296                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4297                         break;
4298                 case ETH_RSS_HASH_XOR:
4299                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4300                         break;
4301                 case ETH_RSS_HASH_NO_CHANGE:
4302                         hash_algo = vport->rss_algo;
4303                         break;
4304                 default:
4305                         return -EINVAL;
4306                 }
4307
4308                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4309                 if (ret)
4310                         return ret;
4311
4312                 /* Update the shadow RSS key with the user specified key */
4313                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4314                 vport->rss_algo = hash_algo;
4315         }
4316
4317         /* Update the shadow RSS table with user specified qids */
4318         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4319                 vport->rss_indirection_tbl[i] = indir[i];
4320
4321         /* Update the hardware */
4322         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4323 }
4324
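/* Translate the ethtool RXH_* hash fields into the hardware tuple bits:
 * L4 source/destination port, IP source/destination address, plus the
 * verification tag bit for SCTP flows.
 */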
4325 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4326 {
4327         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4328
4329         if (nfc->data & RXH_L4_B_2_3)
4330                 hash_sets |= HCLGE_D_PORT_BIT;
4331         else
4332                 hash_sets &= ~HCLGE_D_PORT_BIT;
4333
4334         if (nfc->data & RXH_IP_SRC)
4335                 hash_sets |= HCLGE_S_IP_BIT;
4336         else
4337                 hash_sets &= ~HCLGE_S_IP_BIT;
4338
4339         if (nfc->data & RXH_IP_DST)
4340                 hash_sets |= HCLGE_D_IP_BIT;
4341         else
4342                 hash_sets &= ~HCLGE_D_IP_BIT;
4343
4344         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4345                 hash_sets |= HCLGE_V_TAG_BIT;
4346
4347         return hash_sets;
4348 }
4349
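/* Program the per-flow-type RSS input tuple. For illustration (assuming the
 * standard ethtool rx-flow-hash syntax, with "eth0" as a placeholder device),
 * a request such as
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn
 * arrives here with nfc->flow_type == TCP_V4_FLOW and nfc->data set to
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, and only the
 * ipv4_tcp_en field of the input tuple command is updated.
 */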
4350 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4351                                struct ethtool_rxnfc *nfc)
4352 {
4353         struct hclge_vport *vport = hclge_get_vport(handle);
4354         struct hclge_dev *hdev = vport->back;
4355         struct hclge_rss_input_tuple_cmd *req;
4356         struct hclge_desc desc;
4357         u8 tuple_sets;
4358         int ret;
4359
4360         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4361                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4362                 return -EINVAL;
4363
4364         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4365         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4366
4367         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4368         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4369         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4370         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4371         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4372         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4373         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4374         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4375
4376         tuple_sets = hclge_get_rss_hash_bits(nfc);
4377         switch (nfc->flow_type) {
4378         case TCP_V4_FLOW:
4379                 req->ipv4_tcp_en = tuple_sets;
4380                 break;
4381         case TCP_V6_FLOW:
4382                 req->ipv6_tcp_en = tuple_sets;
4383                 break;
4384         case UDP_V4_FLOW:
4385                 req->ipv4_udp_en = tuple_sets;
4386                 break;
4387         case UDP_V6_FLOW:
4388                 req->ipv6_udp_en = tuple_sets;
4389                 break;
4390         case SCTP_V4_FLOW:
4391                 req->ipv4_sctp_en = tuple_sets;
4392                 break;
4393         case SCTP_V6_FLOW:
4394                 if ((nfc->data & RXH_L4_B_0_1) ||
4395                     (nfc->data & RXH_L4_B_2_3))
4396                         return -EINVAL;
4397
4398                 req->ipv6_sctp_en = tuple_sets;
4399                 break;
4400         case IPV4_FLOW:
4401                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4402                 break;
4403         case IPV6_FLOW:
4404                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4405                 break;
4406         default:
4407                 return -EINVAL;
4408         }
4409
4410         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4411         if (ret) {
4412                 dev_err(&hdev->pdev->dev,
4413                         "Set rss tuple fail, status = %d\n", ret);
4414                 return ret;
4415         }
4416
4417         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4418         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4419         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4420         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4421         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4422         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4423         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4424         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4425         hclge_get_rss_type(vport);
4426         return 0;
4427 }
4428
4429 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4430                                struct ethtool_rxnfc *nfc)
4431 {
4432         struct hclge_vport *vport = hclge_get_vport(handle);
4433         u8 tuple_sets;
4434
4435         nfc->data = 0;
4436
4437         switch (nfc->flow_type) {
4438         case TCP_V4_FLOW:
4439                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4440                 break;
4441         case UDP_V4_FLOW:
4442                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4443                 break;
4444         case TCP_V6_FLOW:
4445                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4446                 break;
4447         case UDP_V6_FLOW:
4448                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4449                 break;
4450         case SCTP_V4_FLOW:
4451                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4452                 break;
4453         case SCTP_V6_FLOW:
4454                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4455                 break;
4456         case IPV4_FLOW:
4457         case IPV6_FLOW:
4458                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4459                 break;
4460         default:
4461                 return -EINVAL;
4462         }
4463
4464         if (!tuple_sets)
4465                 return 0;
4466
4467         if (tuple_sets & HCLGE_D_PORT_BIT)
4468                 nfc->data |= RXH_L4_B_2_3;
4469         if (tuple_sets & HCLGE_S_PORT_BIT)
4470                 nfc->data |= RXH_L4_B_0_1;
4471         if (tuple_sets & HCLGE_D_IP_BIT)
4472                 nfc->data |= RXH_IP_DST;
4473         if (tuple_sets & HCLGE_S_IP_BIT)
4474                 nfc->data |= RXH_IP_SRC;
4475
4476         return 0;
4477 }
4478
4479 static int hclge_get_tc_size(struct hnae3_handle *handle)
4480 {
4481         struct hclge_vport *vport = hclge_get_vport(handle);
4482         struct hclge_dev *hdev = vport->back;
4483
4484         return hdev->rss_size_max;
4485 }
4486
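/* Bring up RSS from the vport[0] shadow configuration: write the indirection
 * table, the hash algorithm and key, and the input tuple, then program the
 * per-TC mode where tc_size is the log2 of the rounded-up rss_size and
 * tc_offset is rss_size * tc index.
 */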
4487 int hclge_rss_init_hw(struct hclge_dev *hdev)
4488 {
4489         struct hclge_vport *vport = hdev->vport;
4490         u8 *rss_indir = vport[0].rss_indirection_tbl;
4491         u16 rss_size = vport[0].alloc_rss_size;
4492         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4493         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4494         u8 *key = vport[0].rss_hash_key;
4495         u8 hfunc = vport[0].rss_algo;
4496         u16 tc_valid[HCLGE_MAX_TC_NUM];
4497         u16 roundup_size;
4498         unsigned int i;
4499         int ret;
4500
4501         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4502         if (ret)
4503                 return ret;
4504
4505         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4506         if (ret)
4507                 return ret;
4508
4509         ret = hclge_set_rss_input_tuple(hdev);
4510         if (ret)
4511                 return ret;
4512
4513         /* Each TC has the same queue size, and the tc_size set to hardware is
4514          * the log2 of the roundup power of two of rss_size; the actual queue
4515          * size is limited by the indirection table.
4516          */
4517         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4518                 dev_err(&hdev->pdev->dev,
4519                         "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4520                         rss_size);
4521                 return -EINVAL;
4522         }
4523
4524         roundup_size = roundup_pow_of_two(rss_size);
4525         roundup_size = ilog2(roundup_size);
4526
4527         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4528                 tc_valid[i] = 0;
4529
4530                 if (!(hdev->hw_tc_map & BIT(i)))
4531                         continue;
4532
4533                 tc_valid[i] = 1;
4534                 tc_size[i] = roundup_size;
4535                 tc_offset[i] = rss_size * i;
4536         }
4537
4538         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4539 }
4540
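/* Default the indirection table of every vport to a round-robin spread over
 * its allocated RSS queues. As an illustration, with alloc_rss_size == 4 the
 * table becomes 0, 1, 2, 3, 0, 1, 2, 3, ... for all HCLGE_RSS_IND_TBL_SIZE
 * entries.
 */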
4541 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4542 {
4543         struct hclge_vport *vport = hdev->vport;
4544         int i, j;
4545
4546         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4547                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4548                         vport[j].rss_indirection_tbl[i] =
4549                                 i % vport[j].alloc_rss_size;
4550         }
4551 }
4552
4553 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4554 {
4555         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4556         struct hclge_vport *vport = hdev->vport;
4557
4558         if (hdev->pdev->revision >= 0x21)
4559                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4560
4561         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4562                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4563                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4564                 vport[i].rss_tuple_sets.ipv4_udp_en =
4565                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4566                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4567                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4568                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4569                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4570                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4571                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4572                 vport[i].rss_tuple_sets.ipv6_udp_en =
4573                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4574                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4575                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4576                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4577                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4578
4579                 vport[i].rss_algo = rss_algo;
4580
4581                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4582                        HCLGE_RSS_KEY_SIZE);
4583         }
4584
4585         hclge_rss_indir_init_cfg(hdev);
4586 }
4587
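/* Map (or unmap, when 'en' is false) a chain of rings to an interrupt vector.
 * Ring entries are packed HCLGE_VECTOR_ELEMENTS_PER_CMD at a time into one
 * command descriptor; a final partially filled descriptor flushes whatever
 * remains.
 */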
4588 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4589                                 int vector_id, bool en,
4590                                 struct hnae3_ring_chain_node *ring_chain)
4591 {
4592         struct hclge_dev *hdev = vport->back;
4593         struct hnae3_ring_chain_node *node;
4594         struct hclge_desc desc;
4595         struct hclge_ctrl_vector_chain_cmd *req =
4596                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4597         enum hclge_cmd_status status;
4598         enum hclge_opcode_type op;
4599         u16 tqp_type_and_id;
4600         int i;
4601
4602         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4603         hclge_cmd_setup_basic_desc(&desc, op, false);
4604         req->int_vector_id = vector_id;
4605
4606         i = 0;
4607         for (node = ring_chain; node; node = node->next) {
4608                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4609                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4610                                 HCLGE_INT_TYPE_S,
4611                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4612                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4613                                 HCLGE_TQP_ID_S, node->tqp_index);
4614                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4615                                 HCLGE_INT_GL_IDX_S,
4616                                 hnae3_get_field(node->int_gl_idx,
4617                                                 HNAE3_RING_GL_IDX_M,
4618                                                 HNAE3_RING_GL_IDX_S));
4619                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4620                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4621                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4622                         req->vfid = vport->vport_id;
4623
4624                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4625                         if (status) {
4626                                 dev_err(&hdev->pdev->dev,
4627                                         "Map TQP fail, status is %d.\n",
4628                                         status);
4629                                 return -EIO;
4630                         }
4631                         i = 0;
4632
4633                         hclge_cmd_setup_basic_desc(&desc,
4634                                                    op,
4635                                                    false);
4636                         req->int_vector_id = vector_id;
4637                 }
4638         }
4639
4640         if (i > 0) {
4641                 req->int_cause_num = i;
4642                 req->vfid = vport->vport_id;
4643                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4644                 if (status) {
4645                         dev_err(&hdev->pdev->dev,
4646                                 "Map TQP fail, status is %d.\n", status);
4647                         return -EIO;
4648                 }
4649         }
4650
4651         return 0;
4652 }
4653
4654 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4655                                     struct hnae3_ring_chain_node *ring_chain)
4656 {
4657         struct hclge_vport *vport = hclge_get_vport(handle);
4658         struct hclge_dev *hdev = vport->back;
4659         int vector_id;
4660
4661         vector_id = hclge_get_vector_index(hdev, vector);
4662         if (vector_id < 0) {
4663                 dev_err(&hdev->pdev->dev,
4664                         "failed to get vector index. vector=%d\n", vector);
4665                 return vector_id;
4666         }
4667
4668         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4669 }
4670
4671 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4672                                        struct hnae3_ring_chain_node *ring_chain)
4673 {
4674         struct hclge_vport *vport = hclge_get_vport(handle);
4675         struct hclge_dev *hdev = vport->back;
4676         int vector_id, ret;
4677
4678         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4679                 return 0;
4680
4681         vector_id = hclge_get_vector_index(hdev, vector);
4682         if (vector_id < 0) {
4683                 dev_err(&handle->pdev->dev,
4684                         "Get vector index fail. ret = %d\n", vector_id);
4685                 return vector_id;
4686         }
4687
4688         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4689         if (ret)
4690                 dev_err(&handle->pdev->dev,
4691                         "Unmap ring from vector fail. vectorid=%d, ret = %d\n",
4692                         vector_id, ret);
4693
4694         return ret;
4695 }
4696
4697 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4698                                       struct hclge_promisc_param *param)
4699 {
4700         struct hclge_promisc_cfg_cmd *req;
4701         struct hclge_desc desc;
4702         int ret;
4703
4704         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4705
4706         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4707         req->vf_id = param->vf_id;
4708
4709         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4710          * pdev revision(0x20); newer revisions support them. Setting these
4711          * two fields does not return an error when the driver sends the
4712          * command to the firmware on revision(0x20).
4713          */
4714         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4715                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4716
4717         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4718         if (ret)
4719                 dev_err(&hdev->pdev->dev,
4720                         "failed to set vport %d promisc mode, ret = %d.\n",
4721                         param->vf_id, ret);
4722
4723         return ret;
4724 }
4725
4726 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4727                                      bool en_uc, bool en_mc, bool en_bc,
4728                                      int vport_id)
4729 {
4730         if (!param)
4731                 return;
4732
4733         memset(param, 0, sizeof(struct hclge_promisc_param));
4734         if (en_uc)
4735                 param->enable = HCLGE_PROMISC_EN_UC;
4736         if (en_mc)
4737                 param->enable |= HCLGE_PROMISC_EN_MC;
4738         if (en_bc)
4739                 param->enable |= HCLGE_PROMISC_EN_BC;
4740         param->vf_id = vport_id;
4741 }
4742
4743 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4744                                  bool en_mc_pmc, bool en_bc_pmc)
4745 {
4746         struct hclge_dev *hdev = vport->back;
4747         struct hclge_promisc_param param;
4748
4749         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4750                                  vport->vport_id);
4751         return hclge_cmd_set_promisc_mode(hdev, &param);
4752 }
4753
4754 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4755                                   bool en_mc_pmc)
4756 {
4757         struct hclge_vport *vport = hclge_get_vport(handle);
4758         bool en_bc_pmc = true;
4759
4760         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4761          * is always bypassed. So broadcast promisc should be disabled until
4762          * the user enables promisc mode.
4763          */
4764         if (handle->pdev->revision == 0x20)
4765                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4766
4767         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4768                                             en_bc_pmc);
4769 }
4770
4771 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4772 {
4773         struct hclge_vport *vport = hclge_get_vport(handle);
4774         struct hclge_dev *hdev = vport->back;
4775
4776         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4777 }
4778
4779 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4780 {
4781         struct hclge_get_fd_mode_cmd *req;
4782         struct hclge_desc desc;
4783         int ret;
4784
4785         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4786
4787         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4788
4789         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4790         if (ret) {
4791                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4792                 return ret;
4793         }
4794
4795         *fd_mode = req->mode;
4796
4797         return ret;
4798 }
4799
4800 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4801                                    u32 *stage1_entry_num,
4802                                    u32 *stage2_entry_num,
4803                                    u16 *stage1_counter_num,
4804                                    u16 *stage2_counter_num)
4805 {
4806         struct hclge_get_fd_allocation_cmd *req;
4807         struct hclge_desc desc;
4808         int ret;
4809
4810         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4811
4812         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4813
4814         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4815         if (ret) {
4816                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4817                         ret);
4818                 return ret;
4819         }
4820
4821         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4822         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4823         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4824         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4825
4826         return ret;
4827 }
4828
4829 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4830                                    enum HCLGE_FD_STAGE stage_num)
4831 {
4832         struct hclge_set_fd_key_config_cmd *req;
4833         struct hclge_fd_key_cfg *stage;
4834         struct hclge_desc desc;
4835         int ret;
4836
4837         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4838
4839         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4840         stage = &hdev->fd_cfg.key_cfg[stage_num];
4841         req->stage = stage_num;
4842         req->key_select = stage->key_sel;
4843         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4844         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4845         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4846         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4847         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4848         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4849
4850         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4851         if (ret)
4852                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4853
4854         return ret;
4855 }
4856
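/* Initialize the flow director: query the FD mode to size the key, enable the
 * stage-1 tuple and meta data fields, query the rule/counter allocation and
 * push the stage-1 key configuration to hardware.
 */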
4857 static int hclge_init_fd_config(struct hclge_dev *hdev)
4858 {
4859 #define LOW_2_WORDS             0x03
4860         struct hclge_fd_key_cfg *key_cfg;
4861         int ret;
4862
4863         if (!hnae3_dev_fd_supported(hdev))
4864                 return 0;
4865
4866         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4867         if (ret)
4868                 return ret;
4869
4870         switch (hdev->fd_cfg.fd_mode) {
4871         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4872                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4873                 break;
4874         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4875                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4876                 break;
4877         default:
4878                 dev_err(&hdev->pdev->dev,
4879                         "Unsupported flow director mode %u\n",
4880                         hdev->fd_cfg.fd_mode);
4881                 return -EOPNOTSUPP;
4882         }
4883
4884         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4885         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4886         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4887         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4888         key_cfg->outer_sipv6_word_en = 0;
4889         key_cfg->outer_dipv6_word_en = 0;
4890
4891         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4892                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4893                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4894                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4895
4896         /* If the max 400-bit key is used, src/dst mac tuples are also supported */
4897         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
4898                 key_cfg->tuple_active |=
4899                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4900
4901         /* roce_type is used to filter RoCE frames
4902          * dst_vport is used to make the rule match a specific vport
4903          */
4904         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4905
4906         ret = hclge_get_fd_allocation(hdev,
4907                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4908                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4909                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4910                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4911         if (ret)
4912                 return ret;
4913
4914         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4915 }
4916
4917 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4918                                 int loc, u8 *key, bool is_add)
4919 {
4920         struct hclge_fd_tcam_config_1_cmd *req1;
4921         struct hclge_fd_tcam_config_2_cmd *req2;
4922         struct hclge_fd_tcam_config_3_cmd *req3;
4923         struct hclge_desc desc[3];
4924         int ret;
4925
4926         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4927         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4928         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4929         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4930         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4931
4932         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4933         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4934         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4935
4936         req1->stage = stage;
4937         req1->xy_sel = sel_x ? 1 : 0;
4938         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4939         req1->index = cpu_to_le32(loc);
4940         req1->entry_vld = sel_x ? is_add : 0;
4941
4942         if (key) {
4943                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4944                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4945                        sizeof(req2->tcam_data));
4946                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4947                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4948         }
4949
4950         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4951         if (ret)
4952                 dev_err(&hdev->pdev->dev,
4953                         "config tcam key fail, ret=%d\n",
4954                         ret);
4955
4956         return ret;
4957 }
4958
4959 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4960                               struct hclge_fd_ad_data *action)
4961 {
4962         struct hclge_fd_ad_config_cmd *req;
4963         struct hclge_desc desc;
4964         u64 ad_data = 0;
4965         int ret;
4966
4967         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4968
4969         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4970         req->index = cpu_to_le32(loc);
4971         req->stage = stage;
4972
4973         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4974                       action->write_rule_id_to_bd);
4975         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4976                         action->rule_id);
4977         ad_data <<= 32;
4978         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4979         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4980                       action->forward_to_direct_queue);
4981         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4982                         action->queue_id);
4983         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4984         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4985                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4986         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4987         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4988                         action->counter_id);
4989
4990         req->ad_data = cpu_to_le64(ad_data);
4991         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4992         if (ret)
4993                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4994
4995         return ret;
4996 }
4997
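/* Convert one active tuple of the rule into its TCAM x/y key pair using
 * calc_x()/calc_y() together with the per-tuple mask. Tuples flagged in
 * rule->unused_tuple still return true so the caller keeps their (zeroed)
 * space in the key; unhandled tuple bits return false.
 */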
4998 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4999                                    struct hclge_fd_rule *rule)
5000 {
5001         u16 tmp_x_s, tmp_y_s;
5002         u32 tmp_x_l, tmp_y_l;
5003         int i;
5004
5005         if (rule->unused_tuple & tuple_bit)
5006                 return true;
5007
5008         switch (tuple_bit) {
5009         case BIT(INNER_DST_MAC):
5010                 for (i = 0; i < ETH_ALEN; i++) {
5011                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5012                                rule->tuples_mask.dst_mac[i]);
5013                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5014                                rule->tuples_mask.dst_mac[i]);
5015                 }
5016
5017                 return true;
5018         case BIT(INNER_SRC_MAC):
5019                 for (i = 0; i < ETH_ALEN; i++) {
5020                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5021                                rule->tuples_mask.src_mac[i]);
5022                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5023                                rule->tuples_mask.src_mac[i]);
5024                 }
5025
5026                 return true;
5027         case BIT(INNER_VLAN_TAG_FST):
5028                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5029                        rule->tuples_mask.vlan_tag1);
5030                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5031                        rule->tuples_mask.vlan_tag1);
5032                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5033                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5034
5035                 return true;
5036         case BIT(INNER_ETH_TYPE):
5037                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5038                        rule->tuples_mask.ether_proto);
5039                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5040                        rule->tuples_mask.ether_proto);
5041                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5042                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5043
5044                 return true;
5045         case BIT(INNER_IP_TOS):
5046                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5047                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5048
5049                 return true;
5050         case BIT(INNER_IP_PROTO):
5051                 calc_x(*key_x, rule->tuples.ip_proto,
5052                        rule->tuples_mask.ip_proto);
5053                 calc_y(*key_y, rule->tuples.ip_proto,
5054                        rule->tuples_mask.ip_proto);
5055
5056                 return true;
5057         case BIT(INNER_SRC_IP):
5058                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5059                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5060                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5061                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5062                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5063                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5064
5065                 return true;
5066         case BIT(INNER_DST_IP):
5067                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5068                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5069                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5070                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5071                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5072                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5073
5074                 return true;
5075         case BIT(INNER_SRC_PORT):
5076                 calc_x(tmp_x_s, rule->tuples.src_port,
5077                        rule->tuples_mask.src_port);
5078                 calc_y(tmp_y_s, rule->tuples.src_port,
5079                        rule->tuples_mask.src_port);
5080                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5081                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5082
5083                 return true;
5084         case BIT(INNER_DST_PORT):
5085                 calc_x(tmp_x_s, rule->tuples.dst_port,
5086                        rule->tuples_mask.dst_port);
5087                 calc_y(tmp_y_s, rule->tuples.dst_port,
5088                        rule->tuples_mask.dst_port);
5089                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5090                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5091
5092                 return true;
5093         default:
5094                 return false;
5095         }
5096 }
5097
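/* Encode a port number for the meta data key: host ports carry the pf_id and
 * vf_id with the port type bit set to HOST_PORT, network ports carry the
 * network_port_id with the type bit set to NETWORK_PORT.
 */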
5098 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5099                                  u8 vf_id, u8 network_port_id)
5100 {
5101         u32 port_number = 0;
5102
5103         if (port_type == HOST_PORT) {
5104                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5105                                 pf_id);
5106                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5107                                 vf_id);
5108                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5109         } else {
5110                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5111                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5112                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5113         }
5114
5115         return port_number;
5116 }
5117
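/* Pack the active meta data fields (RoCE/NIC packet type, destination vport)
 * into a 32-bit word, convert it to the x/y form with a full mask and
 * left-align the result so it lands in the MSB region of the key.
 */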
5118 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5119                                        __le32 *key_x, __le32 *key_y,
5120                                        struct hclge_fd_rule *rule)
5121 {
5122         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5123         u8 cur_pos = 0, tuple_size, shift_bits;
5124         unsigned int i;
5125
5126         for (i = 0; i < MAX_META_DATA; i++) {
5127                 tuple_size = meta_data_key_info[i].key_length;
5128                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5129
5130                 switch (tuple_bit) {
5131                 case BIT(ROCE_TYPE):
5132                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5133                         cur_pos += tuple_size;
5134                         break;
5135                 case BIT(DST_VPORT):
5136                         port_number = hclge_get_port_number(HOST_PORT, 0,
5137                                                             rule->vf_id, 0);
5138                         hnae3_set_field(meta_data,
5139                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
5140                                         cur_pos, port_number);
5141                         cur_pos += tuple_size;
5142                         break;
5143                 default:
5144                         break;
5145                 }
5146         }
5147
5148         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5149         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5150         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5151
5152         *key_x = cpu_to_le32(tmp_x << shift_bits);
5153         *key_y = cpu_to_le32(tmp_y << shift_bits);
5154 }
5155
5156 /* A complete key is the combination of the meta data key and the tuple key.
5157  * The meta data key is stored in the MSB region, the tuple key is stored in
5158  * the LSB region, and unused bits are filled with 0.
5159  */
5160 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5161                             struct hclge_fd_rule *rule)
5162 {
5163         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5164         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5165         u8 *cur_key_x, *cur_key_y;
5166         u8 meta_data_region;
5167         u8 tuple_size;
5168         int ret;
5169         u32 i;
5170
5171         memset(key_x, 0, sizeof(key_x));
5172         memset(key_y, 0, sizeof(key_y));
5173         cur_key_x = key_x;
5174         cur_key_y = key_y;
5175
5176         for (i = 0; i < MAX_TUPLE; i++) {
5177                 bool tuple_valid;
5178                 u32 check_tuple;
5179
5180                 tuple_size = tuple_key_info[i].key_length / 8;
5181                 check_tuple = key_cfg->tuple_active & BIT(i);
5182
5183                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5184                                                      cur_key_y, rule);
5185                 if (tuple_valid) {
5186                         cur_key_x += tuple_size;
5187                         cur_key_y += tuple_size;
5188                 }
5189         }
5190
5191         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5192                         MAX_META_DATA_LENGTH / 8;
5193
5194         hclge_fd_convert_meta_data(key_cfg,
5195                                    (__le32 *)(key_x + meta_data_region),
5196                                    (__le32 *)(key_y + meta_data_region),
5197                                    rule);
5198
5199         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5200                                    true);
5201         if (ret) {
5202                 dev_err(&hdev->pdev->dev,
5203                         "fd key_y config fail, loc=%u, ret=%d\n",
5204                         rule->location, ret);
5205                 return ret;
5206         }
5207
5208         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5209                                    true);
5210         if (ret)
5211                 dev_err(&hdev->pdev->dev,
5212                         "fd key_x config fail, loc=%u, ret=%d\n",
5213                         rule->location, ret);
5214         return ret;
5215 }
5216
5217 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5218                                struct hclge_fd_rule *rule)
5219 {
5220         struct hclge_fd_ad_data ad_data;
5221
5222         ad_data.ad_id = rule->location;
5223
5224         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5225                 ad_data.drop_packet = true;
5226                 ad_data.forward_to_direct_queue = false;
5227                 ad_data.queue_id = 0;
5228         } else {
5229                 ad_data.drop_packet = false;
5230                 ad_data.forward_to_direct_queue = true;
5231                 ad_data.queue_id = rule->queue_id;
5232         }
5233
5234         ad_data.use_counter = false;
5235         ad_data.counter_id = 0;
5236
5237         ad_data.use_next_stage = false;
5238         ad_data.next_input_key = 0;
5239
5240         ad_data.write_rule_id_to_bd = true;
5241         ad_data.rule_id = rule->location;
5242
5243         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5244 }
5245
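/* The hclge_fd_check_*_tuple() helpers below validate an ethtool flow spec
 * and build the unused_tuple bitmap: every field left as zero in the spec is
 * marked unused so it is excluded from the TCAM key, and fields the hardware
 * cannot match on cause -EOPNOTSUPP.
 */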
5246 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5247                                        u32 *unused_tuple)
5248 {
5249         if (!spec || !unused_tuple)
5250                 return -EINVAL;
5251
5252         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5253
5254         if (!spec->ip4src)
5255                 *unused_tuple |= BIT(INNER_SRC_IP);
5256
5257         if (!spec->ip4dst)
5258                 *unused_tuple |= BIT(INNER_DST_IP);
5259
5260         if (!spec->psrc)
5261                 *unused_tuple |= BIT(INNER_SRC_PORT);
5262
5263         if (!spec->pdst)
5264                 *unused_tuple |= BIT(INNER_DST_PORT);
5265
5266         if (!spec->tos)
5267                 *unused_tuple |= BIT(INNER_IP_TOS);
5268
5269         return 0;
5270 }
5271
5272 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5273                                     u32 *unused_tuple)
5274 {
5275         if (!spec || !unused_tuple)
5276                 return -EINVAL;
5277
5278         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5279                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5280
5281         if (!spec->ip4src)
5282                 *unused_tuple |= BIT(INNER_SRC_IP);
5283
5284         if (!spec->ip4dst)
5285                 *unused_tuple |= BIT(INNER_DST_IP);
5286
5287         if (!spec->tos)
5288                 *unused_tuple |= BIT(INNER_IP_TOS);
5289
5290         if (!spec->proto)
5291                 *unused_tuple |= BIT(INNER_IP_PROTO);
5292
5293         if (spec->l4_4_bytes)
5294                 return -EOPNOTSUPP;
5295
5296         if (spec->ip_ver != ETH_RX_NFC_IP4)
5297                 return -EOPNOTSUPP;
5298
5299         return 0;
5300 }
5301
5302 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5303                                        u32 *unused_tuple)
5304 {
5305         if (!spec || !unused_tuple)
5306                 return -EINVAL;
5307
5308         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5309                 BIT(INNER_IP_TOS);
5310
5311         /* check whether the src/dst ip addresses are used */
5312         if (!spec->ip6src[0] && !spec->ip6src[1] &&
5313             !spec->ip6src[2] && !spec->ip6src[3])
5314                 *unused_tuple |= BIT(INNER_SRC_IP);
5315
5316         if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5317             !spec->ip6dst[2] && !spec->ip6dst[3])
5318                 *unused_tuple |= BIT(INNER_DST_IP);
5319
5320         if (!spec->psrc)
5321                 *unused_tuple |= BIT(INNER_SRC_PORT);
5322
5323         if (!spec->pdst)
5324                 *unused_tuple |= BIT(INNER_DST_PORT);
5325
5326         if (spec->tclass)
5327                 return -EOPNOTSUPP;
5328
5329         return 0;
5330 }
5331
5332 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5333                                     u32 *unused_tuple)
5334 {
5335         if (!spec || !unused_tuple)
5336                 return -EINVAL;
5337
5338         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5339                 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5340
5341         /* check whether the src/dst ip addresses are used */
5342         if (!spec->ip6src[0] && !spec->ip6src[1] &&
5343             !spec->ip6src[2] && !spec->ip6src[3])
5344                 *unused_tuple |= BIT(INNER_SRC_IP);
5345
5346         if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5347             !spec->ip6dst[2] && !spec->ip6dst[3])
5348                 *unused_tuple |= BIT(INNER_DST_IP);
5349
5350         if (!spec->l4_proto)
5351                 *unused_tuple |= BIT(INNER_IP_PROTO);
5352
5353         if (spec->tclass)
5354                 return -EOPNOTSUPP;
5355
5356         if (spec->l4_4_bytes)
5357                 return -EOPNOTSUPP;
5358
5359         return 0;
5360 }
5361
5362 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5363 {
5364         if (!spec || !unused_tuple)
5365                 return -EINVAL;
5366
5367         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5368                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5369                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5370
5371         if (is_zero_ether_addr(spec->h_source))
5372                 *unused_tuple |= BIT(INNER_SRC_MAC);
5373
5374         if (is_zero_ether_addr(spec->h_dest))
5375                 *unused_tuple |= BIT(INNER_DST_MAC);
5376
5377         if (!spec->h_proto)
5378                 *unused_tuple |= BIT(INNER_ETH_TYPE);
5379
5380         return 0;
5381 }
5382
5383 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5384                                     struct ethtool_rx_flow_spec *fs,
5385                                     u32 *unused_tuple)
5386 {
5387         if (fs->flow_type & FLOW_EXT) {
5388                 if (fs->h_ext.vlan_etype) {
5389                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5390                         return -EOPNOTSUPP;
5391                 }
5392
5393                 if (!fs->h_ext.vlan_tci)
5394                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5395
5396                 if (fs->m_ext.vlan_tci &&
5397                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5398                         dev_err(&hdev->pdev->dev,
5399                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5400                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5401                         return -EINVAL;
5402                 }
5403         } else {
5404                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5405         }
5406
5407         if (fs->flow_type & FLOW_MAC_EXT) {
5408                 if (hdev->fd_cfg.fd_mode !=
5409                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5410                         dev_err(&hdev->pdev->dev,
5411                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5412                         return -EOPNOTSUPP;
5413                 }
5414
5415                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5416                         *unused_tuple |= BIT(INNER_DST_MAC);
5417                 else
5418                         *unused_tuple &= ~BIT(INNER_DST_MAC);
5419         }
5420
5421         return 0;
5422 }
5423
5424 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5425                                struct ethtool_rx_flow_spec *fs,
5426                                u32 *unused_tuple)
5427 {
5428         u32 flow_type;
5429         int ret;
5430
5431         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5432                 dev_err(&hdev->pdev->dev,
5433                         "failed to config fd rules, invalid rule location: %u, max is %u.\n",
5434                         fs->location,
5435                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5436                 return -EINVAL;
5437         }
5438
5439         if ((fs->flow_type & FLOW_EXT) &&
5440             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5441                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5442                 return -EOPNOTSUPP;
5443         }
5444
5445         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5446         switch (flow_type) {
5447         case SCTP_V4_FLOW:
5448         case TCP_V4_FLOW:
5449         case UDP_V4_FLOW:
5450                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5451                                                   unused_tuple);
5452                 break;
5453         case IP_USER_FLOW:
5454                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5455                                                unused_tuple);
5456                 break;
5457         case SCTP_V6_FLOW:
5458         case TCP_V6_FLOW:
5459         case UDP_V6_FLOW:
5460                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5461                                                   unused_tuple);
5462                 break;
5463         case IPV6_USER_FLOW:
5464                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5465                                                unused_tuple);
5466                 break;
5467         case ETHER_FLOW:
5468                 if (hdev->fd_cfg.fd_mode !=
5469                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5470                         dev_err(&hdev->pdev->dev,
5471                                 "ETHER_FLOW is not supported in current fd mode!\n");
5472                         return -EOPNOTSUPP;
5473                 }
5474
5475                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5476                                                  unused_tuple);
5477                 break;
5478         default:
5479                 dev_err(&hdev->pdev->dev,
5480                         "unsupported protocol type, protocol type = %#x\n",
5481                         flow_type);
5482                 return -EOPNOTSUPP;
5483         }
5484
5485         if (ret) {
5486                 dev_err(&hdev->pdev->dev,
5487                         "failed to check flow union tuple, ret = %d\n",
5488                         ret);
5489                 return ret;
5490         }
5491
5492         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5493 }
5494
5495 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5496 {
5497         struct hclge_fd_rule *rule = NULL;
5498         struct hlist_node *node2;
5499
5500         spin_lock_bh(&hdev->fd_rule_lock);
5501         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5502                 if (rule->location >= location)
5503                         break;
5504         }
5505
5506         spin_unlock_bh(&hdev->fd_rule_lock);
5507
5508         return rule && rule->location == location;
5509 }
5510
5511 /* Caller must hold fd_rule_lock; the rule list is kept sorted by location. */
5512 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5513                                      struct hclge_fd_rule *new_rule,
5514                                      u16 location,
5515                                      bool is_add)
5516 {
5517         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5518         struct hlist_node *node2;
5519
5520         if (is_add && !new_rule)
5521                 return -EINVAL;
5522
5523         hlist_for_each_entry_safe(rule, node2,
5524                                   &hdev->fd_rule_list, rule_node) {
5525                 if (rule->location >= location)
5526                         break;
5527                 parent = rule;
5528         }
5529
5530         if (rule && rule->location == location) {
5531                 hlist_del(&rule->rule_node);
5532                 kfree(rule);
5533                 hdev->hclge_fd_rule_num--;
5534
5535                 if (!is_add) {
5536                         if (!hdev->hclge_fd_rule_num)
5537                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5538                         clear_bit(location, hdev->fd_bmap);
5539
5540                         return 0;
5541                 }
5542         } else if (!is_add) {
5543                 dev_err(&hdev->pdev->dev,
5544                         "delete fail, rule %u does not exist\n",
5545                         location);
5546                 return -EINVAL;
5547         }
5548
5549         INIT_HLIST_NODE(&new_rule->rule_node);
5550
5551         if (parent)
5552                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5553         else
5554                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5555
5556         set_bit(location, hdev->fd_bmap);
5557         hdev->hclge_fd_rule_num++;
5558         hdev->fd_active_type = new_rule->rule_type;
5559
5560         return 0;
5561 }
5562
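/* Copy the matched fields of the ethtool flow spec (and their masks) into the
 * rule, converting from big-endian wire order to host order and recording the
 * implied ether_proto for IPv4/IPv6 flow types.
 */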
5563 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5564                               struct ethtool_rx_flow_spec *fs,
5565                               struct hclge_fd_rule *rule)
5566 {
5567         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5568
5569         switch (flow_type) {
5570         case SCTP_V4_FLOW:
5571         case TCP_V4_FLOW:
5572         case UDP_V4_FLOW:
5573                 rule->tuples.src_ip[IPV4_INDEX] =
5574                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5575                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5576                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5577
5578                 rule->tuples.dst_ip[IPV4_INDEX] =
5579                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5580                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5581                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5582
5583                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5584                 rule->tuples_mask.src_port =
5585                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5586
5587                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5588                 rule->tuples_mask.dst_port =
5589                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5590
5591                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5592                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5593
5594                 rule->tuples.ether_proto = ETH_P_IP;
5595                 rule->tuples_mask.ether_proto = 0xFFFF;
5596
5597                 break;
5598         case IP_USER_FLOW:
5599                 rule->tuples.src_ip[IPV4_INDEX] =
5600                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5601                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5602                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5603
5604                 rule->tuples.dst_ip[IPV4_INDEX] =
5605                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5606                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5607                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5608
5609                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5610                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5611
5612                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5613                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5614
5615                 rule->tuples.ether_proto = ETH_P_IP;
5616                 rule->tuples_mask.ether_proto = 0xFFFF;
5617
5618                 break;
5619         case SCTP_V6_FLOW:
5620         case TCP_V6_FLOW:
5621         case UDP_V6_FLOW:
5622                 be32_to_cpu_array(rule->tuples.src_ip,
5623                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5624                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5625                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5626
5627                 be32_to_cpu_array(rule->tuples.dst_ip,
5628                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5629                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5630                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5631
5632                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5633                 rule->tuples_mask.src_port =
5634                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5635
5636                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5637                 rule->tuples_mask.dst_port =
5638                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5639
5640                 rule->tuples.ether_proto = ETH_P_IPV6;
5641                 rule->tuples_mask.ether_proto = 0xFFFF;
5642
5643                 break;
5644         case IPV6_USER_FLOW:
5645                 be32_to_cpu_array(rule->tuples.src_ip,
5646                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5647                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5648                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5649
5650                 be32_to_cpu_array(rule->tuples.dst_ip,
5651                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5652                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5653                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5654
5655                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5656                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5657
5658                 rule->tuples.ether_proto = ETH_P_IPV6;
5659                 rule->tuples_mask.ether_proto = 0xFFFF;
5660
5661                 break;
5662         case ETHER_FLOW:
5663                 ether_addr_copy(rule->tuples.src_mac,
5664                                 fs->h_u.ether_spec.h_source);
5665                 ether_addr_copy(rule->tuples_mask.src_mac,
5666                                 fs->m_u.ether_spec.h_source);
5667
5668                 ether_addr_copy(rule->tuples.dst_mac,
5669                                 fs->h_u.ether_spec.h_dest);
5670                 ether_addr_copy(rule->tuples_mask.dst_mac,
5671                                 fs->m_u.ether_spec.h_dest);
5672
5673                 rule->tuples.ether_proto =
5674                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5675                 rule->tuples_mask.ether_proto =
5676                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5677
5678                 break;
5679         default:
5680                 return -EOPNOTSUPP;
5681         }
5682
5683         switch (flow_type) {
5684         case SCTP_V4_FLOW:
5685         case SCTP_V6_FLOW:
5686                 rule->tuples.ip_proto = IPPROTO_SCTP;
5687                 rule->tuples_mask.ip_proto = 0xFF;
5688                 break;
5689         case TCP_V4_FLOW:
5690         case TCP_V6_FLOW:
5691                 rule->tuples.ip_proto = IPPROTO_TCP;
5692                 rule->tuples_mask.ip_proto = 0xFF;
5693                 break;
5694         case UDP_V4_FLOW:
5695         case UDP_V6_FLOW:
5696                 rule->tuples.ip_proto = IPPROTO_UDP;
5697                 rule->tuples_mask.ip_proto = 0xFF;
5698                 break;
5699         default:
5700                 break;
5701         }
5702
5703         if (fs->flow_type & FLOW_EXT) {
5704                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5705                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5706         }
5707
5708         if (fs->flow_type & FLOW_MAC_EXT) {
5709                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5710                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5711         }
5712
5713         return 0;
5714 }
5715
5716 /* make sure to be called while holding fd_rule_lock */
5717 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5718                                 struct hclge_fd_rule *rule)
5719 {
5720         int ret;
5721
5722         if (!rule) {
5723                 dev_err(&hdev->pdev->dev,
5724                         "The flow director rule is NULL\n");
5725                 return -EINVAL;
5726         }
5727
5728         /* it will never fail here, so we needn't check the return value */
5729         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5730
5731         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5732         if (ret)
5733                 goto clear_rule;
5734
5735         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5736         if (ret)
5737                 goto clear_rule;
5738
5739         return 0;
5740
5741 clear_rule:
5742         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5743         return ret;
5744 }
5745
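/* add a flow director rule configured via ethtool: validate the spec,
 * resolve the destination vport and queue, clear any aRFS rules, then
 * program the new rule to hardware under fd_rule_lock
 */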
5746 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5747                               struct ethtool_rxnfc *cmd)
5748 {
5749         struct hclge_vport *vport = hclge_get_vport(handle);
5750         struct hclge_dev *hdev = vport->back;
5751         u16 dst_vport_id = 0, q_index = 0;
5752         struct ethtool_rx_flow_spec *fs;
5753         struct hclge_fd_rule *rule;
5754         u32 unused = 0;
5755         u8 action;
5756         int ret;
5757
5758         if (!hnae3_dev_fd_supported(hdev)) {
5759                 dev_err(&hdev->pdev->dev,
5760                         "flow director is not supported\n");
5761                 return -EOPNOTSUPP;
5762         }
5763
5764         if (!hdev->fd_en) {
5765                 dev_err(&hdev->pdev->dev,
5766                         "please enable flow director first\n");
5767                 return -EOPNOTSUPP;
5768         }
5769
5770         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5771
5772         ret = hclge_fd_check_spec(hdev, fs, &unused);
5773         if (ret)
5774                 return ret;
5775
5776         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5777                 action = HCLGE_FD_ACTION_DROP_PACKET;
5778         } else {
5779                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5780                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5781                 u16 tqps;
5782
5783                 if (vf > hdev->num_req_vfs) {
5784                         dev_err(&hdev->pdev->dev,
5785                                 "Error: vf id (%u) > max vf num (%u)\n",
5786                                 vf, hdev->num_req_vfs);
5787                         return -EINVAL;
5788                 }
5789
5790                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5791                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5792
5793                 if (ring >= tqps) {
5794                         dev_err(&hdev->pdev->dev,
5795                                 "Error: queue id (%u) > max tqp num (%u)\n",
5796                                 ring, tqps - 1);
5797                         return -EINVAL;
5798                 }
5799
5800                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5801                 q_index = ring;
5802         }
5803
5804         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5805         if (!rule)
5806                 return -ENOMEM;
5807
5808         ret = hclge_fd_get_tuple(hdev, fs, rule);
5809         if (ret) {
5810                 kfree(rule);
5811                 return ret;
5812         }
5813
5814         rule->flow_type = fs->flow_type;
5815         rule->location = fs->location;
5816         rule->unused_tuple = unused;
5817         rule->vf_id = dst_vport_id;
5818         rule->queue_id = q_index;
5819         rule->action = action;
5820         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5821
5822         /* to avoid rule conflicts, when the user configures a rule via ethtool,
5823          * we need to clear all arfs rules
5824          */
5825         hclge_clear_arfs_rules(handle);
5826
5827         spin_lock_bh(&hdev->fd_rule_lock);
5828         ret = hclge_fd_config_rule(hdev, rule);
5829
5830         spin_unlock_bh(&hdev->fd_rule_lock);
5831
5832         return ret;
5833 }
5834
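/* delete a single flow director rule requested via ethtool: clear its TCAM
 * entry, then remove it from the software rule list
 */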
5835 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5836                               struct ethtool_rxnfc *cmd)
5837 {
5838         struct hclge_vport *vport = hclge_get_vport(handle);
5839         struct hclge_dev *hdev = vport->back;
5840         struct ethtool_rx_flow_spec *fs;
5841         int ret;
5842
5843         if (!hnae3_dev_fd_supported(hdev))
5844                 return -EOPNOTSUPP;
5845
5846         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5847
5848         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5849                 return -EINVAL;
5850
5851         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5852                 dev_err(&hdev->pdev->dev,
5853                         "Delete failed, rule %u does not exist\n", fs->location);
5854                 return -ENOENT;
5855         }
5856
5857         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5858                                    NULL, false);
5859         if (ret)
5860                 return ret;
5861
5862         spin_lock_bh(&hdev->fd_rule_lock);
5863         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5864
5865         spin_unlock_bh(&hdev->fd_rule_lock);
5866
5867         return ret;
5868 }
5869
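/* clear every active flow director TCAM entry; when clear_list is true,
 * also free the software rule list and reset the rule bitmap and counters
 */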
5870 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5871                                      bool clear_list)
5872 {
5873         struct hclge_vport *vport = hclge_get_vport(handle);
5874         struct hclge_dev *hdev = vport->back;
5875         struct hclge_fd_rule *rule;
5876         struct hlist_node *node;
5877         u16 location;
5878
5879         if (!hnae3_dev_fd_supported(hdev))
5880                 return;
5881
5882         spin_lock_bh(&hdev->fd_rule_lock);
5883         for_each_set_bit(location, hdev->fd_bmap,
5884                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5885                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5886                                      NULL, false);
5887
5888         if (clear_list) {
5889                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5890                                           rule_node) {
5891                         hlist_del(&rule->rule_node);
5892                         kfree(rule);
5893                 }
5894                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5895                 hdev->hclge_fd_rule_num = 0;
5896                 bitmap_zero(hdev->fd_bmap,
5897                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5898         }
5899
5900         spin_unlock_bh(&hdev->fd_rule_lock);
5901 }
5902
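/* re-program every rule in the software list to hardware, e.g. after a
 * reset; rules that fail to restore are dropped from the list
 */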
5903 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5904 {
5905         struct hclge_vport *vport = hclge_get_vport(handle);
5906         struct hclge_dev *hdev = vport->back;
5907         struct hclge_fd_rule *rule;
5908         struct hlist_node *node;
5909         int ret;
5910
5911         /* Return 0 here, because the reset error handling will check this
5912          * return value. If an error is returned here, the reset process will
5913          * fail.
5914          */
5915         if (!hnae3_dev_fd_supported(hdev))
5916                 return 0;
5917
5918         /* if fd is disabled, the rules should not be restored during reset */
5919         if (!hdev->fd_en)
5920                 return 0;
5921
5922         spin_lock_bh(&hdev->fd_rule_lock);
5923         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5924                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5925                 if (!ret)
5926                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5927
5928                 if (ret) {
5929                         dev_warn(&hdev->pdev->dev,
5930                                  "Restore rule %u failed, remove it\n",
5931                                  rule->location);
5932                         clear_bit(rule->location, hdev->fd_bmap);
5933                         hlist_del(&rule->rule_node);
5934                         kfree(rule);
5935                         hdev->hclge_fd_rule_num--;
5936                 }
5937         }
5938
5939         if (hdev->hclge_fd_rule_num)
5940                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5941
5942         spin_unlock_bh(&hdev->fd_rule_lock);
5943
5944         return 0;
5945 }
5946
5947 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5948                                  struct ethtool_rxnfc *cmd)
5949 {
5950         struct hclge_vport *vport = hclge_get_vport(handle);
5951         struct hclge_dev *hdev = vport->back;
5952
5953         if (!hnae3_dev_fd_supported(hdev))
5954                 return -EOPNOTSUPP;
5955
5956         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5957         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5958
5959         return 0;
5960 }
5961
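/* the helpers below translate a stored hclge_fd_rule back into the ethtool
 * spec and mask layout; tuples marked as unused are reported with a zero mask
 */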
5962 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
5963                                      struct ethtool_tcpip4_spec *spec,
5964                                      struct ethtool_tcpip4_spec *spec_mask)
5965 {
5966         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5967         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5968                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5969
5970         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5971         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5972                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5973
5974         spec->psrc = cpu_to_be16(rule->tuples.src_port);
5975         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5976                         0 : cpu_to_be16(rule->tuples_mask.src_port);
5977
5978         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
5979         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
5980                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
5981
5982         spec->tos = rule->tuples.ip_tos;
5983         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5984                         0 : rule->tuples_mask.ip_tos;
5985 }
5986
5987 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
5988                                   struct ethtool_usrip4_spec *spec,
5989                                   struct ethtool_usrip4_spec *spec_mask)
5990 {
5991         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5992         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5993                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5994
5995         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5996         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5997                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5998
5999         spec->tos = rule->tuples.ip_tos;
6000         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6001                         0 : rule->tuples_mask.ip_tos;
6002
6003         spec->proto = rule->tuples.ip_proto;
6004         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6005                         0 : rule->tuples_mask.ip_proto;
6006
6007         spec->ip_ver = ETH_RX_NFC_IP4;
6008 }
6009
6010 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6011                                      struct ethtool_tcpip6_spec *spec,
6012                                      struct ethtool_tcpip6_spec *spec_mask)
6013 {
6014         cpu_to_be32_array(spec->ip6src,
6015                           rule->tuples.src_ip, IPV6_SIZE);
6016         cpu_to_be32_array(spec->ip6dst,
6017                           rule->tuples.dst_ip, IPV6_SIZE);
6018         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6019                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6020         else
6021                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6022                                   IPV6_SIZE);
6023
6024         if (rule->unused_tuple & BIT(INNER_DST_IP))
6025                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6026         else
6027                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6028                                   IPV6_SIZE);
6029
6030         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6031         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6032                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6033
6034         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6035         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6036                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6037 }
6038
6039 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6040                                   struct ethtool_usrip6_spec *spec,
6041                                   struct ethtool_usrip6_spec *spec_mask)
6042 {
6043         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6044         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6045         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6046                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6047         else
6048                 cpu_to_be32_array(spec_mask->ip6src,
6049                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6050
6051         if (rule->unused_tuple & BIT(INNER_DST_IP))
6052                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6053         else
6054                 cpu_to_be32_array(spec_mask->ip6dst,
6055                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6056
6057         spec->l4_proto = rule->tuples.ip_proto;
6058         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6059                         0 : rule->tuples_mask.ip_proto;
6060 }
6061
6062 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6063                                     struct ethhdr *spec,
6064                                     struct ethhdr *spec_mask)
6065 {
6066         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6067         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6068
6069         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6070                 eth_zero_addr(spec_mask->h_source);
6071         else
6072                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6073
6074         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6075                 eth_zero_addr(spec_mask->h_dest);
6076         else
6077                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6078
6079         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6080         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6081                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6082 }
6083
6084 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6085                                   struct hclge_fd_rule *rule)
6086 {
6087         if (fs->flow_type & FLOW_EXT) {
6088                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6089                 fs->m_ext.vlan_tci =
6090                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6091                                 cpu_to_be16(VLAN_VID_MASK) :
6092                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6093         }
6094
6095         if (fs->flow_type & FLOW_MAC_EXT) {
6096                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6097                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6098                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6099                 else
6100                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6101                                         rule->tuples_mask.dst_mac);
6102         }
6103 }
6104
6105 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6106                                   struct ethtool_rxnfc *cmd)
6107 {
6108         struct hclge_vport *vport = hclge_get_vport(handle);
6109         struct hclge_fd_rule *rule = NULL;
6110         struct hclge_dev *hdev = vport->back;
6111         struct ethtool_rx_flow_spec *fs;
6112         struct hlist_node *node2;
6113
6114         if (!hnae3_dev_fd_supported(hdev))
6115                 return -EOPNOTSUPP;
6116
6117         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6118
6119         spin_lock_bh(&hdev->fd_rule_lock);
6120
6121         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6122                 if (rule->location >= fs->location)
6123                         break;
6124         }
6125
6126         if (!rule || fs->location != rule->location) {
6127                 spin_unlock_bh(&hdev->fd_rule_lock);
6128
6129                 return -ENOENT;
6130         }
6131
6132         fs->flow_type = rule->flow_type;
6133         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6134         case SCTP_V4_FLOW:
6135         case TCP_V4_FLOW:
6136         case UDP_V4_FLOW:
6137                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6138                                          &fs->m_u.tcp_ip4_spec);
6139                 break;
6140         case IP_USER_FLOW:
6141                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6142                                       &fs->m_u.usr_ip4_spec);
6143                 break;
6144         case SCTP_V6_FLOW:
6145         case TCP_V6_FLOW:
6146         case UDP_V6_FLOW:
6147                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6148                                          &fs->m_u.tcp_ip6_spec);
6149                 break;
6150         case IPV6_USER_FLOW:
6151                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6152                                       &fs->m_u.usr_ip6_spec);
6153                 break;
6154         /* The flow type of the fd rule has been checked before it was added to
6155          * the rule list. As all other flow types have been handled above, it must
6156          * be ETHER_FLOW in the default case
6157          */
6158         default:
6159                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6160                                         &fs->m_u.ether_spec);
6161                 break;
6162         }
6163
6164         hclge_fd_get_ext_info(fs, rule);
6165
6166         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6167                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6168         } else {
6169                 u64 vf_id;
6170
6171                 fs->ring_cookie = rule->queue_id;
6172                 vf_id = rule->vf_id;
6173                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6174                 fs->ring_cookie |= vf_id;
6175         }
6176
6177         spin_unlock_bh(&hdev->fd_rule_lock);
6178
6179         return 0;
6180 }
6181
6182 static int hclge_get_all_rules(struct hnae3_handle *handle,
6183                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6184 {
6185         struct hclge_vport *vport = hclge_get_vport(handle);
6186         struct hclge_dev *hdev = vport->back;
6187         struct hclge_fd_rule *rule;
6188         struct hlist_node *node2;
6189         int cnt = 0;
6190
6191         if (!hnae3_dev_fd_supported(hdev))
6192                 return -EOPNOTSUPP;
6193
6194         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6195
6196         spin_lock_bh(&hdev->fd_rule_lock);
6197         hlist_for_each_entry_safe(rule, node2,
6198                                   &hdev->fd_rule_list, rule_node) {
6199                 if (cnt == cmd->rule_cnt) {
6200                         spin_unlock_bh(&hdev->fd_rule_lock);
6201                         return -EMSGSIZE;
6202                 }
6203
6204                 rule_locs[cnt] = rule->location;
6205                 cnt++;
6206         }
6207
6208         spin_unlock_bh(&hdev->fd_rule_lock);
6209
6210         cmd->rule_cnt = cnt;
6211
6212         return 0;
6213 }
6214
6215 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6216                                      struct hclge_fd_rule_tuples *tuples)
6217 {
6218 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6219 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6220
6221         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6222         tuples->ip_proto = fkeys->basic.ip_proto;
6223         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6224
6225         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6226                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6227                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6228         } else {
6229                 int i;
6230
6231                 for (i = 0; i < IPV6_SIZE; i++) {
6232                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6233                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6234                 }
6235         }
6236 }
6237
6238 /* traverse all rules, check whether an existing rule has the same tuples */
6239 static struct hclge_fd_rule *
6240 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6241                           const struct hclge_fd_rule_tuples *tuples)
6242 {
6243         struct hclge_fd_rule *rule = NULL;
6244         struct hlist_node *node;
6245
6246         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6247                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6248                         return rule;
6249         }
6250
6251         return NULL;
6252 }
6253
6254 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6255                                      struct hclge_fd_rule *rule)
6256 {
6257         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6258                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6259                              BIT(INNER_SRC_PORT);
6260         rule->action = 0;
6261         rule->vf_id = 0;
6262         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6263         if (tuples->ether_proto == ETH_P_IP) {
6264                 if (tuples->ip_proto == IPPROTO_TCP)
6265                         rule->flow_type = TCP_V4_FLOW;
6266                 else
6267                         rule->flow_type = UDP_V4_FLOW;
6268         } else {
6269                 if (tuples->ip_proto == IPPROTO_TCP)
6270                         rule->flow_type = TCP_V6_FLOW;
6271                 else
6272                         rule->flow_type = UDP_V6_FLOW;
6273         }
6274         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6275         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6276 }
6277
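/* aRFS entry point: reuse an existing rule with matching tuples (updating
 * its queue if needed) or allocate and program a new rule; returns the rule
 * location on success
 */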
6278 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6279                                       u16 flow_id, struct flow_keys *fkeys)
6280 {
6281         struct hclge_vport *vport = hclge_get_vport(handle);
6282         struct hclge_fd_rule_tuples new_tuples;
6283         struct hclge_dev *hdev = vport->back;
6284         struct hclge_fd_rule *rule;
6285         u16 tmp_queue_id;
6286         u16 bit_id;
6287         int ret;
6288
6289         if (!hnae3_dev_fd_supported(hdev))
6290                 return -EOPNOTSUPP;
6291
6292         memset(&new_tuples, 0, sizeof(new_tuples));
6293         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6294
6295         spin_lock_bh(&hdev->fd_rule_lock);
6296
6297         /* when an fd rule added by the user already exists,
6298          * arfs should not work
6299          */
6300         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6301                 spin_unlock_bh(&hdev->fd_rule_lock);
6302                 return -EOPNOTSUPP;
6303         }
6304
6305         /* check whether a flow director filter exists for this flow;
6306          * if not, create a new filter for it;
6307          * if a filter exists with a different queue id, modify the filter;
6308          * if a filter exists with the same queue id, do nothing
6309          */
6310         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6311         if (!rule) {
6312                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6313                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6314                         spin_unlock_bh(&hdev->fd_rule_lock);
6315                         return -ENOSPC;
6316                 }
6317
6318                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6319                 if (!rule) {
6320                         spin_unlock_bh(&hdev->fd_rule_lock);
6321                         return -ENOMEM;
6322                 }
6323
6324                 set_bit(bit_id, hdev->fd_bmap);
6325                 rule->location = bit_id;
6326                 rule->flow_id = flow_id;
6327                 rule->queue_id = queue_id;
6328                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6329                 ret = hclge_fd_config_rule(hdev, rule);
6330
6331                 spin_unlock_bh(&hdev->fd_rule_lock);
6332
6333                 if (ret)
6334                         return ret;
6335
6336                 return rule->location;
6337         }
6338
6339         spin_unlock_bh(&hdev->fd_rule_lock);
6340
6341         if (rule->queue_id == queue_id)
6342                 return rule->location;
6343
6344         tmp_queue_id = rule->queue_id;
6345         rule->queue_id = queue_id;
6346         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6347         if (ret) {
6348                 rule->queue_id = tmp_queue_id;
6349                 return ret;
6350         }
6351
6352         return rule->location;
6353 }
6354
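/* drop aRFS rules whose flows may have expired per rps_may_expire_flow(),
 * then clear their TCAM entries after releasing fd_rule_lock
 */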
6355 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6356 {
6357 #ifdef CONFIG_RFS_ACCEL
6358         struct hnae3_handle *handle = &hdev->vport[0].nic;
6359         struct hclge_fd_rule *rule;
6360         struct hlist_node *node;
6361         HLIST_HEAD(del_list);
6362
6363         spin_lock_bh(&hdev->fd_rule_lock);
6364         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6365                 spin_unlock_bh(&hdev->fd_rule_lock);
6366                 return;
6367         }
6368         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6369                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6370                                         rule->flow_id, rule->location)) {
6371                         hlist_del_init(&rule->rule_node);
6372                         hlist_add_head(&rule->rule_node, &del_list);
6373                         hdev->hclge_fd_rule_num--;
6374                         clear_bit(rule->location, hdev->fd_bmap);
6375                 }
6376         }
6377         spin_unlock_bh(&hdev->fd_rule_lock);
6378
6379         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6380                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6381                                      rule->location, NULL, false);
6382                 kfree(rule);
6383         }
6384 #endif
6385 }
6386
6387 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6388 {
6389 #ifdef CONFIG_RFS_ACCEL
6390         struct hclge_vport *vport = hclge_get_vport(handle);
6391         struct hclge_dev *hdev = vport->back;
6392
6393         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6394                 hclge_del_all_fd_entries(handle, true);
6395 #endif
6396 }
6397
6398 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6399 {
6400         struct hclge_vport *vport = hclge_get_vport(handle);
6401         struct hclge_dev *hdev = vport->back;
6402
6403         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6404                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6405 }
6406
6407 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6408 {
6409         struct hclge_vport *vport = hclge_get_vport(handle);
6410         struct hclge_dev *hdev = vport->back;
6411
6412         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6413 }
6414
6415 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6416 {
6417         struct hclge_vport *vport = hclge_get_vport(handle);
6418         struct hclge_dev *hdev = vport->back;
6419
6420         return hdev->rst_stats.hw_reset_done_cnt;
6421 }
6422
6423 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6424 {
6425         struct hclge_vport *vport = hclge_get_vport(handle);
6426         struct hclge_dev *hdev = vport->back;
6427         bool clear;
6428
6429         hdev->fd_en = enable;
6430         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6431         if (!enable)
6432                 hclge_del_all_fd_entries(handle, clear);
6433         else
6434                 hclge_restore_fd_entries(handle);
6435 }
6436
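/* enable or disable the MAC TX/RX path; when enabling, the padding, FCS and
 * oversize/undersize handling bits are also set in a single firmware command
 */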
6437 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6438 {
6439         struct hclge_desc desc;
6440         struct hclge_config_mac_mode_cmd *req =
6441                 (struct hclge_config_mac_mode_cmd *)desc.data;
6442         u32 loop_en = 0;
6443         int ret;
6444
6445         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6446
6447         if (enable) {
6448                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6449                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6450                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6451                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6452                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6453                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6454                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6455                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6456                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6457                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6458         }
6459
6460         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6461
6462         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6463         if (ret)
6464                 dev_err(&hdev->pdev->dev,
6465                         "mac enable fail, ret =%d.\n", ret);
6466 }
6467
6468 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6469                                      u8 switch_param, u8 param_mask)
6470 {
6471         struct hclge_mac_vlan_switch_cmd *req;
6472         struct hclge_desc desc;
6473         u32 func_id;
6474         int ret;
6475
6476         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6477         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6478
6479         /* read current config parameter */
6480         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6481                                    true);
6482         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6483         req->func_id = cpu_to_le32(func_id);
6484
6485         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6486         if (ret) {
6487                 dev_err(&hdev->pdev->dev,
6488                         "read mac vlan switch parameter fail, ret = %d\n", ret);
6489                 return ret;
6490         }
6491
6492         /* modify and write new config parameter */
6493         hclge_cmd_reuse_desc(&desc, false);
6494         req->switch_param = (req->switch_param & param_mask) | switch_param;
6495         req->param_mask = param_mask;
6496
6497         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6498         if (ret)
6499                 dev_err(&hdev->pdev->dev,
6500                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6501         return ret;
6502 }
6503
6504 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6505                                        int link_ret)
6506 {
6507 #define HCLGE_PHY_LINK_STATUS_NUM  200
6508
6509         struct phy_device *phydev = hdev->hw.mac.phydev;
6510         int i = 0;
6511         int ret;
6512
6513         do {
6514                 ret = phy_read_status(phydev);
6515                 if (ret) {
6516                         dev_err(&hdev->pdev->dev,
6517                                 "phy update link status fail, ret = %d\n", ret);
6518                         return;
6519                 }
6520
6521                 if (phydev->link == link_ret)
6522                         break;
6523
6524                 msleep(HCLGE_LINK_STATUS_MS);
6525         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6526 }
6527
6528 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6529 {
6530 #define HCLGE_MAC_LINK_STATUS_NUM  100
6531
6532         int i = 0;
6533         int ret;
6534
6535         do {
6536                 ret = hclge_get_mac_link_status(hdev);
6537                 if (ret < 0)
6538                         return ret;
6539                 else if (ret == link_ret)
6540                         return 0;
6541
6542                 msleep(HCLGE_LINK_STATUS_MS);
6543         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6544         return -EBUSY;
6545 }
6546
6547 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6548                                           bool is_phy)
6549 {
6550 #define HCLGE_LINK_STATUS_DOWN 0
6551 #define HCLGE_LINK_STATUS_UP   1
6552
6553         int link_ret;
6554
6555         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6556
6557         if (is_phy)
6558                 hclge_phy_link_status_wait(hdev, link_ret);
6559
6560         return hclge_mac_link_status_wait(hdev, link_ret);
6561 }
6562
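/* toggle MAC (app) loopback: read the current MAC mode config, set or clear
 * the loopback and TX/RX enable bits, and write it back
 */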
6563 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6564 {
6565         struct hclge_config_mac_mode_cmd *req;
6566         struct hclge_desc desc;
6567         u32 loop_en;
6568         int ret;
6569
6570         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6571         /* 1 Read out the MAC mode config at first */
6572         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6573         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6574         if (ret) {
6575                 dev_err(&hdev->pdev->dev,
6576                         "mac loopback get fail, ret =%d.\n", ret);
6577                 return ret;
6578         }
6579
6580         /* 2 Then setup the loopback flag */
6581         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6582         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6583         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6584         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6585
6586         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6587
6588         /* 3 Config mac work mode with loopback flag
6589          * and its original configuration parameters
6590          */
6591         hclge_cmd_reuse_desc(&desc, false);
6592         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6593         if (ret)
6594                 dev_err(&hdev->pdev->dev,
6595                         "mac loopback set fail, ret =%d.\n", ret);
6596         return ret;
6597 }
6598
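/* configure serial or parallel serdes loopback and poll the command result
 * until the firmware reports completion or the retries are exhausted
 */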
6599 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6600                                      enum hnae3_loop loop_mode)
6601 {
6602 #define HCLGE_SERDES_RETRY_MS   10
6603 #define HCLGE_SERDES_RETRY_NUM  100
6604
6605         struct hclge_serdes_lb_cmd *req;
6606         struct hclge_desc desc;
6607         int ret, i = 0;
6608         u8 loop_mode_b;
6609
6610         req = (struct hclge_serdes_lb_cmd *)desc.data;
6611         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6612
6613         switch (loop_mode) {
6614         case HNAE3_LOOP_SERIAL_SERDES:
6615                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6616                 break;
6617         case HNAE3_LOOP_PARALLEL_SERDES:
6618                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6619                 break;
6620         default:
6621                 dev_err(&hdev->pdev->dev,
6622                         "unsupported serdes loopback mode %d\n", loop_mode);
6623                 return -ENOTSUPP;
6624         }
6625
6626         if (en) {
6627                 req->enable = loop_mode_b;
6628                 req->mask = loop_mode_b;
6629         } else {
6630                 req->mask = loop_mode_b;
6631         }
6632
6633         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6634         if (ret) {
6635                 dev_err(&hdev->pdev->dev,
6636                         "serdes loopback set fail, ret = %d\n", ret);
6637                 return ret;
6638         }
6639
6640         do {
6641                 msleep(HCLGE_SERDES_RETRY_MS);
6642                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6643                                            true);
6644                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6645                 if (ret) {
6646                         dev_err(&hdev->pdev->dev,
6647                                 "serdes loopback get fail, ret = %d\n", ret);
6648                         return ret;
6649                 }
6650         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6651                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6652
6653         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6654                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6655                 return -EBUSY;
6656         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6657                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6658                 return -EIO;
6659         }
6660         return ret;
6661 }
6662
6663 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6664                                      enum hnae3_loop loop_mode)
6665 {
6666         int ret;
6667
6668         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6669         if (ret)
6670                 return ret;
6671
6672         hclge_cfg_mac_mode(hdev, en);
6673
6674         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6675         if (ret)
6676                 dev_err(&hdev->pdev->dev,
6677                         "serdes loopback config mac mode timeout\n");
6678
6679         return ret;
6680 }
6681
6682 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6683                                      struct phy_device *phydev)
6684 {
6685         int ret;
6686
6687         if (!phydev->suspended) {
6688                 ret = phy_suspend(phydev);
6689                 if (ret)
6690                         return ret;
6691         }
6692
6693         ret = phy_resume(phydev);
6694         if (ret)
6695                 return ret;
6696
6697         return phy_loopback(phydev, true);
6698 }
6699
6700 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6701                                       struct phy_device *phydev)
6702 {
6703         int ret;
6704
6705         ret = phy_loopback(phydev, false);
6706         if (ret)
6707                 return ret;
6708
6709         return phy_suspend(phydev);
6710 }
6711
6712 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6713 {
6714         struct phy_device *phydev = hdev->hw.mac.phydev;
6715         int ret;
6716
6717         if (!phydev)
6718                 return -ENOTSUPP;
6719
6720         if (en)
6721                 ret = hclge_enable_phy_loopback(hdev, phydev);
6722         else
6723                 ret = hclge_disable_phy_loopback(hdev, phydev);
6724         if (ret) {
6725                 dev_err(&hdev->pdev->dev,
6726                         "set phy loopback fail, ret = %d\n", ret);
6727                 return ret;
6728         }
6729
6730         hclge_cfg_mac_mode(hdev, en);
6731
6732         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6733         if (ret)
6734                 dev_err(&hdev->pdev->dev,
6735                         "phy loopback config mac mode timeout\n");
6736
6737         return ret;
6738 }
6739
6740 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6741                             int stream_id, bool enable)
6742 {
6743         struct hclge_desc desc;
6744         struct hclge_cfg_com_tqp_queue_cmd *req =
6745                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6746         int ret;
6747
6748         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6749         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6750         req->stream_id = cpu_to_le16(stream_id);
6751         if (enable)
6752                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6753
6754         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6755         if (ret)
6756                 dev_err(&hdev->pdev->dev,
6757                         "Tqp enable fail, status =%d.\n", ret);
6758         return ret;
6759 }
6760
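/* configure the requested loopback mode (app, serdes or phy) and then
 * enable or disable all TQPs of this vport accordingly
 */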
6761 static int hclge_set_loopback(struct hnae3_handle *handle,
6762                               enum hnae3_loop loop_mode, bool en)
6763 {
6764         struct hclge_vport *vport = hclge_get_vport(handle);
6765         struct hnae3_knic_private_info *kinfo;
6766         struct hclge_dev *hdev = vport->back;
6767         int i, ret;
6768
6769         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6770          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6771          * the same, the packets are looped back in the SSU. If SSU loopback
6772          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6773          */
6774         if (hdev->pdev->revision >= 0x21) {
6775                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6776
6777                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6778                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
6779                 if (ret)
6780                         return ret;
6781         }
6782
6783         switch (loop_mode) {
6784         case HNAE3_LOOP_APP:
6785                 ret = hclge_set_app_loopback(hdev, en);
6786                 break;
6787         case HNAE3_LOOP_SERIAL_SERDES:
6788         case HNAE3_LOOP_PARALLEL_SERDES:
6789                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6790                 break;
6791         case HNAE3_LOOP_PHY:
6792                 ret = hclge_set_phy_loopback(hdev, en);
6793                 break;
6794         default:
6795                 ret = -ENOTSUPP;
6796                 dev_err(&hdev->pdev->dev,
6797                         "loop_mode %d is not supported\n", loop_mode);
6798                 break;
6799         }
6800
6801         if (ret)
6802                 return ret;
6803
6804         kinfo = &vport->nic.kinfo;
6805         for (i = 0; i < kinfo->num_tqps; i++) {
6806                 ret = hclge_tqp_enable(hdev, i, 0, en);
6807                 if (ret)
6808                         return ret;
6809         }
6810
6811         return 0;
6812 }
6813
6814 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6815 {
6816         int ret;
6817
6818         ret = hclge_set_app_loopback(hdev, false);
6819         if (ret)
6820                 return ret;
6821
6822         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6823         if (ret)
6824                 return ret;
6825
6826         return hclge_cfg_serdes_loopback(hdev, false,
6827                                          HNAE3_LOOP_PARALLEL_SERDES);
6828 }
6829
6830 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6831 {
6832         struct hclge_vport *vport = hclge_get_vport(handle);
6833         struct hnae3_knic_private_info *kinfo;
6834         struct hnae3_queue *queue;
6835         struct hclge_tqp *tqp;
6836         int i;
6837
6838         kinfo = &vport->nic.kinfo;
6839         for (i = 0; i < kinfo->num_tqps; i++) {
6840                 queue = handle->kinfo.tqp[i];
6841                 tqp = container_of(queue, struct hclge_tqp, q);
6842                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6843         }
6844 }
6845
6846 static void hclge_flush_link_update(struct hclge_dev *hdev)
6847 {
6848 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
6849
6850         unsigned long last = hdev->serv_processed_cnt;
6851         int i = 0;
6852
6853         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6854                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6855                last == hdev->serv_processed_cnt)
6856                 usleep_range(1, 1);
6857 }
6858
6859 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6860 {
6861         struct hclge_vport *vport = hclge_get_vport(handle);
6862         struct hclge_dev *hdev = vport->back;
6863
6864         if (enable) {
6865                 hclge_task_schedule(hdev, 0);
6866         } else {
6867                 /* Set the DOWN flag here to disable link updating */
6868                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6869
6870                 /* flush memory to make sure DOWN is seen by service task */
6871                 smp_mb__before_atomic();
6872                 hclge_flush_link_update(hdev);
6873         }
6874 }
6875
6876 static int hclge_ae_start(struct hnae3_handle *handle)
6877 {
6878         struct hclge_vport *vport = hclge_get_vport(handle);
6879         struct hclge_dev *hdev = vport->back;
6880
6881         /* mac enable */
6882         hclge_cfg_mac_mode(hdev, true);
6883         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6884         hdev->hw.mac.link = 0;
6885
6886         /* reset tqp stats */
6887         hclge_reset_tqp_stats(handle);
6888
6889         hclge_mac_start_phy(hdev);
6890
6891         return 0;
6892 }
6893
6894 static void hclge_ae_stop(struct hnae3_handle *handle)
6895 {
6896         struct hclge_vport *vport = hclge_get_vport(handle);
6897         struct hclge_dev *hdev = vport->back;
6898         int i;
6899
6900         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6901
6902         hclge_clear_arfs_rules(handle);
6903
6904         /* If it is not PF reset, the firmware will disable the MAC,
6905          * so it only needs to stop the phy here.
6906          */
6907         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6908             hdev->reset_type != HNAE3_FUNC_RESET) {
6909                 hclge_mac_stop_phy(hdev);
6910                 hclge_update_link_status(hdev);
6911                 return;
6912         }
6913
6914         for (i = 0; i < handle->kinfo.num_tqps; i++)
6915                 hclge_reset_tqp(handle, i);
6916
6917         hclge_config_mac_tnl_int(hdev, false);
6918
6919         /* Mac disable */
6920         hclge_cfg_mac_mode(hdev, false);
6921
6922         hclge_mac_stop_phy(hdev);
6923
6924         /* reset tqp stats */
6925         hclge_reset_tqp_stats(handle);
6926         hclge_update_link_status(hdev);
6927 }
6928
6929 int hclge_vport_start(struct hclge_vport *vport)
6930 {
6931         struct hclge_dev *hdev = vport->back;
6932
6933         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6934         vport->last_active_jiffies = jiffies;
6935
6936         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
6937                 if (vport->vport_id) {
6938                         hclge_restore_mac_table_common(vport);
6939                         hclge_restore_vport_vlan_table(vport);
6940                 } else {
6941                         hclge_restore_hw_table(hdev);
6942                 }
6943         }
6944
6945         clear_bit(vport->vport_id, hdev->vport_config_block);
6946
6947         return 0;
6948 }
6949
6950 void hclge_vport_stop(struct hclge_vport *vport)
6951 {
6952         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6953 }
6954
6955 static int hclge_client_start(struct hnae3_handle *handle)
6956 {
6957         struct hclge_vport *vport = hclge_get_vport(handle);
6958
6959         return hclge_vport_start(vport);
6960 }
6961
6962 static void hclge_client_stop(struct hnae3_handle *handle)
6963 {
6964         struct hclge_vport *vport = hclge_get_vport(handle);
6965
6966         hclge_vport_stop(vport);
6967 }
6968
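/* convert the firmware's MAC/VLAN table response code into an errno,
 * depending on whether the operation was an add, remove or lookup
 */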
6969 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6970                                          u16 cmdq_resp, u8  resp_code,
6971                                          enum hclge_mac_vlan_tbl_opcode op)
6972 {
6973         struct hclge_dev *hdev = vport->back;
6974
6975         if (cmdq_resp) {
6976                 dev_err(&hdev->pdev->dev,
6977                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6978                         cmdq_resp);
6979                 return -EIO;
6980         }
6981
6982         if (op == HCLGE_MAC_VLAN_ADD) {
6983                 if (!resp_code || resp_code == 1)
6984                         return 0;
6985                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
6986                          resp_code == HCLGE_ADD_MC_OVERFLOW)
6987                         return -ENOSPC;
6988
6989                 dev_err(&hdev->pdev->dev,
6990                         "add mac addr failed for undefined, code=%u.\n",
6991                         resp_code);
6992                 return -EIO;
6993         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6994                 if (!resp_code) {
6995                         return 0;
6996                 } else if (resp_code == 1) {
6997                         dev_dbg(&hdev->pdev->dev,
6998                                 "remove mac addr failed for miss.\n");
6999                         return -ENOENT;
7000                 }
7001
7002                 dev_err(&hdev->pdev->dev,
7003                         "remove mac addr failed for undefined, code=%u.\n",
7004                         resp_code);
7005                 return -EIO;
7006         } else if (op == HCLGE_MAC_VLAN_LKUP) {
7007                 if (!resp_code) {
7008                         return 0;
7009                 } else if (resp_code == 1) {
7010                         dev_dbg(&hdev->pdev->dev,
7011                                 "lookup mac addr failed for miss.\n");
7012                         return -ENOENT;
7013                 }
7014
7015                 dev_err(&hdev->pdev->dev,
7016                         "lookup mac addr failed for undefined, code=%u.\n",
7017                         resp_code);
7018                 return -EIO;
7019         }
7020
7021         dev_err(&hdev->pdev->dev,
7022                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7023
7024         return -EINVAL;
7025 }
7026
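/* set or clear the bit for vfid in the MAC/VLAN table descriptor's function
 * bitmap; the first 192 functions live in desc[1], the rest in desc[2]
 */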
7027 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7028 {
7029 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7030
7031         unsigned int word_num;
7032         unsigned int bit_num;
7033
7034         if (vfid > 255 || vfid < 0)
7035                 return -EIO;
7036
7037         if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7038                 word_num = vfid / 32;
7039                 bit_num  = vfid % 32;
7040                 if (clr)
7041                         desc[1].data[word_num] &= cpu_to_le32(~(1U << bit_num));
7042                 else
7043                         desc[1].data[word_num] |= cpu_to_le32(1U << bit_num);
7044         } else {
7045                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7046                 bit_num  = vfid % 32;
7047                 if (clr)
7048                         desc[2].data[word_num] &= cpu_to_le32(~(1U << bit_num));
7049                 else
7050                         desc[2].data[word_num] |= cpu_to_le32(1U << bit_num);
7051         }
7052
7053         return 0;
7054 }
7055
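/* Return true when no function id bit is left set in the entry's bitmap
 * (desc[1] and desc[2]), i.e. the multicast entry is no longer used by
 * any function and can be removed.
 */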
7056 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7057 {
7058 #define HCLGE_DESC_NUMBER 3
7059 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7060         int i, j;
7061
7062         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7063                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7064                         if (desc[i].data[j])
7065                                 return false;
7066
7067         return true;
7068 }
7069
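/* Pack the 6-byte MAC address into the table entry: bytes 0-3 go into
 * mac_addr_hi32 and bytes 4-5 into mac_addr_lo16; multicast entries also
 * get the entry_type and mc_mac_en bits set.
 */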
7070 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7071                                    const u8 *addr, bool is_mc)
7072 {
7073         const unsigned char *mac_addr = addr;
7074         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7075                        (mac_addr[0]) | (mac_addr[1] << 8);
7076         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7077
7078         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7079         if (is_mc) {
7080                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7081                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7082         }
7083
7084         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7085         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7086 }
7087
7088 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7089                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
7090 {
7091         struct hclge_dev *hdev = vport->back;
7092         struct hclge_desc desc;
7093         u8 resp_code;
7094         u16 retval;
7095         int ret;
7096
7097         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7098
7099         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7100
7101         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7102         if (ret) {
7103                 dev_err(&hdev->pdev->dev,
7104                         "del mac addr failed for cmd_send, ret =%d.\n",
7105                         ret);
7106                 return ret;
7107         }
7108         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7109         retval = le16_to_cpu(desc.retval);
7110
7111         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7112                                              HCLGE_MAC_VLAN_REMOVE);
7113 }
7114
7115 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7116                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7117                                      struct hclge_desc *desc,
7118                                      bool is_mc)
7119 {
7120         struct hclge_dev *hdev = vport->back;
7121         u8 resp_code;
7122         u16 retval;
7123         int ret;
7124
7125         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7126         if (is_mc) {
7127                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7128                 memcpy(desc[0].data,
7129                        req,
7130                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7131                 hclge_cmd_setup_basic_desc(&desc[1],
7132                                            HCLGE_OPC_MAC_VLAN_ADD,
7133                                            true);
7134                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7135                 hclge_cmd_setup_basic_desc(&desc[2],
7136                                            HCLGE_OPC_MAC_VLAN_ADD,
7137                                            true);
7138                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7139         } else {
7140                 memcpy(desc[0].data,
7141                        req,
7142                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7143                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7144         }
7145         if (ret) {
7146                 dev_err(&hdev->pdev->dev,
7147                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7148                         ret);
7149                 return ret;
7150         }
7151         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7152         retval = le16_to_cpu(desc[0].retval);
7153
7154         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7155                                              HCLGE_MAC_VLAN_LKUP);
7156 }
7157
7158 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7159                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7160                                   struct hclge_desc *mc_desc)
7161 {
7162         struct hclge_dev *hdev = vport->back;
7163         int cfg_status;
7164         u8 resp_code;
7165         u16 retval;
7166         int ret;
7167
7168         if (!mc_desc) {
7169                 struct hclge_desc desc;
7170
7171                 hclge_cmd_setup_basic_desc(&desc,
7172                                            HCLGE_OPC_MAC_VLAN_ADD,
7173                                            false);
7174                 memcpy(desc.data, req,
7175                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7176                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7177                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7178                 retval = le16_to_cpu(desc.retval);
7179
7180                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7181                                                            resp_code,
7182                                                            HCLGE_MAC_VLAN_ADD);
7183         } else {
7184                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7185                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7186                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7187                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7188                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7189                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7190                 memcpy(mc_desc[0].data, req,
7191                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7192                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7193                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7194                 retval = le16_to_cpu(mc_desc[0].retval);
7195
7196                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7197                                                            resp_code,
7198                                                            HCLGE_MAC_VLAN_ADD);
7199         }
7200
7201         if (ret) {
7202                 dev_err(&hdev->pdev->dev,
7203                         "add mac addr failed for cmd_send, ret =%d.\n",
7204                         ret);
7205                 return ret;
7206         }
7207
7208         return cfg_status;
7209 }
7210
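/* Ask firmware for @space_size unicast MAC (UMV) table entries; the size
 * actually granted is reported back through @allocated_size.
 */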
7211 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7212                                u16 *allocated_size)
7213 {
7214         struct hclge_umv_spc_alc_cmd *req;
7215         struct hclge_desc desc;
7216         int ret;
7217
7218         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7219         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7220
7221         req->space_size = cpu_to_le32(space_size);
7222
7223         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7224         if (ret) {
7225                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7226                         ret);
7227                 return ret;
7228         }
7229
7230         *allocated_size = le32_to_cpu(desc.data[1]);
7231
7232         return 0;
7233 }
7234
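/* Allocate the UMV space and split it into an equal private quota per
 * vport plus a shared pool made of the remaining entries.
 */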
7235 static int hclge_init_umv_space(struct hclge_dev *hdev)
7236 {
7237         u16 allocated_size = 0;
7238         int ret;
7239
7240         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7241         if (ret)
7242                 return ret;
7243
7244         if (allocated_size < hdev->wanted_umv_size)
7245                 dev_warn(&hdev->pdev->dev,
7246                          "failed to alloc umv space, want %u, get %u\n",
7247                          hdev->wanted_umv_size, allocated_size);
7248
7249         hdev->max_umv_size = allocated_size;
7250         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7251         hdev->share_umv_size = hdev->priv_umv_size +
7252                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7253
7254         return 0;
7255 }
7256
7257 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7258 {
7259         struct hclge_vport *vport;
7260         int i;
7261
7262         for (i = 0; i < hdev->num_alloc_vport; i++) {
7263                 vport = &hdev->vport[i];
7264                 vport->used_umv_num = 0;
7265         }
7266
7267         mutex_lock(&hdev->vport_lock);
7268         hdev->share_umv_size = hdev->priv_umv_size +
7269                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7270         mutex_unlock(&hdev->vport_lock);
7271 }
7272
7273 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7274 {
7275         struct hclge_dev *hdev = vport->back;
7276         bool is_full;
7277
7278         if (need_lock)
7279                 mutex_lock(&hdev->vport_lock);
7280
7281         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7282                    hdev->share_umv_size == 0);
7283
7284         if (need_lock)
7285                 mutex_unlock(&hdev->vport_lock);
7286
7287         return is_full;
7288 }
7289
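/* Account one UMV entry being added or freed: the vport's private quota
 * is consumed first and the shared pool only once that quota is exhausted.
 */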
7290 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7291 {
7292         struct hclge_dev *hdev = vport->back;
7293
7294         if (is_free) {
7295                 if (vport->used_umv_num > hdev->priv_umv_size)
7296                         hdev->share_umv_size++;
7297
7298                 if (vport->used_umv_num > 0)
7299                         vport->used_umv_num--;
7300         } else {
7301                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7302                     hdev->share_umv_size > 0)
7303                         hdev->share_umv_size--;
7304                 vport->used_umv_num++;
7305         }
7306 }
7307
7308 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7309                                                   const u8 *mac_addr)
7310 {
7311         struct hclge_mac_node *mac_node, *tmp;
7312
7313         list_for_each_entry_safe(mac_node, tmp, list, node)
7314                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7315                         return mac_node;
7316
7317         return NULL;
7318 }
7319
7320 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7321                                   enum HCLGE_MAC_NODE_STATE state)
7322 {
7323         switch (state) {
7324         /* from set_rx_mode or tmp_add_list */
7325         case HCLGE_MAC_TO_ADD:
7326                 if (mac_node->state == HCLGE_MAC_TO_DEL)
7327                         mac_node->state = HCLGE_MAC_ACTIVE;
7328                 break;
7329         /* only from set_rx_mode */
7330         case HCLGE_MAC_TO_DEL:
7331                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7332                         list_del(&mac_node->node);
7333                         kfree(mac_node);
7334                 } else {
7335                         mac_node->state = HCLGE_MAC_TO_DEL;
7336                 }
7337                 break;
7338         /* only from tmp_add_list, the mac_node->state won't be
7339          * ACTIVE.
7340          */
7341         case HCLGE_MAC_ACTIVE:
7342                 if (mac_node->state == HCLGE_MAC_TO_ADD)
7343                         mac_node->state = HCLGE_MAC_ACTIVE;
7344
7345                 break;
7346         }
7347 }
7348
7349 int hclge_update_mac_list(struct hclge_vport *vport,
7350                           enum HCLGE_MAC_NODE_STATE state,
7351                           enum HCLGE_MAC_ADDR_TYPE mac_type,
7352                           const unsigned char *addr)
7353 {
7354         struct hclge_dev *hdev = vport->back;
7355         struct hclge_mac_node *mac_node;
7356         struct list_head *list;
7357
7358         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7359                 &vport->uc_mac_list : &vport->mc_mac_list;
7360
7361         spin_lock_bh(&vport->mac_list_lock);
7362
7363         /* if the mac addr is already in the mac list, no need to add a new
7364          * one into it, just check the mac addr state, convert it to a new
7365          * state, remove it, or do nothing.
7366          */
7367         mac_node = hclge_find_mac_node(list, addr);
7368         if (mac_node) {
7369                 hclge_update_mac_node(mac_node, state);
7370                 spin_unlock_bh(&vport->mac_list_lock);
7371                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7372                 return 0;
7373         }
7374
7375         /* if this address was never added, there is no need to delete it */
7376         if (state == HCLGE_MAC_TO_DEL) {
7377                 spin_unlock_bh(&vport->mac_list_lock);
7378                 dev_err(&hdev->pdev->dev,
7379                         "failed to delete address %pM from mac list\n",
7380                         addr);
7381                 return -ENOENT;
7382         }
7383
7384         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7385         if (!mac_node) {
7386                 spin_unlock_bh(&vport->mac_list_lock);
7387                 return -ENOMEM;
7388         }
7389
7390         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7391
7392         mac_node->state = state;
7393         ether_addr_copy(mac_node->mac_addr, addr);
7394         list_add_tail(&mac_node->node, list);
7395
7396         spin_unlock_bh(&vport->mac_list_lock);
7397
7398         return 0;
7399 }
7400
7401 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7402                              const unsigned char *addr)
7403 {
7404         struct hclge_vport *vport = hclge_get_vport(handle);
7405
7406         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7407                                      addr);
7408 }
7409
7410 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7411                              const unsigned char *addr)
7412 {
7413         struct hclge_dev *hdev = vport->back;
7414         struct hclge_mac_vlan_tbl_entry_cmd req;
7415         struct hclge_desc desc;
7416         u16 egress_port = 0;
7417         int ret;
7418
7419         /* mac addr check */
7420         if (is_zero_ether_addr(addr) ||
7421             is_broadcast_ether_addr(addr) ||
7422             is_multicast_ether_addr(addr)) {
7423                 dev_err(&hdev->pdev->dev,
7424                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7425                          addr, is_zero_ether_addr(addr),
7426                          is_broadcast_ether_addr(addr),
7427                          is_multicast_ether_addr(addr));
7428                 return -EINVAL;
7429         }
7430
7431         memset(&req, 0, sizeof(req));
7432
7433         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7434                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7435
7436         req.egress_port = cpu_to_le16(egress_port);
7437
7438         hclge_prepare_mac_addr(&req, addr, false);
7439
7440         /* Lookup the mac address in the mac_vlan table, and add
7441          * it if the entry does not exist. Duplicate unicast entries
7442          * are not allowed in the mac vlan table.
7443          */
7444         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7445         if (ret == -ENOENT) {
7446                 mutex_lock(&hdev->vport_lock);
7447                 if (!hclge_is_umv_space_full(vport, false)) {
7448                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7449                         if (!ret)
7450                                 hclge_update_umv_space(vport, false);
7451                         mutex_unlock(&hdev->vport_lock);
7452                         return ret;
7453                 }
7454                 mutex_unlock(&hdev->vport_lock);
7455
7456                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7457                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7458                                 hdev->priv_umv_size);
7459
7460                 return -ENOSPC;
7461         }
7462
7463         /* check if we just hit the duplicate */
7464         if (!ret) {
7465                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7466                          vport->vport_id, addr);
7467                 return 0;
7468         }
7469
7470         dev_err(&hdev->pdev->dev,
7471                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7472                 addr);
7473
7474         return ret;
7475 }
7476
7477 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7478                             const unsigned char *addr)
7479 {
7480         struct hclge_vport *vport = hclge_get_vport(handle);
7481
7482         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7483                                      addr);
7484 }
7485
7486 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7487                             const unsigned char *addr)
7488 {
7489         struct hclge_dev *hdev = vport->back;
7490         struct hclge_mac_vlan_tbl_entry_cmd req;
7491         int ret;
7492
7493         /* mac addr check */
7494         if (is_zero_ether_addr(addr) ||
7495             is_broadcast_ether_addr(addr) ||
7496             is_multicast_ether_addr(addr)) {
7497                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7498                         addr);
7499                 return -EINVAL;
7500         }
7501
7502         memset(&req, 0, sizeof(req));
7503         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7504         hclge_prepare_mac_addr(&req, addr, false);
7505         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7506         if (!ret) {
7507                 mutex_lock(&hdev->vport_lock);
7508                 hclge_update_umv_space(vport, true);
7509                 mutex_unlock(&hdev->vport_lock);
7510         } else if (ret == -ENOENT) {
7511                 ret = 0;
7512         }
7513
7514         return ret;
7515 }
7516
7517 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7518                              const unsigned char *addr)
7519 {
7520         struct hclge_vport *vport = hclge_get_vport(handle);
7521
7522         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7523                                      addr);
7524 }
7525
7526 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7527                              const unsigned char *addr)
7528 {
7529         struct hclge_dev *hdev = vport->back;
7530         struct hclge_mac_vlan_tbl_entry_cmd req;
7531         struct hclge_desc desc[3];
7532         int status;
7533
7534         /* mac addr check */
7535         if (!is_multicast_ether_addr(addr)) {
7536                 dev_err(&hdev->pdev->dev,
7537                         "Add mc mac err! invalid mac:%pM.\n",
7538                          addr);
7539                 return -EINVAL;
7540         }
7541         memset(&req, 0, sizeof(req));
7542         hclge_prepare_mac_addr(&req, addr, true);
7543         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7544         if (status) {
7545                 /* This mac addr does not exist, add a new entry for it */
7546                 memset(desc[0].data, 0, sizeof(desc[0].data));
7547                 memset(desc[1].data, 0, sizeof(desc[0].data));
7548                 memset(desc[2].data, 0, sizeof(desc[0].data));
7549         }
7550         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7551         if (status)
7552                 return status;
7553         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7554
7555         /* if the table has already overflowed, do not print each time */
7556         if (status == -ENOSPC &&
7557             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7558                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7559
7560         return status;
7561 }
7562
7563 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7564                             const unsigned char *addr)
7565 {
7566         struct hclge_vport *vport = hclge_get_vport(handle);
7567
7568         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7569                                      addr);
7570 }
7571
7572 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7573                             const unsigned char *addr)
7574 {
7575         struct hclge_dev *hdev = vport->back;
7576         struct hclge_mac_vlan_tbl_entry_cmd req;
7577         int status;
7578         struct hclge_desc desc[3];
7579
7580         /* mac addr check */
7581         if (!is_multicast_ether_addr(addr)) {
7582                 dev_dbg(&hdev->pdev->dev,
7583                         "Remove mc mac err! invalid mac:%pM.\n",
7584                          addr);
7585                 return -EINVAL;
7586         }
7587
7588         memset(&req, 0, sizeof(req));
7589         hclge_prepare_mac_addr(&req, addr, true);
7590         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7591         if (!status) {
7592                 /* This mac addr exists, remove this handle's VFID for it */
7593                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7594                 if (status)
7595                         return status;
7596
7597                 if (hclge_is_all_function_id_zero(desc))
7598                         /* All the vfids are zero, so delete this entry */
7599                         status = hclge_remove_mac_vlan_tbl(vport, &req);
7600                 else
7601                         /* Not all the vfids are zero, so update the vfid */
7602                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7603
7604         } else if (status == -ENOENT) {
7605                 status = 0;
7606         }
7607
7608         return status;
7609 }
7610
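/* Write each address on @list to hardware via @sync; successfully written
 * nodes become ACTIVE, while the first failure re-marks the vport for a
 * later retry and stops the walk.
 */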
7611 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7612                                       struct list_head *list,
7613                                       int (*sync)(struct hclge_vport *,
7614                                                   const unsigned char *))
7615 {
7616         struct hclge_mac_node *mac_node, *tmp;
7617         int ret;
7618
7619         list_for_each_entry_safe(mac_node, tmp, list, node) {
7620                 ret = sync(vport, mac_node->mac_addr);
7621                 if (!ret) {
7622                         mac_node->state = HCLGE_MAC_ACTIVE;
7623                 } else {
7624                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7625                                 &vport->state);
7626                         break;
7627                 }
7628         }
7629 }
7630
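/* Remove each address on @list from hardware via @unsync; nodes that were
 * removed (or were already gone) are freed, while the first failure
 * re-marks the vport for a later retry and stops the walk.
 */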
7631 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7632                                         struct list_head *list,
7633                                         int (*unsync)(struct hclge_vport *,
7634                                                       const unsigned char *))
7635 {
7636         struct hclge_mac_node *mac_node, *tmp;
7637         int ret;
7638
7639         list_for_each_entry_safe(mac_node, tmp, list, node) {
7640                 ret = unsync(vport, mac_node->mac_addr);
7641                 if (!ret || ret == -ENOENT) {
7642                         list_del(&mac_node->node);
7643                         kfree(mac_node);
7644                 } else {
7645                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7646                                 &vport->state);
7647                         break;
7648                 }
7649         }
7650 }
7651
7652 static bool hclge_sync_from_add_list(struct list_head *add_list,
7653                                      struct list_head *mac_list)
7654 {
7655         struct hclge_mac_node *mac_node, *tmp, *new_node;
7656         bool all_added = true;
7657
7658         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7659                 if (mac_node->state == HCLGE_MAC_TO_ADD)
7660                         all_added = false;
7661
7662                 /* if the mac address from tmp_add_list is not in the
7663                  * uc/mc_mac_list, it means a TO_DEL request was received
7664                  * during the time window of adding the mac address into the
7665                  * mac table. If the mac_node state is ACTIVE, change it to
7666                  * TO_DEL so it will be removed next time. Otherwise it must
7667                  * be TO_ADD, meaning this address hasn't been added into the
7668                  * mac table yet, so just remove the mac node.
7669                  */
7670                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7671                 if (new_node) {
7672                         hclge_update_mac_node(new_node, mac_node->state);
7673                         list_del(&mac_node->node);
7674                         kfree(mac_node);
7675                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7676                         mac_node->state = HCLGE_MAC_TO_DEL;
7677                         list_del(&mac_node->node);
7678                         list_add_tail(&mac_node->node, mac_list);
7679                 } else {
7680                         list_del(&mac_node->node);
7681                         kfree(mac_node);
7682                 }
7683         }
7684
7685         return all_added;
7686 }
7687
7688 static void hclge_sync_from_del_list(struct list_head *del_list,
7689                                      struct list_head *mac_list)
7690 {
7691         struct hclge_mac_node *mac_node, *tmp, *new_node;
7692
7693         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7694                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7695                 if (new_node) {
7696                         /* If the mac addr exists in the mac list, it means
7697                          * a new TO_ADD request was received during the time
7698                          * window of configuring the mac address. Since the
7699                          * mac node state is TO_ADD and the address is still
7700                          * in the hardware (because the delete failed), we
7701                          * just need to change the mac node state to ACTIVE.
7702                          */
7703                         new_node->state = HCLGE_MAC_ACTIVE;
7704                         list_del(&mac_node->node);
7705                         kfree(mac_node);
7706                 } else {
7707                         list_del(&mac_node->node);
7708                         list_add_tail(&mac_node->node, mac_list);
7709                 }
7710         }
7711 }
7712
7713 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7714                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
7715                                         bool is_all_added)
7716 {
7717         if (mac_type == HCLGE_MAC_ADDR_UC) {
7718                 if (is_all_added)
7719                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7720                 else
7721                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7722         } else {
7723                 if (is_all_added)
7724                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7725                 else
7726                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7727         }
7728 }
7729
7730 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7731                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
7732 {
7733         struct hclge_mac_node *mac_node, *tmp, *new_node;
7734         struct list_head tmp_add_list, tmp_del_list;
7735         struct list_head *list;
7736         bool all_added;
7737
7738         INIT_LIST_HEAD(&tmp_add_list);
7739         INIT_LIST_HEAD(&tmp_del_list);
7740
7741         /* move the mac addrs to the tmp_add_list and tmp_del_list, then
7742          * we can add/delete these mac addrs outside the spin lock
7743          */
7744         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7745                 &vport->uc_mac_list : &vport->mc_mac_list;
7746
7747         spin_lock_bh(&vport->mac_list_lock);
7748
7749         list_for_each_entry_safe(mac_node, tmp, list, node) {
7750                 switch (mac_node->state) {
7751                 case HCLGE_MAC_TO_DEL:
7752                         list_del(&mac_node->node);
7753                         list_add_tail(&mac_node->node, &tmp_del_list);
7754                         break;
7755                 case HCLGE_MAC_TO_ADD:
7756                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7757                         if (!new_node)
7758                                 goto stop_traverse;
7759                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7760                         new_node->state = mac_node->state;
7761                         list_add_tail(&new_node->node, &tmp_add_list);
7762                         break;
7763                 default:
7764                         break;
7765                 }
7766         }
7767
7768 stop_traverse:
7769         spin_unlock_bh(&vport->mac_list_lock);
7770
7771         /* delete first, in order to get max mac table space for adding */
7772         if (mac_type == HCLGE_MAC_ADDR_UC) {
7773                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7774                                             hclge_rm_uc_addr_common);
7775                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7776                                           hclge_add_uc_addr_common);
7777         } else {
7778                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7779                                             hclge_rm_mc_addr_common);
7780                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7781                                           hclge_add_mc_addr_common);
7782         }
7783
7784         /* if adding/deleting some mac addresses failed, move them back to
7785          * the mac_list and retry next time.
7786          */
7787         spin_lock_bh(&vport->mac_list_lock);
7788
7789         hclge_sync_from_del_list(&tmp_del_list, list);
7790         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7791
7792         spin_unlock_bh(&vport->mac_list_lock);
7793
7794         hclge_update_overflow_flags(vport, mac_type, all_added);
7795 }
7796
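/* A vport needs its MAC table synced when it is not blocked by
 * vport_config_block and its MAC_TBL_CHANGE flag is set; the flag is
 * consumed by this check.
 */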
7797 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7798 {
7799         struct hclge_dev *hdev = vport->back;
7800
7801         if (test_bit(vport->vport_id, hdev->vport_config_block))
7802                 return false;
7803
7804         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7805                 return true;
7806
7807         return false;
7808 }
7809
7810 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7811 {
7812         int i;
7813
7814         for (i = 0; i < hdev->num_alloc_vport; i++) {
7815                 struct hclge_vport *vport = &hdev->vport[i];
7816
7817                 if (!hclge_need_sync_mac_table(vport))
7818                         continue;
7819
7820                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7821                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7822         }
7823 }
7824
7825 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7826                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7827 {
7828         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7829         struct hclge_mac_node *mac_cfg, *tmp;
7830         struct hclge_dev *hdev = vport->back;
7831         struct list_head tmp_del_list, *list;
7832         int ret;
7833
7834         if (mac_type == HCLGE_MAC_ADDR_UC) {
7835                 list = &vport->uc_mac_list;
7836                 unsync = hclge_rm_uc_addr_common;
7837         } else {
7838                 list = &vport->mc_mac_list;
7839                 unsync = hclge_rm_mc_addr_common;
7840         }
7841
7842         INIT_LIST_HEAD(&tmp_del_list);
7843
7844         if (!is_del_list)
7845                 set_bit(vport->vport_id, hdev->vport_config_block);
7846
7847         spin_lock_bh(&vport->mac_list_lock);
7848
7849         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7850                 switch (mac_cfg->state) {
7851                 case HCLGE_MAC_TO_DEL:
7852                 case HCLGE_MAC_ACTIVE:
7853                         list_del(&mac_cfg->node);
7854                         list_add_tail(&mac_cfg->node, &tmp_del_list);
7855                         break;
7856                 case HCLGE_MAC_TO_ADD:
7857                         if (is_del_list) {
7858                                 list_del(&mac_cfg->node);
7859                                 kfree(mac_cfg);
7860                         }
7861                         break;
7862                 }
7863         }
7864
7865         spin_unlock_bh(&vport->mac_list_lock);
7866
7867         list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7868                 ret = unsync(vport, mac_cfg->mac_addr);
7869                 if (!ret || ret == -ENOENT) {
7870                         /* clear all mac addrs from hardware, but keep these
7871                          * mac addrs in the mac list, and restore them after
7872                          * the vf reset finishes.
7873                          */
7874                         if (!is_del_list &&
7875                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
7876                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
7877                         } else {
7878                                 list_del(&mac_cfg->node);
7879                                 kfree(mac_cfg);
7880                         }
7881                 } else if (is_del_list) {
7882                         mac_cfg->state = HCLGE_MAC_TO_DEL;
7883                 }
7884         }
7885
7886         spin_lock_bh(&vport->mac_list_lock);
7887
7888         hclge_sync_from_del_list(&tmp_del_list, list);
7889
7890         spin_unlock_bh(&vport->mac_list_lock);
7891 }
7892
7893 /* remove all mac addresses when uninitializing */
7894 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
7895                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
7896 {
7897         struct hclge_mac_node *mac_node, *tmp;
7898         struct hclge_dev *hdev = vport->back;
7899         struct list_head tmp_del_list, *list;
7900
7901         INIT_LIST_HEAD(&tmp_del_list);
7902
7903         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7904                 &vport->uc_mac_list : &vport->mc_mac_list;
7905
7906         spin_lock_bh(&vport->mac_list_lock);
7907
7908         list_for_each_entry_safe(mac_node, tmp, list, node) {
7909                 switch (mac_node->state) {
7910                 case HCLGE_MAC_TO_DEL:
7911                 case HCLGE_MAC_ACTIVE:
7912                         list_del(&mac_node->node);
7913                         list_add_tail(&mac_node->node, &tmp_del_list);
7914                         break;
7915                 case HCLGE_MAC_TO_ADD:
7916                         list_del(&mac_node->node);
7917                         kfree(mac_node);
7918                         break;
7919                 }
7920         }
7921
7922         spin_unlock_bh(&vport->mac_list_lock);
7923
7924         if (mac_type == HCLGE_MAC_ADDR_UC)
7925                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7926                                             hclge_rm_uc_addr_common);
7927         else
7928                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7929                                             hclge_rm_mc_addr_common);
7930
7931         if (!list_empty(&tmp_del_list))
7932                 dev_warn(&hdev->pdev->dev,
7933                          "uninit %s mac list for vport %u not completely.\n",
7934                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
7935                          vport->vport_id);
7936
7937         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
7938                 list_del(&mac_node->node);
7939                 kfree(mac_node);
7940         }
7941 }
7942
7943 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
7944 {
7945         struct hclge_vport *vport;
7946         int i;
7947
7948         for (i = 0; i < hdev->num_alloc_vport; i++) {
7949                 vport = &hdev->vport[i];
7950                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
7951                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
7952         }
7953 }
7954
7955 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7956                                               u16 cmdq_resp, u8 resp_code)
7957 {
7958 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
7959 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
7960 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
7961 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
7962
7963         int return_status;
7964
7965         if (cmdq_resp) {
7966                 dev_err(&hdev->pdev->dev,
7967                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7968                         cmdq_resp);
7969                 return -EIO;
7970         }
7971
7972         switch (resp_code) {
7973         case HCLGE_ETHERTYPE_SUCCESS_ADD:
7974         case HCLGE_ETHERTYPE_ALREADY_ADD:
7975                 return_status = 0;
7976                 break;
7977         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7978                 dev_err(&hdev->pdev->dev,
7979                         "add mac ethertype failed for manager table overflow.\n");
7980                 return_status = -EIO;
7981                 break;
7982         case HCLGE_ETHERTYPE_KEY_CONFLICT:
7983                 dev_err(&hdev->pdev->dev,
7984                         "add mac ethertype failed for key conflict.\n");
7985                 return_status = -EIO;
7986                 break;
7987         default:
7988                 dev_err(&hdev->pdev->dev,
7989                         "add mac ethertype failed for undefined, code=%u.\n",
7990                         resp_code);
7991                 return_status = -EIO;
7992         }
7993
7994         return return_status;
7995 }
7996
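/* Return true if @mac_addr is already present in the hardware MAC-VLAN
 * table or is already configured for another VF of this PF.
 */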
7997 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7998                                      u8 *mac_addr)
7999 {
8000         struct hclge_mac_vlan_tbl_entry_cmd req;
8001         struct hclge_dev *hdev = vport->back;
8002         struct hclge_desc desc;
8003         u16 egress_port = 0;
8004         int i;
8005
8006         if (is_zero_ether_addr(mac_addr))
8007                 return false;
8008
8009         memset(&req, 0, sizeof(req));
8010         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8011                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8012         req.egress_port = cpu_to_le16(egress_port);
8013         hclge_prepare_mac_addr(&req, mac_addr, false);
8014
8015         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8016                 return true;
8017
8018         vf_idx += HCLGE_VF_VPORT_START_NUM;
8019         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8020                 if (i != vf_idx &&
8021                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8022                         return true;
8023
8024         return false;
8025 }
8026
8027 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8028                             u8 *mac_addr)
8029 {
8030         struct hclge_vport *vport = hclge_get_vport(handle);
8031         struct hclge_dev *hdev = vport->back;
8032
8033         vport = hclge_get_vf_vport(hdev, vf);
8034         if (!vport)
8035                 return -EINVAL;
8036
8037         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8038                 dev_info(&hdev->pdev->dev,
8039                          "Specified MAC(=%pM) is same as before, no change committed!\n",
8040                          mac_addr);
8041                 return 0;
8042         }
8043
8044         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8045                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8046                         mac_addr);
8047                 return -EEXIST;
8048         }
8049
8050         ether_addr_copy(vport->vf_info.mac, mac_addr);
8051
8052         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8053                 dev_info(&hdev->pdev->dev,
8054                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8055                          vf, mac_addr);
8056                 return hclge_inform_reset_assert_to_vf(vport);
8057         }
8058
8059         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8060                  vf, mac_addr);
8061         return 0;
8062 }
8063
8064 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8065                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
8066 {
8067         struct hclge_desc desc;
8068         u8 resp_code;
8069         u16 retval;
8070         int ret;
8071
8072         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8073         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8074
8075         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8076         if (ret) {
8077                 dev_err(&hdev->pdev->dev,
8078                         "add mac ethertype failed for cmd_send, ret =%d.\n",
8079                         ret);
8080                 return ret;
8081         }
8082
8083         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8084         retval = le16_to_cpu(desc.retval);
8085
8086         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8087 }
8088
8089 static int init_mgr_tbl(struct hclge_dev *hdev)
8090 {
8091         int ret;
8092         int i;
8093
8094         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8095                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8096                 if (ret) {
8097                         dev_err(&hdev->pdev->dev,
8098                                 "add mac ethertype failed, ret =%d.\n",
8099                                 ret);
8100                         return ret;
8101                 }
8102         }
8103
8104         return 0;
8105 }
8106
8107 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8108 {
8109         struct hclge_vport *vport = hclge_get_vport(handle);
8110         struct hclge_dev *hdev = vport->back;
8111
8112         ether_addr_copy(p, hdev->hw.mac.mac_addr);
8113 }
8114
8115 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8116                                        const u8 *old_addr, const u8 *new_addr)
8117 {
8118         struct list_head *list = &vport->uc_mac_list;
8119         struct hclge_mac_node *old_node, *new_node;
8120
8121         new_node = hclge_find_mac_node(list, new_addr);
8122         if (!new_node) {
8123                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8124                 if (!new_node)
8125                         return -ENOMEM;
8126
8127                 new_node->state = HCLGE_MAC_TO_ADD;
8128                 ether_addr_copy(new_node->mac_addr, new_addr);
8129                 list_add(&new_node->node, list);
8130         } else {
8131                 if (new_node->state == HCLGE_MAC_TO_DEL)
8132                         new_node->state = HCLGE_MAC_ACTIVE;
8133
8134                 /* make sure the new addr is at the head of the list, so
8135                  * the dev addr won't fail to be re-added into the mac table
8136                  * because of the umv space limitation after a global/imp
8137                  * reset, which clears the mac table in hardware.
8138                  */
8139                 list_move(&new_node->node, list);
8140         }
8141
8142         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8143                 old_node = hclge_find_mac_node(list, old_addr);
8144                 if (old_node) {
8145                         if (old_node->state == HCLGE_MAC_TO_ADD) {
8146                                 list_del(&old_node->node);
8147                                 kfree(old_node);
8148                         } else {
8149                                 old_node->state = HCLGE_MAC_TO_DEL;
8150                         }
8151                 }
8152         }
8153
8154         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8155
8156         return 0;
8157 }
8158
8159 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8160                               bool is_first)
8161 {
8162         const unsigned char *new_addr = (const unsigned char *)p;
8163         struct hclge_vport *vport = hclge_get_vport(handle);
8164         struct hclge_dev *hdev = vport->back;
8165         unsigned char *old_addr = NULL;
8166         int ret;
8167
8168         /* mac addr check */
8169         if (is_zero_ether_addr(new_addr) ||
8170             is_broadcast_ether_addr(new_addr) ||
8171             is_multicast_ether_addr(new_addr)) {
8172                 dev_err(&hdev->pdev->dev,
8173                         "change uc mac err! invalid mac: %pM.\n",
8174                          new_addr);
8175                 return -EINVAL;
8176         }
8177
8178         ret = hclge_pause_addr_cfg(hdev, new_addr);
8179         if (ret) {
8180                 dev_err(&hdev->pdev->dev,
8181                         "failed to configure mac pause address, ret = %d\n",
8182                         ret);
8183                 return ret;
8184         }
8185
8186         if (!is_first)
8187                 old_addr = hdev->hw.mac.mac_addr;
8188
8189         spin_lock_bh(&vport->mac_list_lock);
8190         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8191         if (ret) {
8192                 dev_err(&hdev->pdev->dev,
8193                         "failed to change the mac addr:%pM, ret = %d\n",
8194                         new_addr, ret);
8195                 spin_unlock_bh(&vport->mac_list_lock);
8196
8197                 if (!is_first)
8198                         hclge_pause_addr_cfg(hdev, old_addr);
8199
8200                 return ret;
8201         }
8202         /* we must update the dev addr under the spin lock to prevent it
8203          * from being removed by the set_rx_mode path.
8204          */
8205         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8206         spin_unlock_bh(&vport->mac_list_lock);
8207
8208         hclge_task_schedule(hdev, 0);
8209
8210         return 0;
8211 }
8212
8213 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8214                           int cmd)
8215 {
8216         struct hclge_vport *vport = hclge_get_vport(handle);
8217         struct hclge_dev *hdev = vport->back;
8218
8219         if (!hdev->hw.mac.phydev)
8220                 return -EOPNOTSUPP;
8221
8222         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8223 }
8224
8225 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8226                                       u8 fe_type, bool filter_en, u8 vf_id)
8227 {
8228         struct hclge_vlan_filter_ctrl_cmd *req;
8229         struct hclge_desc desc;
8230         int ret;
8231
8232         /* read current vlan filter parameter */
8233         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8234         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8235         req->vlan_type = vlan_type;
8236         req->vf_id = vf_id;
8237
8238         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8239         if (ret) {
8240                 dev_err(&hdev->pdev->dev,
8241                         "failed to get vlan filter config, ret = %d.\n", ret);
8242                 return ret;
8243         }
8244
8245         /* modify and write new config parameter */
8246         hclge_cmd_reuse_desc(&desc, false);
8247         req->vlan_fe = filter_en ?
8248                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8249
8250         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8251         if (ret)
8252                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8253                         ret);
8254
8255         return ret;
8256 }
8257
8258 #define HCLGE_FILTER_TYPE_VF            0
8259 #define HCLGE_FILTER_TYPE_PORT          1
8260 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
8261 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
8262 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
8263 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
8264 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
8265 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
8266                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8267 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
8268                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8269
8270 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8271 {
8272         struct hclge_vport *vport = hclge_get_vport(handle);
8273         struct hclge_dev *hdev = vport->back;
8274
8275         if (hdev->pdev->revision >= 0x21) {
8276                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8277                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
8278                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8279                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
8280         } else {
8281                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8282                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8283                                            0);
8284         }
8285         if (enable)
8286                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8287         else
8288                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8289 }
8290
8291 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8292                                     bool is_kill, u16 vlan,
8293                                     __be16 proto)
8294 {
8295         struct hclge_vport *vport = &hdev->vport[vfid];
8296         struct hclge_vlan_filter_vf_cfg_cmd *req0;
8297         struct hclge_vlan_filter_vf_cfg_cmd *req1;
8298         struct hclge_desc desc[2];
8299         u8 vf_byte_val;
8300         u8 vf_byte_off;
8301         int ret;
8302
8303         /* if the vf vlan table is full, firmware disables the vf vlan
8304          * filter, so it is impossible and unnecessary to add a new vlan id.
8305          * If spoof check is enabled and the vf vlan table is full, a new
8306          * vlan shouldn't be added, as tx packets with it will be dropped.
8307          */
8308         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8309                 if (vport->vf_info.spoofchk && vlan) {
8310                         dev_err(&hdev->pdev->dev,
8311                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8312                         return -EPERM;
8313                 }
8314                 return 0;
8315         }
8316
8317         hclge_cmd_setup_basic_desc(&desc[0],
8318                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8319         hclge_cmd_setup_basic_desc(&desc[1],
8320                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8321
8322         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8323
8324         vf_byte_off = vfid / 8;
8325         vf_byte_val = 1 << (vfid % 8);
8326
8327         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8328         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8329
8330         req0->vlan_id  = cpu_to_le16(vlan);
8331         req0->vlan_cfg = is_kill;
8332
8333         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8334                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8335         else
8336                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8337
8338         ret = hclge_cmd_send(&hdev->hw, desc, 2);
8339         if (ret) {
8340                 dev_err(&hdev->pdev->dev,
8341                         "Send vf vlan command fail, ret =%d.\n",
8342                         ret);
8343                 return ret;
8344         }
8345
8346         if (!is_kill) {
8347 #define HCLGE_VF_VLAN_NO_ENTRY  2
8348                 if (!req0->resp_code || req0->resp_code == 1)
8349                         return 0;
8350
8351                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8352                         set_bit(vfid, hdev->vf_vlan_full);
8353                         dev_warn(&hdev->pdev->dev,
8354                                  "vf vlan table is full, vf vlan filter is disabled\n");
8355                         return 0;
8356                 }
8357
8358                 dev_err(&hdev->pdev->dev,
8359                         "Add vf vlan filter fail, ret =%u.\n",
8360                         req0->resp_code);
8361         } else {
8362 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
8363                 if (!req0->resp_code)
8364                         return 0;
8365
8366                 /* the vf vlan filter is disabled when the vf vlan table is
8367                  * full, so new vlan ids are not added into the vf vlan table.
8368                  * Just return 0 without a warning to avoid massive verbose
8369                  * logs on unload.
8370                  */
8371                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8372                         return 0;
8373
8374                 dev_err(&hdev->pdev->dev,
8375                         "Kill vf vlan filter fail, ret =%u.\n",
8376                         req0->resp_code);
8377         }
8378
8379         return -EIO;
8380 }
8381
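/* Set or clear @vlan_id in the port (PF) level VLAN filter; the id is
 * encoded as a block offset of HCLGE_VLAN_ID_OFFSET_STEP ids plus a bit
 * inside the per-block bitmap.
 */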
8382 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8383                                       u16 vlan_id, bool is_kill)
8384 {
8385         struct hclge_vlan_filter_pf_cfg_cmd *req;
8386         struct hclge_desc desc;
8387         u8 vlan_offset_byte_val;
8388         u8 vlan_offset_byte;
8389         u8 vlan_offset_160;
8390         int ret;
8391
8392         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8393
8394         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8395         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8396                            HCLGE_VLAN_BYTE_SIZE;
8397         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8398
8399         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8400         req->vlan_offset = vlan_offset_160;
8401         req->vlan_cfg = is_kill;
8402         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8403
8404         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8405         if (ret)
8406                 dev_err(&hdev->pdev->dev,
8407                         "port vlan command, send fail, ret =%d.\n", ret);
8408         return ret;
8409 }
8410
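/* Update the per-VF VLAN filter first and track which vports use @vlan_id
 * in hdev->vlan_table; the port-level filter entry is only written when
 * the first user of the vlan is added or the last one is removed.
 */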
8411 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8412                                     u16 vport_id, u16 vlan_id,
8413                                     bool is_kill)
8414 {
8415         u16 vport_idx, vport_num = 0;
8416         int ret;
8417
8418         if (is_kill && !vlan_id)
8419                 return 0;
8420
8421         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8422                                        proto);
8423         if (ret) {
8424                 dev_err(&hdev->pdev->dev,
8425                         "Set %u vport vlan filter config fail, ret =%d.\n",
8426                         vport_id, ret);
8427                 return ret;
8428         }
8429
8430         /* vlan 0 may be added twice when 8021q module is enabled */
8431         if (!is_kill && !vlan_id &&
8432             test_bit(vport_id, hdev->vlan_table[vlan_id]))
8433                 return 0;
8434
8435         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8436                 dev_err(&hdev->pdev->dev,
8437                         "Add port vlan failed, vport %u is already in vlan %u\n",
8438                         vport_id, vlan_id);
8439                 return -EINVAL;
8440         }
8441
8442         if (is_kill &&
8443             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8444                 dev_err(&hdev->pdev->dev,
8445                         "Delete port vlan failed, vport %u is not in vlan %u\n",
8446                         vport_id, vlan_id);
8447                 return -EINVAL;
8448         }
8449
8450         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8451                 vport_num++;
8452
8453         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8454                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8455                                                  is_kill);
8456
8457         return ret;
8458 }
8459
8460 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8461 {
8462         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8463         struct hclge_vport_vtag_tx_cfg_cmd *req;
8464         struct hclge_dev *hdev = vport->back;
8465         struct hclge_desc desc;
8466         u16 bmap_index;
8467         int status;
8468
8469         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8470
8471         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8472         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8473         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8474         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8475                       vcfg->accept_tag1 ? 1 : 0);
8476         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8477                       vcfg->accept_untag1 ? 1 : 0);
8478         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8479                       vcfg->accept_tag2 ? 1 : 0);
8480         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8481                       vcfg->accept_untag2 ? 1 : 0);
8482         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8483                       vcfg->insert_tag1_en ? 1 : 0);
8484         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8485                       vcfg->insert_tag2_en ? 1 : 0);
8486         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8487
8488         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8489         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8490                         HCLGE_VF_NUM_PER_BYTE;
8491         req->vf_bitmap[bmap_index] =
8492                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8493
8494         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8495         if (status)
8496                 dev_err(&hdev->pdev->dev,
8497                         "Send port txvlan cfg command fail, ret =%d\n",
8498                         status);
8499
8500         return status;
8501 }
8502
8503 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8504 {
8505         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8506         struct hclge_vport_vtag_rx_cfg_cmd *req;
8507         struct hclge_dev *hdev = vport->back;
8508         struct hclge_desc desc;
8509         u16 bmap_index;
8510         int status;
8511
8512         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8513
8514         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8515         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8516                       vcfg->strip_tag1_en ? 1 : 0);
8517         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8518                       vcfg->strip_tag2_en ? 1 : 0);
8519         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8520                       vcfg->vlan1_vlan_prionly ? 1 : 0);
8521         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8522                       vcfg->vlan2_vlan_prionly ? 1 : 0);
8523
8524         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8525         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8526                         HCLGE_VF_NUM_PER_BYTE;
8527         req->vf_bitmap[bmap_index] =
8528                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8529
8530         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8531         if (status)
8532                 dev_err(&hdev->pdev->dev,
8533                         "Send port rxvlan cfg command fail, ret =%d\n",
8534                         status);
8535
8536         return status;
8537 }
8538
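/* Configure TX/RX VLAN offload according to the port based VLAN state:
 * when it is disabled, tag1 packets are accepted and no default tag1 is
 * inserted; when it is enabled, vlan_tag is inserted as default tag1 and
 * tagged tag1 packets are not accepted. RX stripping is switched between
 * tag1 and tag2 accordingly.
 */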
8539 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8540                                   u16 port_base_vlan_state,
8541                                   u16 vlan_tag)
8542 {
8543         int ret;
8544
8545         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8546                 vport->txvlan_cfg.accept_tag1 = true;
8547                 vport->txvlan_cfg.insert_tag1_en = false;
8548                 vport->txvlan_cfg.default_tag1 = 0;
8549         } else {
8550                 vport->txvlan_cfg.accept_tag1 = false;
8551                 vport->txvlan_cfg.insert_tag1_en = true;
8552                 vport->txvlan_cfg.default_tag1 = vlan_tag;
8553         }
8554
8555         vport->txvlan_cfg.accept_untag1 = true;
8556
8557         /* accept_tag2 and accept_untag2 are not supported on
8558          * pdev revision(0x20); newer revisions support them, but
8559          * these two fields cannot be configured by the user.
8560          */
8561         vport->txvlan_cfg.accept_tag2 = true;
8562         vport->txvlan_cfg.accept_untag2 = true;
8563         vport->txvlan_cfg.insert_tag2_en = false;
8564         vport->txvlan_cfg.default_tag2 = 0;
8565
8566         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8567                 vport->rxvlan_cfg.strip_tag1_en = false;
8568                 vport->rxvlan_cfg.strip_tag2_en =
8569                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8570         } else {
8571                 vport->rxvlan_cfg.strip_tag1_en =
8572                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8573                 vport->rxvlan_cfg.strip_tag2_en = true;
8574         }
8575         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8576         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8577
8578         ret = hclge_set_vlan_tx_offload_cfg(vport);
8579         if (ret)
8580                 return ret;
8581
8582         return hclge_set_vlan_rx_offload_cfg(vport);
8583 }
8584
8585 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8586 {
8587         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8588         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8589         struct hclge_desc desc;
8590         int status;
8591
8592         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8593         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8594         rx_req->ot_fst_vlan_type =
8595                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8596         rx_req->ot_sec_vlan_type =
8597                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8598         rx_req->in_fst_vlan_type =
8599                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8600         rx_req->in_sec_vlan_type =
8601                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8602
8603         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8604         if (status) {
8605                 dev_err(&hdev->pdev->dev,
8606                         "Send rxvlan protocol type command fail, ret =%d\n",
8607                         status);
8608                 return status;
8609         }
8610
8611         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8612
8613         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8614         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8615         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8616
8617         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8618         if (status)
8619                 dev_err(&hdev->pdev->dev,
8620                         "Send txvlan protocol type command fail, ret =%d\n",
8621                         status);
8622
8623         return status;
8624 }
8625
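/* Initialize the VLAN configuration: enable the VLAN filters (a per-function
 * VF filter plus a port ingress filter on revision 0x21 and later, a single
 * egress filter on revision 0x20), set the default 802.1Q protocol types,
 * apply the per-vport offload configuration and finally add vlan id 0.
 */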
8626 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8627 {
8628 #define HCLGE_DEF_VLAN_TYPE             0x8100
8629
8630         struct hnae3_handle *handle = &hdev->vport[0].nic;
8631         struct hclge_vport *vport;
8632         int ret;
8633         int i;
8634
8635         if (hdev->pdev->revision >= 0x21) {
8636                 /* for revision 0x21, vf vlan filter is per function */
8637                 for (i = 0; i < hdev->num_alloc_vport; i++) {
8638                         vport = &hdev->vport[i];
8639                         ret = hclge_set_vlan_filter_ctrl(hdev,
8640                                                          HCLGE_FILTER_TYPE_VF,
8641                                                          HCLGE_FILTER_FE_EGRESS,
8642                                                          true,
8643                                                          vport->vport_id);
8644                         if (ret)
8645                                 return ret;
8646                 }
8647
8648                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8649                                                  HCLGE_FILTER_FE_INGRESS, true,
8650                                                  0);
8651                 if (ret)
8652                         return ret;
8653         } else {
8654                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8655                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
8656                                                  true, 0);
8657                 if (ret)
8658                         return ret;
8659         }
8660
8661         handle->netdev_flags |= HNAE3_VLAN_FLTR;
8662
8663         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8664         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8665         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8666         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8667         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8668         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8669
8670         ret = hclge_set_vlan_protocol_type(hdev);
8671         if (ret)
8672                 return ret;
8673
8674         for (i = 0; i < hdev->num_alloc_vport; i++) {
8675                 u16 vlan_tag;
8676
8677                 vport = &hdev->vport[i];
8678                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8679
8680                 ret = hclge_vlan_offload_cfg(vport,
8681                                              vport->port_base_vlan_cfg.state,
8682                                              vlan_tag);
8683                 if (ret)
8684                         return ret;
8685         }
8686
8687         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8688 }
8689
8690 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8691                                        bool writen_to_tbl)
8692 {
8693         struct hclge_vport_vlan_cfg *vlan;
8694
8695         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8696         if (!vlan)
8697                 return;
8698
8699         vlan->hd_tbl_status = writen_to_tbl;
8700         vlan->vlan_id = vlan_id;
8701
8702         list_add_tail(&vlan->node, &vport->vlan_list);
8703 }
8704
8705 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8706 {
8707         struct hclge_vport_vlan_cfg *vlan, *tmp;
8708         struct hclge_dev *hdev = vport->back;
8709         int ret;
8710
8711         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8712                 if (!vlan->hd_tbl_status) {
8713                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8714                                                        vport->vport_id,
8715                                                        vlan->vlan_id, false);
8716                         if (ret) {
8717                                 dev_err(&hdev->pdev->dev,
8718                                         "restore vport vlan list failed, ret=%d\n",
8719                                         ret);
8720                                 return ret;
8721                         }
8722                 }
8723                 vlan->hd_tbl_status = true;
8724         }
8725
8726         return 0;
8727 }
8728
8729 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8730                                       bool is_write_tbl)
8731 {
8732         struct hclge_vport_vlan_cfg *vlan, *tmp;
8733         struct hclge_dev *hdev = vport->back;
8734
8735         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8736                 if (vlan->vlan_id == vlan_id) {
8737                         if (is_write_tbl && vlan->hd_tbl_status)
8738                                 hclge_set_vlan_filter_hw(hdev,
8739                                                          htons(ETH_P_8021Q),
8740                                                          vport->vport_id,
8741                                                          vlan_id,
8742                                                          true);
8743
8744                         list_del(&vlan->node);
8745                         kfree(vlan);
8746                         break;
8747                 }
8748         }
8749 }
8750
8751 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8752 {
8753         struct hclge_vport_vlan_cfg *vlan, *tmp;
8754         struct hclge_dev *hdev = vport->back;
8755
8756         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8757                 if (vlan->hd_tbl_status)
8758                         hclge_set_vlan_filter_hw(hdev,
8759                                                  htons(ETH_P_8021Q),
8760                                                  vport->vport_id,
8761                                                  vlan->vlan_id,
8762                                                  true);
8763
8764                 vlan->hd_tbl_status = false;
8765                 if (is_del_list) {
8766                         list_del(&vlan->node);
8767                         kfree(vlan);
8768                 }
8769         }
8770         clear_bit(vport->vport_id, hdev->vf_vlan_full);
8771 }
8772
8773 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8774 {
8775         struct hclge_vport_vlan_cfg *vlan, *tmp;
8776         struct hclge_vport *vport;
8777         int i;
8778
8779         for (i = 0; i < hdev->num_alloc_vport; i++) {
8780                 vport = &hdev->vport[i];
8781                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8782                         list_del(&vlan->node);
8783                         kfree(vlan);
8784                 }
8785         }
8786 }
8787
8788 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8789 {
8790         struct hclge_vport_vlan_cfg *vlan, *tmp;
8791         struct hclge_dev *hdev = vport->back;
8792         u16 vlan_proto;
8793         u16 vlan_id;
8794         u16 state;
8795         int ret;
8796
8797         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8798         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8799         state = vport->port_base_vlan_cfg.state;
8800
8801         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8802                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8803                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8804                                          vport->vport_id, vlan_id,
8805                                          false);
8806                 return;
8807         }
8808
8809         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8810                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8811                                                vport->vport_id,
8812                                                vlan->vlan_id, false);
8813                 if (ret)
8814                         break;
8815                 vlan->hd_tbl_status = true;
8816         }
8817 }
8818
8819 /* For global reset and imp reset, hardware will clear the mac table,
8820  * so we change the mac address state from ACTIVE to TO_ADD, then they
8821  * can be restored in the service task after the reset completes.
8822  * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need
8823  * to be restored after reset, so just remove these mac nodes from mac_list.
8824  */
8825 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8826 {
8827         struct hclge_mac_node *mac_node, *tmp;
8828
8829         list_for_each_entry_safe(mac_node, tmp, list, node) {
8830                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
8831                         mac_node->state = HCLGE_MAC_TO_ADD;
8832                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8833                         list_del(&mac_node->node);
8834                         kfree(mac_node);
8835                 }
8836         }
8837 }
8838
8839 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8840 {
8841         spin_lock_bh(&vport->mac_list_lock);
8842
8843         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8844         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8845         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8846
8847         spin_unlock_bh(&vport->mac_list_lock);
8848 }
8849
8850 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8851 {
8852         struct hclge_vport *vport = &hdev->vport[0];
8853         struct hnae3_handle *handle = &vport->nic;
8854
8855         hclge_restore_mac_table_common(vport);
8856         hclge_restore_vport_vlan_table(vport);
8857         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8858
8859         hclge_restore_fd_entries(handle);
8860 }
8861
8862 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8863 {
8864         struct hclge_vport *vport = hclge_get_vport(handle);
8865
8866         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8867                 vport->rxvlan_cfg.strip_tag1_en = false;
8868                 vport->rxvlan_cfg.strip_tag2_en = enable;
8869         } else {
8870                 vport->rxvlan_cfg.strip_tag1_en = enable;
8871                 vport->rxvlan_cfg.strip_tag2_en = true;
8872         }
8873         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8874         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8875         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8876
8877         return hclge_set_vlan_rx_offload_cfg(vport);
8878 }
8879
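/* When enabling port based VLAN, drop the per-vport vlan entries from
 * hardware (the software list is kept) and program only the new port VLAN;
 * when disabling it, remove the old port VLAN from hardware and write the
 * vport vlan list back into the filter table.
 */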
8880 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8881                                             u16 port_base_vlan_state,
8882                                             struct hclge_vlan_info *new_info,
8883                                             struct hclge_vlan_info *old_info)
8884 {
8885         struct hclge_dev *hdev = vport->back;
8886         int ret;
8887
8888         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8889                 hclge_rm_vport_all_vlan_table(vport, false);
8890                 return hclge_set_vlan_filter_hw(hdev,
8891                                                  htons(new_info->vlan_proto),
8892                                                  vport->vport_id,
8893                                                  new_info->vlan_tag,
8894                                                  false);
8895         }
8896
8897         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8898                                        vport->vport_id, old_info->vlan_tag,
8899                                        true);
8900         if (ret)
8901                 return ret;
8902
8903         return hclge_add_vport_all_vlan_table(vport);
8904 }
8905
8906 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8907                                     struct hclge_vlan_info *vlan_info)
8908 {
8909         struct hnae3_handle *nic = &vport->nic;
8910         struct hclge_vlan_info *old_vlan_info;
8911         struct hclge_dev *hdev = vport->back;
8912         int ret;
8913
8914         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8915
8916         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8917         if (ret)
8918                 return ret;
8919
8920         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8921                 /* add new VLAN tag */
8922                 ret = hclge_set_vlan_filter_hw(hdev,
8923                                                htons(vlan_info->vlan_proto),
8924                                                vport->vport_id,
8925                                                vlan_info->vlan_tag,
8926                                                false);
8927                 if (ret)
8928                         return ret;
8929
8930                 /* remove old VLAN tag */
8931                 ret = hclge_set_vlan_filter_hw(hdev,
8932                                                htons(old_vlan_info->vlan_proto),
8933                                                vport->vport_id,
8934                                                old_vlan_info->vlan_tag,
8935                                                true);
8936                 if (ret)
8937                         return ret;
8938
8939                 goto update;
8940         }
8941
8942         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8943                                                old_vlan_info);
8944         if (ret)
8945                 return ret;
8946
8947         /* update state only when disabling/enabling port based VLAN */
8948         vport->port_base_vlan_cfg.state = state;
8949         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8950                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8951         else
8952                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8953
8954 update:
8955         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8956         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8957         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8958
8959         return 0;
8960 }
8961
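/* Map the current port based VLAN state and the requested vlan to the next
 * state: disabled + vlan 0 -> NOCHANGE, disabled + non-zero -> ENABLE,
 * enabled + vlan 0 -> DISABLE, enabled + same vlan -> NOCHANGE,
 * enabled + different vlan -> MODIFY.
 */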
8962 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8963                                           enum hnae3_port_base_vlan_state state,
8964                                           u16 vlan)
8965 {
8966         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8967                 if (!vlan)
8968                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8969                 else
8970                         return HNAE3_PORT_BASE_VLAN_ENABLE;
8971         } else {
8972                 if (!vlan)
8973                         return HNAE3_PORT_BASE_VLAN_DISABLE;
8974                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8975                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8976                 else
8977                         return HNAE3_PORT_BASE_VLAN_MODIFY;
8978         }
8979 }
8980
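/* Handle a set-VF-vlan request: validate the requested vlan/qos/proto, work
 * out the new port based VLAN state for the VF, then either update the
 * config directly (VF not alive) or push the new info to the VF through the
 * mailbox. Not supported on pdev revision 0x20.
 */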
8981 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8982                                     u16 vlan, u8 qos, __be16 proto)
8983 {
8984         struct hclge_vport *vport = hclge_get_vport(handle);
8985         struct hclge_dev *hdev = vport->back;
8986         struct hclge_vlan_info vlan_info;
8987         u16 state;
8988         int ret;
8989
8990         if (hdev->pdev->revision == 0x20)
8991                 return -EOPNOTSUPP;
8992
8993         vport = hclge_get_vf_vport(hdev, vfid);
8994         if (!vport)
8995                 return -EINVAL;
8996
8997         /* qos is a 3-bit value, so it cannot be bigger than 7 */
8998         if (vlan > VLAN_N_VID - 1 || qos > 7)
8999                 return -EINVAL;
9000         if (proto != htons(ETH_P_8021Q))
9001                 return -EPROTONOSUPPORT;
9002
9003         state = hclge_get_port_base_vlan_state(vport,
9004                                                vport->port_base_vlan_cfg.state,
9005                                                vlan);
9006         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9007                 return 0;
9008
9009         vlan_info.vlan_tag = vlan;
9010         vlan_info.qos = qos;
9011         vlan_info.vlan_proto = ntohs(proto);
9012
9013         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9014                 return hclge_update_port_base_vlan_cfg(vport, state,
9015                                                        &vlan_info);
9016         } else {
9017                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9018                                                         vport->vport_id, state,
9019                                                         vlan, qos,
9020                                                         ntohs(proto));
9021                 return ret;
9022         }
9023 }
9024
9025 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9026 {
9027         struct hclge_vlan_info *vlan_info;
9028         struct hclge_vport *vport;
9029         int ret;
9030         int vf;
9031
9032         /* clear the port base vlan for all vfs */
9033         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9034                 vport = &hdev->vport[vf];
9035                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9036
9037                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9038                                                vport->vport_id,
9039                                                vlan_info->vlan_tag, true);
9040                 if (ret)
9041                         dev_err(&hdev->pdev->dev,
9042                                 "failed to clear vf vlan for vf%d, ret = %d\n",
9043                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9044         }
9045 }
9046
9047 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9048                           u16 vlan_id, bool is_kill)
9049 {
9050         struct hclge_vport *vport = hclge_get_vport(handle);
9051         struct hclge_dev *hdev = vport->back;
9052         bool writen_to_tbl = false;
9053         int ret = 0;
9054
9055         /* When the device is resetting, firmware is unable to handle the
9056          * mailbox. Just record the vlan id, and remove it after the
9057          * reset finishes.
9058          */
9059         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
9060                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9061                 return -EBUSY;
9062         }
9063
9064         /* when port base vlan is enabled, we use it as the vlan filter
9065          * entry. In this case, we don't update the vlan filter table when
9066          * the user adds or removes a vlan; we just update the vport vlan
9067          * list. The vlan ids in that list are only written to the vlan
9068          * filter table once port base vlan is disabled.
9069          */
9070         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9071                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9072                                                vlan_id, is_kill);
9073                 writen_to_tbl = true;
9074         }
9075
9076         if (!ret) {
9077                 if (is_kill)
9078                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9079                 else
9080                         hclge_add_vport_vlan_table(vport, vlan_id,
9081                                                    writen_to_tbl);
9082         } else if (is_kill) {
9083                 /* when removing the hw vlan filter failed, record the vlan
9084                  * id, and try to remove it from hw later, to be consistent
9085                  * with the stack
9086                  */
9087                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9088         }
9089         return ret;
9090 }
9091
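/* Retry the vlan deletions recorded in each vport's vlan_del_fail_bmap
 * (for example ones deferred while a reset was in progress). Each entry is
 * cleared once the hardware delete succeeds or reports the entry missing;
 * at most HCLGE_MAX_SYNC_COUNT entries are handled per invocation.
 */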
9092 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9093 {
9094 #define HCLGE_MAX_SYNC_COUNT    60
9095
9096         int i, ret, sync_cnt = 0;
9097         u16 vlan_id;
9098
9099         /* start from vport 1 for PF is always alive */
9100         for (i = 0; i < hdev->num_alloc_vport; i++) {
9101                 struct hclge_vport *vport = &hdev->vport[i];
9102
9103                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9104                                          VLAN_N_VID);
9105                 while (vlan_id != VLAN_N_VID) {
9106                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9107                                                        vport->vport_id, vlan_id,
9108                                                        true);
9109                         if (ret && ret != -EINVAL)
9110                                 return;
9111
9112                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9113                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9114
9115                         sync_cnt++;
9116                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9117                                 return;
9118
9119                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9120                                                  VLAN_N_VID);
9121                 }
9122         }
9123 }
9124
9125 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9126 {
9127         struct hclge_config_max_frm_size_cmd *req;
9128         struct hclge_desc desc;
9129
9130         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9131
9132         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9133         req->max_frm_size = cpu_to_le16(new_mps);
9134         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9135
9136         return hclge_cmd_send(&hdev->hw, &desc, 1);
9137 }
9138
9139 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9140 {
9141         struct hclge_vport *vport = hclge_get_vport(handle);
9142
9143         return hclge_set_vport_mtu(vport, new_mtu);
9144 }
9145
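/* Change the MPS (max frame size) of a vport. The frame size is the MTU plus
 * the Ethernet header, FCS and two VLAN tags. A VF's mps must fit within the
 * PF's mps; changing the PF's mps requires it to stay at or above every VF's
 * mps and re-allocates the packet buffers with the nic client brought down.
 */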
9146 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9147 {
9148         struct hclge_dev *hdev = vport->back;
9149         int i, max_frm_size, ret;
9150
9151         /* HW supports 2 layers of vlan */
9152         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9153         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9154             max_frm_size > HCLGE_MAC_MAX_FRAME)
9155                 return -EINVAL;
9156
9157         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9158         mutex_lock(&hdev->vport_lock);
9159         /* VF's mps must fit within hdev->mps */
9160         if (vport->vport_id && max_frm_size > hdev->mps) {
9161                 mutex_unlock(&hdev->vport_lock);
9162                 return -EINVAL;
9163         } else if (vport->vport_id) {
9164                 vport->mps = max_frm_size;
9165                 mutex_unlock(&hdev->vport_lock);
9166                 return 0;
9167         }
9168
9169         /* PF's mps must be greater than VF's mps */
9170         for (i = 1; i < hdev->num_alloc_vport; i++)
9171                 if (max_frm_size < hdev->vport[i].mps) {
9172                         mutex_unlock(&hdev->vport_lock);
9173                         return -EINVAL;
9174                 }
9175
9176         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9177
9178         ret = hclge_set_mac_mtu(hdev, max_frm_size);
9179         if (ret) {
9180                 dev_err(&hdev->pdev->dev,
9181                         "Change mtu fail, ret =%d\n", ret);
9182                 goto out;
9183         }
9184
9185         hdev->mps = max_frm_size;
9186         vport->mps = max_frm_size;
9187
9188         ret = hclge_buffer_alloc(hdev);
9189         if (ret)
9190                 dev_err(&hdev->pdev->dev,
9191                         "Allocate buffer fail, ret =%d\n", ret);
9192
9193 out:
9194         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9195         mutex_unlock(&hdev->vport_lock);
9196         return ret;
9197 }
9198
9199 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9200                                     bool enable)
9201 {
9202         struct hclge_reset_tqp_queue_cmd *req;
9203         struct hclge_desc desc;
9204         int ret;
9205
9206         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9207
9208         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9209         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9210         if (enable)
9211                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9212
9213         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9214         if (ret) {
9215                 dev_err(&hdev->pdev->dev,
9216                         "Send tqp reset cmd error, status =%d\n", ret);
9217                 return ret;
9218         }
9219
9220         return 0;
9221 }
9222
9223 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9224 {
9225         struct hclge_reset_tqp_queue_cmd *req;
9226         struct hclge_desc desc;
9227         int ret;
9228
9229         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9230
9231         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9232         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9233
9234         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9235         if (ret) {
9236                 dev_err(&hdev->pdev->dev,
9237                         "Get reset status error, status =%d\n", ret);
9238                 return ret;
9239         }
9240
9241         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9242 }
9243
9244 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9245 {
9246         struct hnae3_queue *queue;
9247         struct hclge_tqp *tqp;
9248
9249         queue = handle->kinfo.tqp[queue_id];
9250         tqp = container_of(queue, struct hclge_tqp, q);
9251
9252         return tqp->index;
9253 }
9254
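/* Reset one TQP: disable the queue, assert the TQP reset, poll the ready
 * status up to HCLGE_TQP_RESET_TRY_TIMES times (sleeping about 1ms between
 * polls), then deassert the reset again.
 */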
9255 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9256 {
9257         struct hclge_vport *vport = hclge_get_vport(handle);
9258         struct hclge_dev *hdev = vport->back;
9259         int reset_try_times = 0;
9260         int reset_status;
9261         u16 queue_gid;
9262         int ret;
9263
9264         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9265
9266         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9267         if (ret) {
9268                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9269                 return ret;
9270         }
9271
9272         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9273         if (ret) {
9274                 dev_err(&hdev->pdev->dev,
9275                         "Send reset tqp cmd fail, ret = %d\n", ret);
9276                 return ret;
9277         }
9278
9279         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9280                 reset_status = hclge_get_reset_status(hdev, queue_gid);
9281                 if (reset_status)
9282                         break;
9283
9284                 /* Wait for tqp hw reset */
9285                 usleep_range(1000, 1200);
9286         }
9287
9288         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9289                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9290                 return ret;
9291         }
9292
9293         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9294         if (ret)
9295                 dev_err(&hdev->pdev->dev,
9296                         "Deassert the soft reset fail, ret = %d\n", ret);
9297
9298         return ret;
9299 }
9300
9301 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9302 {
9303         struct hclge_dev *hdev = vport->back;
9304         int reset_try_times = 0;
9305         int reset_status;
9306         u16 queue_gid;
9307         int ret;
9308
9309         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9310
9311         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9312         if (ret) {
9313                 dev_warn(&hdev->pdev->dev,
9314                          "Send reset tqp cmd fail, ret = %d\n", ret);
9315                 return;
9316         }
9317
9318         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9319                 reset_status = hclge_get_reset_status(hdev, queue_gid);
9320                 if (reset_status)
9321                         break;
9322
9323                 /* Wait for tqp hw reset */
9324                 usleep_range(1000, 1200);
9325         }
9326
9327         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9328                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9329                 return;
9330         }
9331
9332         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9333         if (ret)
9334                 dev_warn(&hdev->pdev->dev,
9335                          "Deassert the soft reset fail, ret = %d\n", ret);
9336 }
9337
9338 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9339 {
9340         struct hclge_vport *vport = hclge_get_vport(handle);
9341         struct hclge_dev *hdev = vport->back;
9342
9343         return hdev->fw_version;
9344 }
9345
9346 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9347 {
9348         struct phy_device *phydev = hdev->hw.mac.phydev;
9349
9350         if (!phydev)
9351                 return;
9352
9353         phy_set_asym_pause(phydev, rx_en, tx_en);
9354 }
9355
9356 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9357 {
9358         int ret;
9359
9360         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9361                 return 0;
9362
9363         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9364         if (ret)
9365                 dev_err(&hdev->pdev->dev,
9366                         "configure pauseparam error, ret = %d.\n", ret);
9367
9368         return ret;
9369 }
9370
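/* Re-resolve flow control after PHY autonegotiation: combine the local and
 * link partner pause advertisements with mii_resolve_flowctrl_fdx(), force
 * pause off for half duplex, and apply the result to the MAC.
 */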
9371 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9372 {
9373         struct phy_device *phydev = hdev->hw.mac.phydev;
9374         u16 remote_advertising = 0;
9375         u16 local_advertising;
9376         u32 rx_pause, tx_pause;
9377         u8 flowctl;
9378
9379         if (!phydev->link || !phydev->autoneg)
9380                 return 0;
9381
9382         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9383
9384         if (phydev->pause)
9385                 remote_advertising = LPA_PAUSE_CAP;
9386
9387         if (phydev->asym_pause)
9388                 remote_advertising |= LPA_PAUSE_ASYM;
9389
9390         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9391                                            remote_advertising);
9392         tx_pause = flowctl & FLOW_CTRL_TX;
9393         rx_pause = flowctl & FLOW_CTRL_RX;
9394
9395         if (phydev->duplex == HCLGE_MAC_HALF) {
9396                 tx_pause = 0;
9397                 rx_pause = 0;
9398         }
9399
9400         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9401 }
9402
9403 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9404                                  u32 *rx_en, u32 *tx_en)
9405 {
9406         struct hclge_vport *vport = hclge_get_vport(handle);
9407         struct hclge_dev *hdev = vport->back;
9408         struct phy_device *phydev = hdev->hw.mac.phydev;
9409
9410         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9411
9412         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9413                 *rx_en = 0;
9414                 *tx_en = 0;
9415                 return;
9416         }
9417
9418         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9419                 *rx_en = 1;
9420                 *tx_en = 0;
9421         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9422                 *tx_en = 1;
9423                 *rx_en = 0;
9424         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9425                 *rx_en = 1;
9426                 *tx_en = 1;
9427         } else {
9428                 *rx_en = 0;
9429                 *tx_en = 0;
9430         }
9431 }
9432
9433 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9434                                          u32 rx_en, u32 tx_en)
9435 {
9436         if (rx_en && tx_en)
9437                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9438         else if (rx_en && !tx_en)
9439                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9440         else if (!rx_en && tx_en)
9441                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9442         else
9443                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9444
9445         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9446 }
9447
9448 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9449                                 u32 rx_en, u32 tx_en)
9450 {
9451         struct hclge_vport *vport = hclge_get_vport(handle);
9452         struct hclge_dev *hdev = vport->back;
9453         struct phy_device *phydev = hdev->hw.mac.phydev;
9454         u32 fc_autoneg;
9455
9456         if (phydev) {
9457                 fc_autoneg = hclge_get_autoneg(handle);
9458                 if (auto_neg != fc_autoneg) {
9459                         dev_info(&hdev->pdev->dev,
9460                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9461                         return -EOPNOTSUPP;
9462                 }
9463         }
9464
9465         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9466                 dev_info(&hdev->pdev->dev,
9467                          "Priority flow control enabled. Cannot set link flow control.\n");
9468                 return -EOPNOTSUPP;
9469         }
9470
9471         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9472
9473         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9474
9475         if (!auto_neg)
9476                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9477
9478         if (phydev)
9479                 return phy_start_aneg(phydev);
9480
9481         return -EOPNOTSUPP;
9482 }
9483
9484 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9485                                           u8 *auto_neg, u32 *speed, u8 *duplex)
9486 {
9487         struct hclge_vport *vport = hclge_get_vport(handle);
9488         struct hclge_dev *hdev = vport->back;
9489
9490         if (speed)
9491                 *speed = hdev->hw.mac.speed;
9492         if (duplex)
9493                 *duplex = hdev->hw.mac.duplex;
9494         if (auto_neg)
9495                 *auto_neg = hdev->hw.mac.autoneg;
9496 }
9497
9498 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9499                                  u8 *module_type)
9500 {
9501         struct hclge_vport *vport = hclge_get_vport(handle);
9502         struct hclge_dev *hdev = vport->back;
9503
9504         /* When the nic is down, the service task is not running and does
9505          * not update the port information every second. Query it before
9506          * returning the media type to ensure the information is correct.
9507          */
9508         hclge_update_port_info(hdev);
9509
9510         if (media_type)
9511                 *media_type = hdev->hw.mac.media_type;
9512
9513         if (module_type)
9514                 *module_type = hdev->hw.mac.module_type;
9515 }
9516
9517 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9518                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
9519 {
9520         struct hclge_vport *vport = hclge_get_vport(handle);
9521         struct hclge_dev *hdev = vport->back;
9522         struct phy_device *phydev = hdev->hw.mac.phydev;
9523         int mdix_ctrl, mdix, is_resolved;
9524         unsigned int retval;
9525
9526         if (!phydev) {
9527                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9528                 *tp_mdix = ETH_TP_MDI_INVALID;
9529                 return;
9530         }
9531
9532         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9533
9534         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9535         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9536                                     HCLGE_PHY_MDIX_CTRL_S);
9537
9538         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9539         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9540         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9541
9542         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9543
9544         switch (mdix_ctrl) {
9545         case 0x0:
9546                 *tp_mdix_ctrl = ETH_TP_MDI;
9547                 break;
9548         case 0x1:
9549                 *tp_mdix_ctrl = ETH_TP_MDI_X;
9550                 break;
9551         case 0x3:
9552                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9553                 break;
9554         default:
9555                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9556                 break;
9557         }
9558
9559         if (!is_resolved)
9560                 *tp_mdix = ETH_TP_MDI_INVALID;
9561         else if (mdix)
9562                 *tp_mdix = ETH_TP_MDI_X;
9563         else
9564                 *tp_mdix = ETH_TP_MDI;
9565 }
9566
9567 static void hclge_info_show(struct hclge_dev *hdev)
9568 {
9569         struct device *dev = &hdev->pdev->dev;
9570
9571         dev_info(dev, "PF info begin:\n");
9572
9573         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9574         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9575         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9576         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9577         dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9578         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9579         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9580         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9581         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9582         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9583         dev_info(dev, "This is %s PF\n",
9584                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9585         dev_info(dev, "DCB %s\n",
9586                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9587         dev_info(dev, "MQPRIO %s\n",
9588                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9589
9590         dev_info(dev, "PF info end.\n");
9591 }
9592
9593 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9594                                           struct hclge_vport *vport)
9595 {
9596         struct hnae3_client *client = vport->nic.client;
9597         struct hclge_dev *hdev = ae_dev->priv;
9598         int rst_cnt = hdev->rst_stats.reset_cnt;
9599         int ret;
9600
9601         ret = client->ops->init_instance(&vport->nic);
9602         if (ret)
9603                 return ret;
9604
9605         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9606         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9607             rst_cnt != hdev->rst_stats.reset_cnt) {
9608                 ret = -EBUSY;
9609                 goto init_nic_err;
9610         }
9611
9612         /* Enable nic hw error interrupts */
9613         ret = hclge_config_nic_hw_error(hdev, true);
9614         if (ret) {
9615                 dev_err(&ae_dev->pdev->dev,
9616                         "fail(%d) to enable hw error interrupts\n", ret);
9617                 goto init_nic_err;
9618         }
9619
9620         hnae3_set_client_init_flag(client, ae_dev, 1);
9621
9622         if (netif_msg_drv(&hdev->vport->nic))
9623                 hclge_info_show(hdev);
9624
9625         return ret;
9626
9627 init_nic_err:
9628         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9629         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9630                 msleep(HCLGE_WAIT_RESET_DONE);
9631
9632         client->ops->uninit_instance(&vport->nic, 0);
9633
9634         return ret;
9635 }
9636
9637 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9638                                            struct hclge_vport *vport)
9639 {
9640         struct hclge_dev *hdev = ae_dev->priv;
9641         struct hnae3_client *client;
9642         int rst_cnt;
9643         int ret;
9644
9645         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9646             !hdev->nic_client)
9647                 return 0;
9648
9649         client = hdev->roce_client;
9650         ret = hclge_init_roce_base_info(vport);
9651         if (ret)
9652                 return ret;
9653
9654         rst_cnt = hdev->rst_stats.reset_cnt;
9655         ret = client->ops->init_instance(&vport->roce);
9656         if (ret)
9657                 return ret;
9658
9659         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9660         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9661             rst_cnt != hdev->rst_stats.reset_cnt) {
9662                 ret = -EBUSY;
9663                 goto init_roce_err;
9664         }
9665
9666         /* Enable roce ras interrupts */
9667         ret = hclge_config_rocee_ras_interrupt(hdev, true);
9668         if (ret) {
9669                 dev_err(&ae_dev->pdev->dev,
9670                         "fail(%d) to enable roce ras interrupts\n", ret);
9671                 goto init_roce_err;
9672         }
9673
9674         hnae3_set_client_init_flag(client, ae_dev, 1);
9675
9676         return 0;
9677
9678 init_roce_err:
9679         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9680         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9681                 msleep(HCLGE_WAIT_RESET_DONE);
9682
9683         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9684
9685         return ret;
9686 }
9687
9688 static int hclge_init_client_instance(struct hnae3_client *client,
9689                                       struct hnae3_ae_dev *ae_dev)
9690 {
9691         struct hclge_dev *hdev = ae_dev->priv;
9692         struct hclge_vport *vport;
9693         int i, ret;
9694
9695         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
9696                 vport = &hdev->vport[i];
9697
9698                 switch (client->type) {
9699                 case HNAE3_CLIENT_KNIC:
9700                         hdev->nic_client = client;
9701                         vport->nic.client = client;
9702                         ret = hclge_init_nic_client_instance(ae_dev, vport);
9703                         if (ret)
9704                                 goto clear_nic;
9705
9706                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9707                         if (ret)
9708                                 goto clear_roce;
9709
9710                         break;
9711                 case HNAE3_CLIENT_ROCE:
9712                         if (hnae3_dev_roce_supported(hdev)) {
9713                                 hdev->roce_client = client;
9714                                 vport->roce.client = client;
9715                         }
9716
9717                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9718                         if (ret)
9719                                 goto clear_roce;
9720
9721                         break;
9722                 default:
9723                         return -EINVAL;
9724                 }
9725         }
9726
9727         return 0;
9728
9729 clear_nic:
9730         hdev->nic_client = NULL;
9731         vport->nic.client = NULL;
9732         return ret;
9733 clear_roce:
9734         hdev->roce_client = NULL;
9735         vport->roce.client = NULL;
9736         return ret;
9737 }
9738
9739 static void hclge_uninit_client_instance(struct hnae3_client *client,
9740                                          struct hnae3_ae_dev *ae_dev)
9741 {
9742         struct hclge_dev *hdev = ae_dev->priv;
9743         struct hclge_vport *vport;
9744         int i;
9745
9746         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9747                 vport = &hdev->vport[i];
9748                 if (hdev->roce_client) {
9749                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9750                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9751                                 msleep(HCLGE_WAIT_RESET_DONE);
9752
9753                         hdev->roce_client->ops->uninit_instance(&vport->roce,
9754                                                                 0);
9755                         hdev->roce_client = NULL;
9756                         vport->roce.client = NULL;
9757                 }
9758                 if (client->type == HNAE3_CLIENT_ROCE)
9759                         return;
9760                 if (hdev->nic_client && client->ops->uninit_instance) {
9761                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9762                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9763                                 msleep(HCLGE_WAIT_RESET_DONE);
9764
9765                         client->ops->uninit_instance(&vport->nic, 0);
9766                         hdev->nic_client = NULL;
9767                         vport->nic.client = NULL;
9768                 }
9769         }
9770 }
9771
9772 static int hclge_pci_init(struct hclge_dev *hdev)
9773 {
9774         struct pci_dev *pdev = hdev->pdev;
9775         struct hclge_hw *hw;
9776         int ret;
9777
9778         ret = pci_enable_device(pdev);
9779         if (ret) {
9780                 dev_err(&pdev->dev, "failed to enable PCI device\n");
9781                 return ret;
9782         }
9783
9784         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9785         if (ret) {
9786                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9787                 if (ret) {
9788                         dev_err(&pdev->dev,
9789                                 "can't set consistent PCI DMA");
9790                         goto err_disable_device;
9791                 }
9792                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9793         }
9794
9795         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9796         if (ret) {
9797                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9798                 goto err_disable_device;
9799         }
9800
9801         pci_set_master(pdev);
9802         hw = &hdev->hw;
9803         hw->io_base = pcim_iomap(pdev, 2, 0);
9804         if (!hw->io_base) {
9805                 dev_err(&pdev->dev, "Can't map configuration register space\n");
9806                 ret = -ENOMEM;
9807                 goto err_clr_master;
9808         }
9809
9810         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9811
9812         return 0;
9813 err_clr_master:
9814         pci_clear_master(pdev);
9815         pci_release_regions(pdev);
9816 err_disable_device:
9817         pci_disable_device(pdev);
9818
9819         return ret;
9820 }
9821
9822 static void hclge_pci_uninit(struct hclge_dev *hdev)
9823 {
9824         struct pci_dev *pdev = hdev->pdev;
9825
9826         pcim_iounmap(pdev, hdev->hw.io_base);
9827         pci_free_irq_vectors(pdev);
9828         pci_clear_master(pdev);
9829         pci_release_mem_regions(pdev);
9830         pci_disable_device(pdev);
9831 }
9832
9833 static void hclge_state_init(struct hclge_dev *hdev)
9834 {
9835         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9836         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9837         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9838         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9839         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9840         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9841         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9842 }
9843
9844 static void hclge_state_uninit(struct hclge_dev *hdev)
9845 {
9846         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9847         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9848
9849         if (hdev->reset_timer.function)
9850                 del_timer_sync(&hdev->reset_timer);
9851         if (hdev->service_task.work.func)
9852                 cancel_delayed_work_sync(&hdev->service_task);
9853 }
9854
9855 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9856 {
9857 #define HCLGE_FLR_RETRY_WAIT_MS 500
9858 #define HCLGE_FLR_RETRY_CNT     5
9859
9860         struct hclge_dev *hdev = ae_dev->priv;
9861         int retry_cnt = 0;
9862         int ret;
9863
9864 retry:
9865         down(&hdev->reset_sem);
9866         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9867         hdev->reset_type = HNAE3_FLR_RESET;
9868         ret = hclge_reset_prepare(hdev);
9869         if (ret) {
9870                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9871                         ret);
9872                 if (hdev->reset_pending ||
9873                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9874                         dev_err(&hdev->pdev->dev,
9875                                 "reset_pending:0x%lx, retry_cnt:%d\n",
9876                                 hdev->reset_pending, retry_cnt);
9877                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9878                         up(&hdev->reset_sem);
9879                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
9880                         goto retry;
9881                 }
9882         }
9883
9884         /* disable misc vector before FLR is done */
9885         hclge_enable_vector(&hdev->misc_vector, false);
9886         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9887         hdev->rst_stats.flr_rst_cnt++;
9888 }
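
/* Retry behaviour of hclge_flr_prepare() above: if hclge_reset_prepare()
 * fails, the prepare step is retried as long as another reset is still
 * pending, or otherwise for up to HCLGE_FLR_RETRY_CNT (5) more attempts,
 * with a HCLGE_FLR_RETRY_WAIT_MS (500 ms) sleep and the reset semaphore
 * released between attempts.
 */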
9889
9890 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9891 {
9892         struct hclge_dev *hdev = ae_dev->priv;
9893         int ret;
9894
9895         hclge_enable_vector(&hdev->misc_vector, true);
9896
9897         ret = hclge_reset_rebuild(hdev);
9898         if (ret)
9899                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9900
9901         hdev->reset_type = HNAE3_NONE_RESET;
9902         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9903         up(&hdev->reset_sem);
9904 }
9905
9906 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9907 {
9908         u16 i;
9909
9910         for (i = 0; i < hdev->num_alloc_vport; i++) {
9911                 struct hclge_vport *vport = &hdev->vport[i];
9912                 int ret;
9913
9914                  /* Send cmd to clear VF's FUNC_RST_ING */
9915                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9916                 if (ret)
9917                         dev_warn(&hdev->pdev->dev,
9918                                  "clear vf(%u) rst failed %d!\n",
9919                                  vport->vport_id, ret);
9920         }
9921 }
9922
9923 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9924 {
9925         struct pci_dev *pdev = ae_dev->pdev;
9926         struct hclge_dev *hdev;
9927         int ret;
9928
9929         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9930         if (!hdev) {
9931                 ret = -ENOMEM;
9932                 goto out;
9933         }
9934
9935         hdev->pdev = pdev;
9936         hdev->ae_dev = ae_dev;
9937         hdev->reset_type = HNAE3_NONE_RESET;
9938         hdev->reset_level = HNAE3_FUNC_RESET;
9939         ae_dev->priv = hdev;
9940
9941         /* HW supports 2-layer VLAN */
9942         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9943
9944         mutex_init(&hdev->vport_lock);
9945         spin_lock_init(&hdev->fd_rule_lock);
9946         sema_init(&hdev->reset_sem, 1);
9947
9948         ret = hclge_pci_init(hdev);
9949         if (ret)
9950                 goto out;
9951
9952         /* Initialize the firmware command queue */
9953         ret = hclge_cmd_queue_init(hdev);
9954         if (ret)
9955                 goto err_pci_uninit;
9956
9957         /* Firmware command initialization */
9958         ret = hclge_cmd_init(hdev);
9959         if (ret)
9960                 goto err_cmd_uninit;
9961
9962         ret = hclge_get_cap(hdev);
9963         if (ret)
9964                 goto err_cmd_uninit;
9965
9966         ret = hclge_configure(hdev);
9967         if (ret) {
9968                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9969                 goto err_cmd_uninit;
9970         }
9971
9972         ret = hclge_init_msi(hdev);
9973         if (ret) {
9974                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9975                 goto err_cmd_uninit;
9976         }
9977
9978         ret = hclge_misc_irq_init(hdev);
9979         if (ret)
9980                 goto err_msi_uninit;
9981
9982         ret = hclge_alloc_tqps(hdev);
9983         if (ret) {
9984                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9985                 goto err_msi_irq_uninit;
9986         }
9987
9988         ret = hclge_alloc_vport(hdev);
9989         if (ret)
9990                 goto err_msi_irq_uninit;
9991
9992         ret = hclge_map_tqp(hdev);
9993         if (ret)
9994                 goto err_msi_irq_uninit;
9995
9996         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9997                 ret = hclge_mac_mdio_config(hdev);
9998                 if (ret)
9999                         goto err_msi_irq_uninit;
10000         }
10001
10002         ret = hclge_init_umv_space(hdev);
10003         if (ret)
10004                 goto err_mdiobus_unreg;
10005
10006         ret = hclge_mac_init(hdev);
10007         if (ret) {
10008                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10009                 goto err_mdiobus_unreg;
10010         }
10011
10012         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10013         if (ret) {
10014                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10015                 goto err_mdiobus_unreg;
10016         }
10017
10018         ret = hclge_config_gro(hdev, true);
10019         if (ret)
10020                 goto err_mdiobus_unreg;
10021
10022         ret = hclge_init_vlan_config(hdev);
10023         if (ret) {
10024                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10025                 goto err_mdiobus_unreg;
10026         }
10027
10028         ret = hclge_tm_schd_init(hdev);
10029         if (ret) {
10030                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10031                 goto err_mdiobus_unreg;
10032         }
10033
10034         hclge_rss_init_cfg(hdev);
10035         ret = hclge_rss_init_hw(hdev);
10036         if (ret) {
10037                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10038                 goto err_mdiobus_unreg;
10039         }
10040
10041         ret = init_mgr_tbl(hdev);
10042         if (ret) {
10043                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10044                 goto err_mdiobus_unreg;
10045         }
10046
10047         ret = hclge_init_fd_config(hdev);
10048         if (ret) {
10049                 dev_err(&pdev->dev,
10050                         "fd table init fail, ret=%d\n", ret);
10051                 goto err_mdiobus_unreg;
10052         }
10053
10054         INIT_KFIFO(hdev->mac_tnl_log);
10055
10056         hclge_dcb_ops_set(hdev);
10057
10058         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10059         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10060
10061         /* Set up affinity after the service timer is set up, because
10062          * add_timer_on is called in the affinity notify callback.
10063          */
10064         hclge_misc_affinity_setup(hdev);
10065
10066         hclge_clear_all_event_cause(hdev);
10067         hclge_clear_resetting_state(hdev);
10068
10069         /* Log and clear the hw errors that have already occurred */
10070         hclge_handle_all_hns_hw_errors(ae_dev);
10071
10072         /* Request a delayed reset for error recovery, because an immediate
10073          * global reset on a PF may affect pending initialization of other PFs.
10074          */
10075         if (ae_dev->hw_err_reset_req) {
10076                 enum hnae3_reset_type reset_level;
10077
10078                 reset_level = hclge_get_reset_level(ae_dev,
10079                                                     &ae_dev->hw_err_reset_req);
10080                 hclge_set_def_reset_request(ae_dev, reset_level);
10081                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10082         }
10083
10084         /* Enable MISC vector(vector0) */
10085         hclge_enable_vector(&hdev->misc_vector, true);
10086
10087         hclge_state_init(hdev);
10088         hdev->last_reset_time = jiffies;
10089
10090         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10091                  HCLGE_DRIVER_NAME);
10092
10093         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10094
10095         return 0;
10096
10097 err_mdiobus_unreg:
10098         if (hdev->hw.mac.phydev)
10099                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10100 err_msi_irq_uninit:
10101         hclge_misc_irq_uninit(hdev);
10102 err_msi_uninit:
10103         pci_free_irq_vectors(pdev);
10104 err_cmd_uninit:
10105         hclge_cmd_uninit(hdev);
10106 err_pci_uninit:
10107         pcim_iounmap(pdev, hdev->hw.io_base);
10108         pci_clear_master(pdev);
10109         pci_release_regions(pdev);
10110         pci_disable_device(pdev);
10111 out:
10112         return ret;
10113 }
10114
10115 static void hclge_stats_clear(struct hclge_dev *hdev)
10116 {
10117         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10118 }
10119
10120 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10121 {
10122         return hclge_config_switch_param(hdev, vf, enable,
10123                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
10124 }
10125
10126 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10127 {
10128         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10129                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
10130                                           enable, vf);
10131 }
10132
10133 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10134 {
10135         int ret;
10136
10137         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10138         if (ret) {
10139                 dev_err(&hdev->pdev->dev,
10140                         "Set vf %d mac spoof check %s failed, ret=%d\n",
10141                         vf, enable ? "on" : "off", ret);
10142                 return ret;
10143         }
10144
10145         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10146         if (ret)
10147                 dev_err(&hdev->pdev->dev,
10148                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
10149                         vf, enable ? "on" : "off", ret);
10150
10151         return ret;
10152 }
10153
10154 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10155                                  bool enable)
10156 {
10157         struct hclge_vport *vport = hclge_get_vport(handle);
10158         struct hclge_dev *hdev = vport->back;
10159         u32 new_spoofchk = enable ? 1 : 0;
10160         int ret;
10161
10162         if (hdev->pdev->revision == 0x20)
10163                 return -EOPNOTSUPP;
10164
10165         vport = hclge_get_vf_vport(hdev, vf);
10166         if (!vport)
10167                 return -EINVAL;
10168
10169         if (vport->vf_info.spoofchk == new_spoofchk)
10170                 return 0;
10171
10172         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10173                 dev_warn(&hdev->pdev->dev,
10174                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10175                          vf);
10176         else if (enable && hclge_is_umv_space_full(vport, true))
10177                 dev_warn(&hdev->pdev->dev,
10178                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10179                          vf);
10180
10181         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10182         if (ret)
10183                 return ret;
10184
10185         vport->vf_info.spoofchk = new_spoofchk;
10186         return 0;
10187 }
10188
10189 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10190 {
10191         struct hclge_vport *vport = hdev->vport;
10192         int ret;
10193         int i;
10194
10195         if (hdev->pdev->revision == 0x20)
10196                 return 0;
10197
10198         /* resume the vf spoof check state after reset */
10199         for (i = 0; i < hdev->num_alloc_vport; i++) {
10200                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10201                                                vport->vf_info.spoofchk);
10202                 if (ret)
10203                         return ret;
10204
10205                 vport++;
10206         }
10207
10208         return 0;
10209 }
10210
10211 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10212 {
10213         struct hclge_vport *vport = hclge_get_vport(handle);
10214         struct hclge_dev *hdev = vport->back;
10215         u32 new_trusted = enable ? 1 : 0;
10216         bool en_bc_pmc;
10217         int ret;
10218
10219         vport = hclge_get_vf_vport(hdev, vf);
10220         if (!vport)
10221                 return -EINVAL;
10222
10223         if (vport->vf_info.trusted == new_trusted)
10224                 return 0;
10225
10226         /* Disable promisc mode for VF if it is not trusted any more. */
10227         if (!enable && vport->vf_info.promisc_enable) {
10228                 en_bc_pmc = hdev->pdev->revision != 0x20;
10229                 ret = hclge_set_vport_promisc_mode(vport, false, false,
10230                                                    en_bc_pmc);
10231                 if (ret)
10232                         return ret;
10233                 vport->vf_info.promisc_enable = 0;
10234                 hclge_inform_vf_promisc_info(vport);
10235         }
10236
10237         vport->vf_info.trusted = new_trusted;
10238
10239         return 0;
10240 }
10241
10242 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10243 {
10244         int ret;
10245         int vf;
10246
10247         /* reset vf rate to default value */
10248         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10249                 struct hclge_vport *vport = &hdev->vport[vf];
10250
10251                 vport->vf_info.max_tx_rate = 0;
10252                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10253                 if (ret)
10254                         dev_err(&hdev->pdev->dev,
10255                                 "vf%d failed to reset to default, ret=%d\n",
10256                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10257         }
10258 }
10259
10260 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10261                                      int min_tx_rate, int max_tx_rate)
10262 {
10263         if (min_tx_rate != 0 ||
10264             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10265                 dev_err(&hdev->pdev->dev,
10266                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10267                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10268                 return -EINVAL;
10269         }
10270
10271         return 0;
10272 }
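
/* Illustration of hclge_vf_rate_param_check() above: min_tx_rate must be 0
 * (a per-VF minimum rate is not supported by this check) and max_tx_rate
 * must lie in [0, hdev->hw.mac.max_speed]. Assuming max_speed is in Mbit/s
 * (e.g. 25000 on a 25G port), max_tx_rate = 10000 is accepted while
 * max_tx_rate = 30000 is rejected with -EINVAL.
 */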
10273
10274 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10275                              int min_tx_rate, int max_tx_rate, bool force)
10276 {
10277         struct hclge_vport *vport = hclge_get_vport(handle);
10278         struct hclge_dev *hdev = vport->back;
10279         int ret;
10280
10281         ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10282         if (ret)
10283                 return ret;
10284
10285         vport = hclge_get_vf_vport(hdev, vf);
10286         if (!vport)
10287                 return -EINVAL;
10288
10289         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10290                 return 0;
10291
10292         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10293         if (ret)
10294                 return ret;
10295
10296         vport->vf_info.max_tx_rate = max_tx_rate;
10297
10298         return 0;
10299 }
10300
10301 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10302 {
10303         struct hnae3_handle *handle = &hdev->vport->nic;
10304         struct hclge_vport *vport;
10305         int ret;
10306         int vf;
10307
10308         /* resume the vf max_tx_rate after reset */
10309         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10310                 vport = hclge_get_vf_vport(hdev, vf);
10311                 if (!vport)
10312                         return -EINVAL;
10313
10314                 /* zero means max rate; after reset, the firmware has already
10315                  * set it to max rate, so just continue.
10316                  */
10317                 if (!vport->vf_info.max_tx_rate)
10318                         continue;
10319
10320                 ret = hclge_set_vf_rate(handle, vf, 0,
10321                                         vport->vf_info.max_tx_rate, true);
10322                 if (ret) {
10323                         dev_err(&hdev->pdev->dev,
10324                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10325                                 vf, vport->vf_info.max_tx_rate, ret);
10326                         return ret;
10327                 }
10328         }
10329
10330         return 0;
10331 }
10332
10333 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10334 {
10335         struct hclge_vport *vport = hdev->vport;
10336         int i;
10337
10338         for (i = 0; i < hdev->num_alloc_vport; i++) {
10339                 hclge_vport_stop(vport);
10340                 vport++;
10341         }
10342 }
10343
10344 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10345 {
10346         struct hclge_dev *hdev = ae_dev->priv;
10347         struct pci_dev *pdev = ae_dev->pdev;
10348         int ret;
10349
10350         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10351
10352         hclge_stats_clear(hdev);
10353         /* NOTE: a PF reset does not need to clear or restore the PF and VF
10354          * table entries, so the tables in memory should not be cleaned here.
10355          */
10356         if (hdev->reset_type == HNAE3_IMP_RESET ||
10357             hdev->reset_type == HNAE3_GLOBAL_RESET) {
10358                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10359                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10360                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10361                 hclge_reset_umv_space(hdev);
10362         }
10363
10364         ret = hclge_cmd_init(hdev);
10365         if (ret) {
10366                 dev_err(&pdev->dev, "Cmd queue init failed\n");
10367                 return ret;
10368         }
10369
10370         ret = hclge_map_tqp(hdev);
10371         if (ret) {
10372                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10373                 return ret;
10374         }
10375
10376         ret = hclge_mac_init(hdev);
10377         if (ret) {
10378                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10379                 return ret;
10380         }
10381
10382         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10383         if (ret) {
10384                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10385                 return ret;
10386         }
10387
10388         ret = hclge_config_gro(hdev, true);
10389         if (ret)
10390                 return ret;
10391
10392         ret = hclge_init_vlan_config(hdev);
10393         if (ret) {
10394                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10395                 return ret;
10396         }
10397
10398         ret = hclge_tm_init_hw(hdev, true);
10399         if (ret) {
10400                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10401                 return ret;
10402         }
10403
10404         ret = hclge_rss_init_hw(hdev);
10405         if (ret) {
10406                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10407                 return ret;
10408         }
10409
10410         ret = init_mgr_tbl(hdev);
10411         if (ret) {
10412                 dev_err(&pdev->dev,
10413                         "failed to reinit manager table, ret = %d\n", ret);
10414                 return ret;
10415         }
10416
10417         ret = hclge_init_fd_config(hdev);
10418         if (ret) {
10419                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10420                 return ret;
10421         }
10422
10423         /* Log and clear the hw errors that have already occurred */
10424         hclge_handle_all_hns_hw_errors(ae_dev);
10425
10426         /* Re-enable the hw error interrupts because
10427          * the interrupts get disabled on global reset.
10428          */
10429         ret = hclge_config_nic_hw_error(hdev, true);
10430         if (ret) {
10431                 dev_err(&pdev->dev,
10432                         "fail(%d) to re-enable NIC hw error interrupts\n",
10433                         ret);
10434                 return ret;
10435         }
10436
10437         if (hdev->roce_client) {
10438                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10439                 if (ret) {
10440                         dev_err(&pdev->dev,
10441                                 "fail(%d) to re-enable roce ras interrupts\n",
10442                                 ret);
10443                         return ret;
10444                 }
10445         }
10446
10447         hclge_reset_vport_state(hdev);
10448         ret = hclge_reset_vport_spoofchk(hdev);
10449         if (ret)
10450                 return ret;
10451
10452         ret = hclge_resume_vf_rate(hdev);
10453         if (ret)
10454                 return ret;
10455
10456         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10457                  HCLGE_DRIVER_NAME);
10458
10459         return 0;
10460 }
10461
10462 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10463 {
10464         struct hclge_dev *hdev = ae_dev->priv;
10465         struct hclge_mac *mac = &hdev->hw.mac;
10466
10467         hclge_reset_vf_rate(hdev);
10468         hclge_clear_vf_vlan(hdev);
10469         hclge_misc_affinity_teardown(hdev);
10470         hclge_state_uninit(hdev);
10471         hclge_uninit_mac_table(hdev);
10472
10473         if (mac->phydev)
10474                 mdiobus_unregister(mac->mdio_bus);
10475
10476         /* Disable MISC vector(vector0) */
10477         hclge_enable_vector(&hdev->misc_vector, false);
10478         synchronize_irq(hdev->misc_vector.vector_irq);
10479
10480         /* Disable all hw interrupts */
10481         hclge_config_mac_tnl_int(hdev, false);
10482         hclge_config_nic_hw_error(hdev, false);
10483         hclge_config_rocee_ras_interrupt(hdev, false);
10484
10485         hclge_cmd_uninit(hdev);
10486         hclge_misc_irq_uninit(hdev);
10487         hclge_pci_uninit(hdev);
10488         mutex_destroy(&hdev->vport_lock);
10489         hclge_uninit_vport_vlan_table(hdev);
10490         ae_dev->priv = NULL;
10491 }
10492
10493 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10494 {
10495         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10496         struct hclge_vport *vport = hclge_get_vport(handle);
10497         struct hclge_dev *hdev = vport->back;
10498
10499         return min_t(u32, hdev->rss_size_max,
10500                      vport->alloc_tqps / kinfo->num_tc);
10501 }
10502
10503 static void hclge_get_channels(struct hnae3_handle *handle,
10504                                struct ethtool_channels *ch)
10505 {
10506         ch->max_combined = hclge_get_max_channels(handle);
10507         ch->other_count = 1;
10508         ch->max_other = 1;
10509         ch->combined_count = handle->kinfo.rss_size;
10510 }
10511
10512 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10513                                         u16 *alloc_tqps, u16 *max_rss_size)
10514 {
10515         struct hclge_vport *vport = hclge_get_vport(handle);
10516         struct hclge_dev *hdev = vport->back;
10517
10518         *alloc_tqps = vport->alloc_tqps;
10519         *max_rss_size = hdev->rss_size_max;
10520 }
10521
10522 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10523                               bool rxfh_configured)
10524 {
10525         struct hclge_vport *vport = hclge_get_vport(handle);
10526         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10527         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10528         struct hclge_dev *hdev = vport->back;
10529         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10530         u16 cur_rss_size = kinfo->rss_size;
10531         u16 cur_tqps = kinfo->num_tqps;
10532         u16 tc_valid[HCLGE_MAX_TC_NUM];
10533         u16 roundup_size;
10534         u32 *rss_indir;
10535         unsigned int i;
10536         int ret;
10537
10538         kinfo->req_rss_size = new_tqps_num;
10539
10540         ret = hclge_tm_vport_map_update(hdev);
10541         if (ret) {
10542                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10543                 return ret;
10544         }
10545
10546         roundup_size = roundup_pow_of_two(kinfo->rss_size);
10547         roundup_size = ilog2(roundup_size);
10548         /* Set the RSS TC mode according to the new RSS size */
10549         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10550                 tc_valid[i] = 0;
10551
10552                 if (!(hdev->hw_tc_map & BIT(i)))
10553                         continue;
10554
10555                 tc_valid[i] = 1;
10556                 tc_size[i] = roundup_size;
10557                 tc_offset[i] = kinfo->rss_size * i;
10558         }
10559         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10560         if (ret)
10561                 return ret;
10562
10563         /* RSS indirection table has been configured by user */
10564         if (rxfh_configured)
10565                 goto out;
10566
10567         /* Reinitialize the RSS indirection table according to the new RSS size */
10568         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10569         if (!rss_indir)
10570                 return -ENOMEM;
10571
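        /* Default round-robin fill, e.g. with rss_size = 4 every entry i of
         * the indirection table gets queue i % 4: 0, 1, 2, 3, 0, 1, 2, 3, ...
         */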
10572         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10573                 rss_indir[i] = i % kinfo->rss_size;
10574
10575         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10576         if (ret)
10577                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10578                         ret);
10579
10580         kfree(rss_indir);
10581
10582 out:
10583         if (!ret)
10584                 dev_info(&hdev->pdev->dev,
10585                          "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10586                          cur_rss_size, kinfo->rss_size,
10587                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
10588
10589         return ret;
10590 }
10591
10592 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10593                               u32 *regs_num_64_bit)
10594 {
10595         struct hclge_desc desc;
10596         u32 total_num;
10597         int ret;
10598
10599         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10600         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10601         if (ret) {
10602                 dev_err(&hdev->pdev->dev,
10603                         "Query register number cmd failed, ret = %d.\n", ret);
10604                 return ret;
10605         }
10606
10607         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10608         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10609
10610         total_num = *regs_num_32_bit + *regs_num_64_bit;
10611         if (!total_num)
10612                 return -EINVAL;
10613
10614         return 0;
10615 }
10616
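/* Read back regs_num 32-bit registers through a chain of query descriptors.
 * Each descriptor holds HCLGE_32_BIT_REG_RTN_DATANUM (8) 32-bit words, but
 * HCLGE_32_BIT_DESC_NODATA_LEN (2) words of the first descriptor carry no
 * register data, hence cmd_num = DIV_ROUND_UP(regs_num + 2, 8); e.g.
 * regs_num = 100 needs DIV_ROUND_UP(102, 8) = 13 descriptors.
 */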
10617 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10618                                  void *data)
10619 {
10620 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10621 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10622
10623         struct hclge_desc *desc;
10624         u32 *reg_val = data;
10625         __le32 *desc_data;
10626         int nodata_num;
10627         int cmd_num;
10628         int i, k, n;
10629         int ret;
10630
10631         if (regs_num == 0)
10632                 return 0;
10633
10634         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10635         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10636                                HCLGE_32_BIT_REG_RTN_DATANUM);
10637         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10638         if (!desc)
10639                 return -ENOMEM;
10640
10641         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10642         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10643         if (ret) {
10644                 dev_err(&hdev->pdev->dev,
10645                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
10646                 kfree(desc);
10647                 return ret;
10648         }
10649
10650         for (i = 0; i < cmd_num; i++) {
10651                 if (i == 0) {
10652                         desc_data = (__le32 *)(&desc[i].data[0]);
10653                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10654                 } else {
10655                         desc_data = (__le32 *)(&desc[i]);
10656                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
10657                 }
10658                 for (k = 0; k < n; k++) {
10659                         *reg_val++ = le32_to_cpu(*desc_data++);
10660
10661                         regs_num--;
10662                         if (!regs_num)
10663                                 break;
10664                 }
10665         }
10666
10667         kfree(desc);
10668         return 0;
10669 }
10670
10671 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10672                                  void *data)
10673 {
10674 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10675 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10676
10677         struct hclge_desc *desc;
10678         u64 *reg_val = data;
10679         __le64 *desc_data;
10680         int nodata_len;
10681         int cmd_num;
10682         int i, k, n;
10683         int ret;
10684
10685         if (regs_num == 0)
10686                 return 0;
10687
10688         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10689         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10690                                HCLGE_64_BIT_REG_RTN_DATANUM);
10691         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10692         if (!desc)
10693                 return -ENOMEM;
10694
10695         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10696         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10697         if (ret) {
10698                 dev_err(&hdev->pdev->dev,
10699                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
10700                 kfree(desc);
10701                 return ret;
10702         }
10703
10704         for (i = 0; i < cmd_num; i++) {
10705                 if (i == 0) {
10706                         desc_data = (__le64 *)(&desc[i].data[0]);
10707                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10708                 } else {
10709                         desc_data = (__le64 *)(&desc[i]);
10710                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
10711                 }
10712                 for (k = 0; k < n; k++) {
10713                         *reg_val++ = le64_to_cpu(*desc_data++);
10714
10715                         regs_num--;
10716                         if (!regs_num)
10717                                 break;
10718                 }
10719         }
10720
10721         kfree(desc);
10722         return 0;
10723 }
10724
10725 #define MAX_SEPARATE_NUM        4
10726 #define SEPARATOR_VALUE         0xFDFCFBFA
10727 #define REG_NUM_PER_LINE        4
10728 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
10729 #define REG_SEPARATOR_LINE      1
10730 #define REG_NUM_REMAIN_MASK     3
10731 #define BD_LIST_MAX_NUM         30
10732
10733 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10734 {
10735         /* prepare 4 commands to query DFX BD number */
10736         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10737         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10738         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10739         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10740         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10741         desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10742         hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10743
10744         return hclge_cmd_send(&hdev->hw, desc, 4);
10745 }
10746
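/* hclge_query_bd_num_cmd_send() above returns the per-module DFX BD counts
 * packed entries_per_desc (ARRAY_SIZE(desc[0].data)) entries per descriptor.
 * The function below maps a BD-number offset to desc_index = offset /
 * entries_per_desc and index = offset % entries_per_desc; e.g. with six data
 * words per descriptor, offset 7 reads desc[1].data[1].
 */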
10747 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10748                                     int *bd_num_list,
10749                                     u32 type_num)
10750 {
10751         u32 entries_per_desc, desc_index, index, offset, i;
10752         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10753         int ret;
10754
10755         ret = hclge_query_bd_num_cmd_send(hdev, desc);
10756         if (ret) {
10757                 dev_err(&hdev->pdev->dev,
10758                         "Get dfx bd num fail, status is %d.\n", ret);
10759                 return ret;
10760         }
10761
10762         entries_per_desc = ARRAY_SIZE(desc[0].data);
10763         for (i = 0; i < type_num; i++) {
10764                 offset = hclge_dfx_bd_offset_list[i];
10765                 index = offset % entries_per_desc;
10766                 desc_index = offset / entries_per_desc;
10767                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10768         }
10769
10770         return ret;
10771 }
10772
10773 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10774                                   struct hclge_desc *desc_src, int bd_num,
10775                                   enum hclge_opcode_type cmd)
10776 {
10777         struct hclge_desc *desc = desc_src;
10778         int i, ret;
10779
10780         hclge_cmd_setup_basic_desc(desc, cmd, true);
10781         for (i = 0; i < bd_num - 1; i++) {
10782                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10783                 desc++;
10784                 hclge_cmd_setup_basic_desc(desc, cmd, true);
10785         }
10786
10787         desc = desc_src;
10788         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10789         if (ret)
10790                 dev_err(&hdev->pdev->dev,
10791                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10792                         cmd, ret);
10793
10794         return ret;
10795 }
10796
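/* Copy the register words out of the descriptors and pad the output with
 * SEPARATOR_VALUE words so each block ends on a REG_NUM_PER_LINE (4 word)
 * boundary; e.g. 18 register words (3 BDs) get 4 - (18 & 3) = 2 separator
 * words, while an already aligned 12 words get a full separator line of 4.
 */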
10797 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10798                                     void *data)
10799 {
10800         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10801         struct hclge_desc *desc = desc_src;
10802         u32 *reg = data;
10803
10804         entries_per_desc = ARRAY_SIZE(desc->data);
10805         reg_num = entries_per_desc * bd_num;
10806         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10807         for (i = 0; i < reg_num; i++) {
10808                 index = i % entries_per_desc;
10809                 desc_index = i / entries_per_desc;
10810                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10811         }
10812         for (i = 0; i < separator_num; i++)
10813                 *reg++ = SEPARATOR_VALUE;
10814
10815         return reg_num + separator_num;
10816 }
10817
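/* Size the DFX part of the register dump: each type contributes
 * bd_num * sizeof_field(struct hclge_desc, data) bytes of data, padded up to
 * the next REG_LEN_PER_LINE (16 byte) boundary, or by a full separator line
 * when already aligned; e.g. a type with 3 BDs and 24 data bytes per BD
 * carries 72 bytes, reserved as (72 / 16 + 1) * 16 = 80 bytes.
 */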
10818 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10819 {
10820         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10821         int data_len_per_desc, bd_num, i;
10822         int bd_num_list[BD_LIST_MAX_NUM];
10823         u32 data_len;
10824         int ret;
10825
10826         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10827         if (ret) {
10828                 dev_err(&hdev->pdev->dev,
10829                         "Get dfx reg bd num fail, status is %d.\n", ret);
10830                 return ret;
10831         }
10832
10833         data_len_per_desc = sizeof_field(struct hclge_desc, data);
10834         *len = 0;
10835         for (i = 0; i < dfx_reg_type_num; i++) {
10836                 bd_num = bd_num_list[i];
10837                 data_len = data_len_per_desc * bd_num;
10838                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10839         }
10840
10841         return ret;
10842 }
10843
10844 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10845 {
10846         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10847         int bd_num, bd_num_max, buf_len, i;
10848         int bd_num_list[BD_LIST_MAX_NUM];
10849         struct hclge_desc *desc_src;
10850         u32 *reg = data;
10851         int ret;
10852
10853         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10854         if (ret) {
10855                 dev_err(&hdev->pdev->dev,
10856                         "Get dfx reg bd num fail, status is %d.\n", ret);
10857                 return ret;
10858         }
10859
10860         bd_num_max = bd_num_list[0];
10861         for (i = 1; i < dfx_reg_type_num; i++)
10862                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10863
10864         buf_len = sizeof(*desc_src) * bd_num_max;
10865         desc_src = kzalloc(buf_len, GFP_KERNEL);
10866         if (!desc_src)
10867                 return -ENOMEM;
10868
10869         for (i = 0; i < dfx_reg_type_num; i++) {
10870                 bd_num = bd_num_list[i];
10871                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10872                                              hclge_dfx_reg_opcode_list[i]);
10873                 if (ret) {
10874                         dev_err(&hdev->pdev->dev,
10875                                 "Get dfx reg fail, status is %d.\n", ret);
10876                         break;
10877                 }
10878
10879                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10880         }
10881
10882         kfree(desc_src);
10883         return ret;
10884 }
10885
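/* Dump the directly readable PF registers: cmdq, common, per-ring and
 * per-vector interrupt registers, each block padded with SEPARATOR_VALUE
 * words. Ring register blocks are HCLGE_RING_REG_OFFSET (0x200) bytes apart
 * per TQP and the interrupt registers HCLGE_RING_INT_REG_OFFSET (0x4) bytes
 * apart per vector; only num_msi_used - 1 vectors are walked (one vector,
 * presumably the misc vector, is excluded).
 */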
10886 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10887                               struct hnae3_knic_private_info *kinfo)
10888 {
10889 #define HCLGE_RING_REG_OFFSET           0x200
10890 #define HCLGE_RING_INT_REG_OFFSET       0x4
10891
10892         int i, j, reg_num, separator_num;
10893         int data_num_sum;
10894         u32 *reg = data;
10895
10896         /* fetch per-PF register values from the PF PCIe register space */
10897         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10898         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10899         for (i = 0; i < reg_num; i++)
10900                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10901         for (i = 0; i < separator_num; i++)
10902                 *reg++ = SEPARATOR_VALUE;
10903         data_num_sum = reg_num + separator_num;
10904
10905         reg_num = ARRAY_SIZE(common_reg_addr_list);
10906         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10907         for (i = 0; i < reg_num; i++)
10908                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10909         for (i = 0; i < separator_num; i++)
10910                 *reg++ = SEPARATOR_VALUE;
10911         data_num_sum += reg_num + separator_num;
10912
10913         reg_num = ARRAY_SIZE(ring_reg_addr_list);
10914         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10915         for (j = 0; j < kinfo->num_tqps; j++) {
10916                 for (i = 0; i < reg_num; i++)
10917                         *reg++ = hclge_read_dev(&hdev->hw,
10918                                                 ring_reg_addr_list[i] +
10919                                                 HCLGE_RING_REG_OFFSET * j);
10920                 for (i = 0; i < separator_num; i++)
10921                         *reg++ = SEPARATOR_VALUE;
10922         }
10923         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10924
10925         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10926         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10927         for (j = 0; j < hdev->num_msi_used - 1; j++) {
10928                 for (i = 0; i < reg_num; i++)
10929                         *reg++ = hclge_read_dev(&hdev->hw,
10930                                                 tqp_intr_reg_addr_list[i] +
10931                                                 HCLGE_RING_INT_REG_OFFSET * j);
10932                 for (i = 0; i < separator_num; i++)
10933                         *reg++ = SEPARATOR_VALUE;
10934         }
10935         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10936
10937         return data_num_sum;
10938 }
10939
10940 static int hclge_get_regs_len(struct hnae3_handle *handle)
10941 {
10942         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10943         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10944         struct hclge_vport *vport = hclge_get_vport(handle);
10945         struct hclge_dev *hdev = vport->back;
10946         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10947         int regs_lines_32_bit, regs_lines_64_bit;
10948         int ret;
10949
10950         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10951         if (ret) {
10952                 dev_err(&hdev->pdev->dev,
10953                         "Get register number failed, ret = %d.\n", ret);
10954                 return ret;
10955         }
10956
10957         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10958         if (ret) {
10959                 dev_err(&hdev->pdev->dev,
10960                         "Get dfx reg len failed, ret = %d.\n", ret);
10961                 return ret;
10962         }
10963
10964         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10965                 REG_SEPARATOR_LINE;
10966         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10967                 REG_SEPARATOR_LINE;
10968         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10969                 REG_SEPARATOR_LINE;
10970         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10971                 REG_SEPARATOR_LINE;
10972         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10973                 REG_SEPARATOR_LINE;
10974         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10975                 REG_SEPARATOR_LINE;
10976
10977         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10978                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10979                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10980 }
10981
10982 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10983                            void *data)
10984 {
10985         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10986         struct hclge_vport *vport = hclge_get_vport(handle);
10987         struct hclge_dev *hdev = vport->back;
10988         u32 regs_num_32_bit, regs_num_64_bit;
10989         int i, reg_num, separator_num, ret;
10990         u32 *reg = data;
10991
10992         *version = hdev->fw_version;
10993
10994         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10995         if (ret) {
10996                 dev_err(&hdev->pdev->dev,
10997                         "Get register number failed, ret = %d.\n", ret);
10998                 return;
10999         }
11000
11001         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11002
11003         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11004         if (ret) {
11005                 dev_err(&hdev->pdev->dev,
11006                         "Get 32 bit register failed, ret = %d.\n", ret);
11007                 return;
11008         }
11009         reg_num = regs_num_32_bit;
11010         reg += reg_num;
11011         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11012         for (i = 0; i < separator_num; i++)
11013                 *reg++ = SEPARATOR_VALUE;
11014
11015         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11016         if (ret) {
11017                 dev_err(&hdev->pdev->dev,
11018                         "Get 64 bit register failed, ret = %d.\n", ret);
11019                 return;
11020         }
11021         reg_num = regs_num_64_bit * 2;
11022         reg += reg_num;
11023         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11024         for (i = 0; i < separator_num; i++)
11025                 *reg++ = SEPARATOR_VALUE;
11026
11027         ret = hclge_get_dfx_reg(hdev, reg);
11028         if (ret)
11029                 dev_err(&hdev->pdev->dev,
11030                         "Get dfx register failed, ret = %d.\n", ret);
11031 }
11032
11033 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11034 {
11035         struct hclge_set_led_state_cmd *req;
11036         struct hclge_desc desc;
11037         int ret;
11038
11039         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11040
11041         req = (struct hclge_set_led_state_cmd *)desc.data;
11042         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11043                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11044
11045         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11046         if (ret)
11047                 dev_err(&hdev->pdev->dev,
11048                         "Send set led state cmd error, ret =%d\n", ret);
11049
11050         return ret;
11051 }
11052
11053 enum hclge_led_status {
11054         HCLGE_LED_OFF,
11055         HCLGE_LED_ON,
11056         HCLGE_LED_NO_CHANGE = 0xFF,
11057 };
11058
11059 static int hclge_set_led_id(struct hnae3_handle *handle,
11060                             enum ethtool_phys_id_state status)
11061 {
11062         struct hclge_vport *vport = hclge_get_vport(handle);
11063         struct hclge_dev *hdev = vport->back;
11064
11065         switch (status) {
11066         case ETHTOOL_ID_ACTIVE:
11067                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11068         case ETHTOOL_ID_INACTIVE:
11069                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11070         default:
11071                 return -EINVAL;
11072         }
11073 }
11074
11075 static void hclge_get_link_mode(struct hnae3_handle *handle,
11076                                 unsigned long *supported,
11077                                 unsigned long *advertising)
11078 {
11079         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11080         struct hclge_vport *vport = hclge_get_vport(handle);
11081         struct hclge_dev *hdev = vport->back;
11082         unsigned int idx = 0;
11083
11084         for (; idx < size; idx++) {
11085                 supported[idx] = hdev->hw.mac.supported[idx];
11086                 advertising[idx] = hdev->hw.mac.advertising[idx];
11087         }
11088 }
11089
11090 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11091 {
11092         struct hclge_vport *vport = hclge_get_vport(handle);
11093         struct hclge_dev *hdev = vport->back;
11094
11095         return hclge_config_gro(hdev, enable);
11096 }
11097
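/* Sync the PF promiscuous configuration: when vport->overflow_promisc_flags
 * changes (or a previous change is still marked pending), merge it with
 * handle->netdev_flags, reprogram unicast/multicast promiscuous mode and
 * update the VLAN filter state to match.
 */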
11098 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11099 {
11100         struct hclge_vport *vport = &hdev->vport[0];
11101         struct hnae3_handle *handle = &vport->nic;
11102         u8 tmp_flags = 0;
11103         int ret;
11104
11105         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11106                 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11107                 vport->last_promisc_flags = vport->overflow_promisc_flags;
11108         }
11109
11110         if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11111                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11112                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11113                                              tmp_flags & HNAE3_MPE);
11114                 if (!ret) {
11115                         clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11116                         hclge_enable_vlan_filter(handle,
11117                                                  tmp_flags & HNAE3_VLAN_FLTR);
11118                 }
11119         }
11120 }
11121
11122 static const struct hnae3_ae_ops hclge_ops = {
11123         .init_ae_dev = hclge_init_ae_dev,
11124         .uninit_ae_dev = hclge_uninit_ae_dev,
11125         .flr_prepare = hclge_flr_prepare,
11126         .flr_done = hclge_flr_done,
11127         .init_client_instance = hclge_init_client_instance,
11128         .uninit_client_instance = hclge_uninit_client_instance,
11129         .map_ring_to_vector = hclge_map_ring_to_vector,
11130         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11131         .get_vector = hclge_get_vector,
11132         .put_vector = hclge_put_vector,
11133         .set_promisc_mode = hclge_set_promisc_mode,
11134         .request_update_promisc_mode = hclge_request_update_promisc_mode,
11135         .set_loopback = hclge_set_loopback,
11136         .start = hclge_ae_start,
11137         .stop = hclge_ae_stop,
11138         .client_start = hclge_client_start,
11139         .client_stop = hclge_client_stop,
11140         .get_status = hclge_get_status,
11141         .get_ksettings_an_result = hclge_get_ksettings_an_result,
11142         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11143         .get_media_type = hclge_get_media_type,
11144         .check_port_speed = hclge_check_port_speed,
11145         .get_fec = hclge_get_fec,
11146         .set_fec = hclge_set_fec,
11147         .get_rss_key_size = hclge_get_rss_key_size,
11148         .get_rss_indir_size = hclge_get_rss_indir_size,
11149         .get_rss = hclge_get_rss,
11150         .set_rss = hclge_set_rss,
11151         .set_rss_tuple = hclge_set_rss_tuple,
11152         .get_rss_tuple = hclge_get_rss_tuple,
11153         .get_tc_size = hclge_get_tc_size,
11154         .get_mac_addr = hclge_get_mac_addr,
11155         .set_mac_addr = hclge_set_mac_addr,
11156         .do_ioctl = hclge_do_ioctl,
11157         .add_uc_addr = hclge_add_uc_addr,
11158         .rm_uc_addr = hclge_rm_uc_addr,
11159         .add_mc_addr = hclge_add_mc_addr,
11160         .rm_mc_addr = hclge_rm_mc_addr,
11161         .set_autoneg = hclge_set_autoneg,
11162         .get_autoneg = hclge_get_autoneg,
11163         .restart_autoneg = hclge_restart_autoneg,
11164         .halt_autoneg = hclge_halt_autoneg,
11165         .get_pauseparam = hclge_get_pauseparam,
11166         .set_pauseparam = hclge_set_pauseparam,
11167         .set_mtu = hclge_set_mtu,
11168         .reset_queue = hclge_reset_tqp,
11169         .get_stats = hclge_get_stats,
11170         .get_mac_stats = hclge_get_mac_stat,
11171         .update_stats = hclge_update_stats,
11172         .get_strings = hclge_get_strings,
11173         .get_sset_count = hclge_get_sset_count,
11174         .get_fw_version = hclge_get_fw_version,
11175         .get_mdix_mode = hclge_get_mdix_mode,
11176         .enable_vlan_filter = hclge_enable_vlan_filter,
11177         .set_vlan_filter = hclge_set_vlan_filter,
11178         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11179         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11180         .reset_event = hclge_reset_event,
11181         .get_reset_level = hclge_get_reset_level,
11182         .set_default_reset_request = hclge_set_def_reset_request,
11183         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11184         .set_channels = hclge_set_channels,
11185         .get_channels = hclge_get_channels,
11186         .get_regs_len = hclge_get_regs_len,
11187         .get_regs = hclge_get_regs,
11188         .set_led_id = hclge_set_led_id,
11189         .get_link_mode = hclge_get_link_mode,
11190         .add_fd_entry = hclge_add_fd_entry,
11191         .del_fd_entry = hclge_del_fd_entry,
11192         .del_all_fd_entries = hclge_del_all_fd_entries,
11193         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11194         .get_fd_rule_info = hclge_get_fd_rule_info,
11195         .get_fd_all_rules = hclge_get_all_rules,
11196         .enable_fd = hclge_enable_fd,
11197         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11198         .dbg_run_cmd = hclge_dbg_run_cmd,
11199         .handle_hw_ras_error = hclge_handle_hw_ras_error,
11200         .get_hw_reset_stat = hclge_get_hw_reset_stat,
11201         .ae_dev_resetting = hclge_ae_dev_resetting,
11202         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11203         .set_gro_en = hclge_gro_en,
11204         .get_global_queue_id = hclge_covert_handle_qid_global,
11205         .set_timer_task = hclge_set_timer_task,
11206         .mac_connect_phy = hclge_mac_connect_phy,
11207         .mac_disconnect_phy = hclge_mac_disconnect_phy,
11208         .get_vf_config = hclge_get_vf_config,
11209         .set_vf_link_state = hclge_set_vf_link_state,
11210         .set_vf_spoofchk = hclge_set_vf_spoofchk,
11211         .set_vf_trust = hclge_set_vf_trust,
11212         .set_vf_rate = hclge_set_vf_rate,
11213         .set_vf_mac = hclge_set_vf_mac,
11214 };
11215
11216 static struct hnae3_ae_algo ae_algo = {
11217         .ops = &hclge_ops,
11218         .pdev_id_table = ae_algo_pci_tbl,
11219 };
11220
11221 static int hclge_init(void)
11222 {
11223         pr_info("%s is initializing\n", HCLGE_NAME);
11224
11225         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11226         if (!hclge_wq) {
11227                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11228                 return -ENOMEM;
11229         }
11230
11231         hnae3_register_ae_algo(&ae_algo);
11232
11233         return 0;
11234 }
11235
11236 static void hclge_exit(void)
11237 {
11238         hnae3_unregister_ae_algo(&ae_algo);
11239         destroy_workqueue(hclge_wq);
11240 }
11241 module_init(hclge_init);
11242 module_exit(hclge_exit);
11243
11244 MODULE_LICENSE("GPL");
11245 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11246 MODULE_DESCRIPTION("HCLGE Driver");
11247 MODULE_VERSION(HCLGE_MOD_VERSION);