net: hns3: refactor hclge_config_tso()
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (linux-2.6-microblaze.git)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
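/* Note: the two macros above compose so that, for example,
 *   HCLGE_STATS_READ(&hdev->mac_stats,
 *                    HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num))
 * reads the u64 counter at offsetof(struct hclge_mac_stats,
 * mac_tx_mac_pause_num), i.e. it is equivalent to
 * hdev->mac_stats.mac_tx_mac_pause_num; hclge_comm_get_stats() below walks
 * g_mac_stats_string[] using exactly these offsets.
 */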
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38 #define HCLGE_RESET_SYNC_TIME           100
39 #define HCLGE_PF_RESET_SYNC_TIME        20
40 #define HCLGE_PF_RESET_SYNC_CNT         1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55
56 #define HCLGE_LINK_STATUS_MS    10
57
58 #define HCLGE_VF_VPORT_START_NUM        1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68                                                    unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74
75 static struct hnae3_ae_algo ae_algo;
76
77 static struct workqueue_struct *hclge_wq;
78
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87         /* required last entry */
88         {0, }
89 };
90
91 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
92
93 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
94                                          HCLGE_CMDQ_TX_ADDR_H_REG,
95                                          HCLGE_CMDQ_TX_DEPTH_REG,
96                                          HCLGE_CMDQ_TX_TAIL_REG,
97                                          HCLGE_CMDQ_TX_HEAD_REG,
98                                          HCLGE_CMDQ_RX_ADDR_L_REG,
99                                          HCLGE_CMDQ_RX_ADDR_H_REG,
100                                          HCLGE_CMDQ_RX_DEPTH_REG,
101                                          HCLGE_CMDQ_RX_TAIL_REG,
102                                          HCLGE_CMDQ_RX_HEAD_REG,
103                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
104                                          HCLGE_CMDQ_INTR_STS_REG,
105                                          HCLGE_CMDQ_INTR_EN_REG,
106                                          HCLGE_CMDQ_INTR_GEN_REG};
107
108 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
109                                            HCLGE_VECTOR0_OTER_EN_REG,
110                                            HCLGE_MISC_RESET_STS_REG,
111                                            HCLGE_MISC_VECTOR_INT_STS,
112                                            HCLGE_GLOBAL_RESET_REG,
113                                            HCLGE_FUN_RST_ING,
114                                            HCLGE_GRO_EN_REG};
115
116 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
117                                          HCLGE_RING_RX_ADDR_H_REG,
118                                          HCLGE_RING_RX_BD_NUM_REG,
119                                          HCLGE_RING_RX_BD_LENGTH_REG,
120                                          HCLGE_RING_RX_MERGE_EN_REG,
121                                          HCLGE_RING_RX_TAIL_REG,
122                                          HCLGE_RING_RX_HEAD_REG,
123                                          HCLGE_RING_RX_FBD_NUM_REG,
124                                          HCLGE_RING_RX_OFFSET_REG,
125                                          HCLGE_RING_RX_FBD_OFFSET_REG,
126                                          HCLGE_RING_RX_STASH_REG,
127                                          HCLGE_RING_RX_BD_ERR_REG,
128                                          HCLGE_RING_TX_ADDR_L_REG,
129                                          HCLGE_RING_TX_ADDR_H_REG,
130                                          HCLGE_RING_TX_BD_NUM_REG,
131                                          HCLGE_RING_TX_PRIORITY_REG,
132                                          HCLGE_RING_TX_TC_REG,
133                                          HCLGE_RING_TX_MERGE_EN_REG,
134                                          HCLGE_RING_TX_TAIL_REG,
135                                          HCLGE_RING_TX_HEAD_REG,
136                                          HCLGE_RING_TX_FBD_NUM_REG,
137                                          HCLGE_RING_TX_OFFSET_REG,
138                                          HCLGE_RING_TX_EBD_NUM_REG,
139                                          HCLGE_RING_TX_EBD_OFFSET_REG,
140                                          HCLGE_RING_TX_BD_ERR_REG,
141                                          HCLGE_RING_EN_REG};
142
143 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
144                                              HCLGE_TQP_INTR_GL0_REG,
145                                              HCLGE_TQP_INTR_GL1_REG,
146                                              HCLGE_TQP_INTR_GL2_REG,
147                                              HCLGE_TQP_INTR_RL_REG};
148
149 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
150         "App    Loopback test",
151         "Serdes serial Loopback test",
152         "Serdes parallel Loopback test",
153         "Phy    Loopback test"
154 };
155
156 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
157         {"mac_tx_mac_pause_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
159         {"mac_rx_mac_pause_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
161         {"mac_tx_control_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
163         {"mac_rx_control_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
165         {"mac_tx_pfc_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
167         {"mac_tx_pfc_pri0_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
169         {"mac_tx_pfc_pri1_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
171         {"mac_tx_pfc_pri2_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
173         {"mac_tx_pfc_pri3_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
175         {"mac_tx_pfc_pri4_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
177         {"mac_tx_pfc_pri5_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
179         {"mac_tx_pfc_pri6_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
181         {"mac_tx_pfc_pri7_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
183         {"mac_rx_pfc_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
185         {"mac_rx_pfc_pri0_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
187         {"mac_rx_pfc_pri1_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
189         {"mac_rx_pfc_pri2_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
191         {"mac_rx_pfc_pri3_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
193         {"mac_rx_pfc_pri4_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
195         {"mac_rx_pfc_pri5_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
197         {"mac_rx_pfc_pri6_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
199         {"mac_rx_pfc_pri7_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
201         {"mac_tx_total_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
203         {"mac_tx_total_oct_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
205         {"mac_tx_good_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
207         {"mac_tx_bad_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
209         {"mac_tx_good_oct_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
211         {"mac_tx_bad_oct_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
213         {"mac_tx_uni_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
215         {"mac_tx_multi_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
217         {"mac_tx_broad_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
219         {"mac_tx_undersize_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
221         {"mac_tx_oversize_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
223         {"mac_tx_64_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
225         {"mac_tx_65_127_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
227         {"mac_tx_128_255_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
229         {"mac_tx_256_511_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
231         {"mac_tx_512_1023_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
233         {"mac_tx_1024_1518_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
235         {"mac_tx_1519_2047_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
237         {"mac_tx_2048_4095_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
239         {"mac_tx_4096_8191_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
241         {"mac_tx_8192_9216_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
243         {"mac_tx_9217_12287_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
245         {"mac_tx_12288_16383_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
247         {"mac_tx_1519_max_good_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
249         {"mac_tx_1519_max_bad_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
251         {"mac_rx_total_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
253         {"mac_rx_total_oct_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
255         {"mac_rx_good_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
257         {"mac_rx_bad_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
259         {"mac_rx_good_oct_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
261         {"mac_rx_bad_oct_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
263         {"mac_rx_uni_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
265         {"mac_rx_multi_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
267         {"mac_rx_broad_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
269         {"mac_rx_undersize_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
271         {"mac_rx_oversize_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
273         {"mac_rx_64_oct_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
275         {"mac_rx_65_127_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
277         {"mac_rx_128_255_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
279         {"mac_rx_256_511_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
281         {"mac_rx_512_1023_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
283         {"mac_rx_1024_1518_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
285         {"mac_rx_1519_2047_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
287         {"mac_rx_2048_4095_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
289         {"mac_rx_4096_8191_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
291         {"mac_rx_8192_9216_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
293         {"mac_rx_9217_12287_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
295         {"mac_rx_12288_16383_oct_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
297         {"mac_rx_1519_max_good_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
299         {"mac_rx_1519_max_bad_pkt_num",
300                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
301
302         {"mac_tx_fragment_pkt_num",
303                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
304         {"mac_tx_undermin_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
306         {"mac_tx_jabber_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
308         {"mac_tx_err_all_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
310         {"mac_tx_from_app_good_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
312         {"mac_tx_from_app_bad_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
314         {"mac_rx_fragment_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
316         {"mac_rx_undermin_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
318         {"mac_rx_jabber_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
320         {"mac_rx_fcs_err_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
322         {"mac_rx_send_app_good_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
324         {"mac_rx_send_app_bad_pkt_num",
325                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
326 };
327
328 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
329         {
330                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
331                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
332                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
333                 .i_port_bitmap = 0x1,
334         },
335 };
336
337 static const u8 hclge_hash_key[] = {
338         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
339         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
340         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
341         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
342         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
343 };
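/* Note: this 40-byte sequence appears to be the widely used default Toeplitz
 * RSS hash key (the key recommended in Microsoft's RSS documentation and used
 * as the default by several other NIC drivers).
 */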
344
345 static const u32 hclge_dfx_bd_offset_list[] = {
346         HCLGE_DFX_BIOS_BD_OFFSET,
347         HCLGE_DFX_SSU_0_BD_OFFSET,
348         HCLGE_DFX_SSU_1_BD_OFFSET,
349         HCLGE_DFX_IGU_BD_OFFSET,
350         HCLGE_DFX_RPU_0_BD_OFFSET,
351         HCLGE_DFX_RPU_1_BD_OFFSET,
352         HCLGE_DFX_NCSI_BD_OFFSET,
353         HCLGE_DFX_RTC_BD_OFFSET,
354         HCLGE_DFX_PPP_BD_OFFSET,
355         HCLGE_DFX_RCB_BD_OFFSET,
356         HCLGE_DFX_TQP_BD_OFFSET,
357         HCLGE_DFX_SSU_2_BD_OFFSET
358 };
359
360 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
361         HCLGE_OPC_DFX_BIOS_COMMON_REG,
362         HCLGE_OPC_DFX_SSU_REG_0,
363         HCLGE_OPC_DFX_SSU_REG_1,
364         HCLGE_OPC_DFX_IGU_EGU_REG,
365         HCLGE_OPC_DFX_RPU_REG_0,
366         HCLGE_OPC_DFX_RPU_REG_1,
367         HCLGE_OPC_DFX_NCSI_REG,
368         HCLGE_OPC_DFX_RTC_REG,
369         HCLGE_OPC_DFX_PPP_REG,
370         HCLGE_OPC_DFX_RCB_REG,
371         HCLGE_OPC_DFX_TQP_REG,
372         HCLGE_OPC_DFX_SSU_REG_2
373 };
374
375 static const struct key_info meta_data_key_info[] = {
376         { PACKET_TYPE_ID, 6},
377         { IP_FRAGEMENT, 1},
378         { ROCE_TYPE, 1},
379         { NEXT_KEY, 5},
380         { VLAN_NUMBER, 2},
381         { SRC_VPORT, 12},
382         { DST_VPORT, 12},
383         { TUNNEL_PACKET, 1},
384 };
385
386 static const struct key_info tuple_key_info[] = {
387         { OUTER_DST_MAC, 48},
388         { OUTER_SRC_MAC, 48},
389         { OUTER_VLAN_TAG_FST, 16},
390         { OUTER_VLAN_TAG_SEC, 16},
391         { OUTER_ETH_TYPE, 16},
392         { OUTER_L2_RSV, 16},
393         { OUTER_IP_TOS, 8},
394         { OUTER_IP_PROTO, 8},
395         { OUTER_SRC_IP, 32},
396         { OUTER_DST_IP, 32},
397         { OUTER_L3_RSV, 16},
398         { OUTER_SRC_PORT, 16},
399         { OUTER_DST_PORT, 16},
400         { OUTER_L4_RSV, 32},
401         { OUTER_TUN_VNI, 24},
402         { OUTER_TUN_FLOW_ID, 8},
403         { INNER_DST_MAC, 48},
404         { INNER_SRC_MAC, 48},
405         { INNER_VLAN_TAG_FST, 16},
406         { INNER_VLAN_TAG_SEC, 16},
407         { INNER_ETH_TYPE, 16},
408         { INNER_L2_RSV, 16},
409         { INNER_IP_TOS, 8},
410         { INNER_IP_PROTO, 8},
411         { INNER_SRC_IP, 32},
412         { INNER_DST_IP, 32},
413         { INNER_L3_RSV, 16},
414         { INNER_SRC_PORT, 16},
415         { INNER_DST_PORT, 16},
416         { INNER_L4_RSV, 32},
417 };
418
419 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
420 {
421 #define HCLGE_MAC_CMD_NUM 21
422
423         u64 *data = (u64 *)(&hdev->mac_stats);
424         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
425         __le64 *desc_data;
426         int i, k, n;
427         int ret;
428
429         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
430         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
431         if (ret) {
432                 dev_err(&hdev->pdev->dev,
433                         "Get MAC pkt stats fail, status = %d.\n", ret);
434
435                 return ret;
436         }
437
438         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
439                 /* for special opcode 0032, only the first desc has the head */
440                 if (unlikely(i == 0)) {
441                         desc_data = (__le64 *)(&desc[i].data[0]);
442                         n = HCLGE_RD_FIRST_STATS_NUM;
443                 } else {
444                         desc_data = (__le64 *)(&desc[i]);
445                         n = HCLGE_RD_OTHER_STATS_NUM;
446                 }
447
448                 for (k = 0; k < n; k++) {
449                         *data += le64_to_cpu(*desc_data);
450                         data++;
451                         desc_data++;
452                 }
453         }
454
455         return 0;
456 }
457
458 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
459 {
460         u64 *data = (u64 *)(&hdev->mac_stats);
461         struct hclge_desc *desc;
462         __le64 *desc_data;
463         u16 i, k, n;
464         int ret;
465
466         /* This may be called inside atomic sections,
467          * so GFP_ATOMIC is more suitable here
468          */
469         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
470         if (!desc)
471                 return -ENOMEM;
472
473         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
474         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
475         if (ret) {
476                 kfree(desc);
477                 return ret;
478         }
479
480         for (i = 0; i < desc_num; i++) {
481                 /* for special opcode 0034, only the first desc has the head */
482                 if (i == 0) {
483                         desc_data = (__le64 *)(&desc[i].data[0]);
484                         n = HCLGE_RD_FIRST_STATS_NUM;
485                 } else {
486                         desc_data = (__le64 *)(&desc[i]);
487                         n = HCLGE_RD_OTHER_STATS_NUM;
488                 }
489
490                 for (k = 0; k < n; k++) {
491                         *data += le64_to_cpu(*desc_data);
492                         data++;
493                         desc_data++;
494                 }
495         }
496
497         kfree(desc);
498
499         return 0;
500 }
501
502 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
503 {
504         struct hclge_desc desc;
505         __le32 *desc_data;
506         u32 reg_num;
507         int ret;
508
509         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
510         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
511         if (ret)
512                 return ret;
513
514         desc_data = (__le32 *)(&desc.data[0]);
515         reg_num = le32_to_cpu(*desc_data);
516
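        /* Purely as arithmetic, the expression below is
         *   desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4)
         * i.e. one descriptor plus enough further descriptors, four
         * registers each, to cover the remaining (reg_num - 3) registers.
         */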
517         *desc_num = 1 + ((reg_num - 3) >> 2) +
518                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
519
520         return 0;
521 }
522
523 static int hclge_mac_update_stats(struct hclge_dev *hdev)
524 {
525         u32 desc_num;
526         int ret;
527
528         ret = hclge_mac_query_reg_num(hdev, &desc_num);
529
530         /* The firmware supports the new statistics acquisition method */
531         if (!ret)
532                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
533         else if (ret == -EOPNOTSUPP)
534                 ret = hclge_mac_update_stats_defective(hdev);
535         else
536                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
537
538         return ret;
539 }
540
541 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
542 {
543         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
544         struct hclge_vport *vport = hclge_get_vport(handle);
545         struct hclge_dev *hdev = vport->back;
546         struct hnae3_queue *queue;
547         struct hclge_desc desc[1];
548         struct hclge_tqp *tqp;
549         int ret, i;
550
551         for (i = 0; i < kinfo->num_tqps; i++) {
552                 queue = handle->kinfo.tqp[i];
553                 tqp = container_of(queue, struct hclge_tqp, q);
554                 /* command : HCLGE_OPC_QUERY_RX_STATS */
555                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
556                                            true);
557
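                /* the queue index is truncated to its low 9 bits (0x1ff)
                 * before being sent, i.e. only values 0-511 fit here
                 */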
558                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
559                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
560                 if (ret) {
561                         dev_err(&hdev->pdev->dev,
562                                 "Query tqp stat fail, status = %d,queue = %d\n",
563                                 ret, i);
564                         return ret;
565                 }
566                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
567                         le32_to_cpu(desc[0].data[1]);
568         }
569
570         for (i = 0; i < kinfo->num_tqps; i++) {
571                 queue = handle->kinfo.tqp[i];
572                 tqp = container_of(queue, struct hclge_tqp, q);
573                 /* command : HCLGE_OPC_QUERY_TX_STATS */
574                 hclge_cmd_setup_basic_desc(&desc[0],
575                                            HCLGE_OPC_QUERY_TX_STATS,
576                                            true);
577
578                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
579                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
580                 if (ret) {
581                         dev_err(&hdev->pdev->dev,
582                                 "Query tqp stat fail, status = %d,queue = %d\n",
583                                 ret, i);
584                         return ret;
585                 }
586                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
587                         le32_to_cpu(desc[0].data[1]);
588         }
589
590         return 0;
591 }
592
593 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
594 {
595         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
596         struct hclge_tqp *tqp;
597         u64 *buff = data;
598         int i;
599
600         for (i = 0; i < kinfo->num_tqps; i++) {
601                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
602                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
603         }
604
605         for (i = 0; i < kinfo->num_tqps; i++) {
606                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
607                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
608         }
609
610         return buff;
611 }
612
613 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
614 {
615         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
616
617         /* each tqp has two queues: one TX and one RX */
618         return kinfo->num_tqps * (2);
619 }
620
621 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
622 {
623         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
624         u8 *buff = data;
625         int i = 0;
626
627         for (i = 0; i < kinfo->num_tqps; i++) {
628                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
629                         struct hclge_tqp, q);
630                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
631                          tqp->index);
632                 buff = buff + ETH_GSTRING_LEN;
633         }
634
635         for (i = 0; i < kinfo->num_tqps; i++) {
636                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
637                         struct hclge_tqp, q);
638                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
639                          tqp->index);
640                 buff = buff + ETH_GSTRING_LEN;
641         }
642
643         return buff;
644 }
645
646 static u64 *hclge_comm_get_stats(const void *comm_stats,
647                                  const struct hclge_comm_stats_str strs[],
648                                  int size, u64 *data)
649 {
650         u64 *buf = data;
651         u32 i;
652
653         for (i = 0; i < size; i++)
654                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
655
656         return buf + size;
657 }
658
659 static u8 *hclge_comm_get_strings(u32 stringset,
660                                   const struct hclge_comm_stats_str strs[],
661                                   int size, u8 *data)
662 {
663         char *buff = (char *)data;
664         u32 i;
665
666         if (stringset != ETH_SS_STATS)
667                 return buff;
668
669         for (i = 0; i < size; i++) {
670                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
671                 buff = buff + ETH_GSTRING_LEN;
672         }
673
674         return (u8 *)buff;
675 }
676
677 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
678 {
679         struct hnae3_handle *handle;
680         int status;
681
682         handle = &hdev->vport[0].nic;
683         if (handle->client) {
684                 status = hclge_tqps_update_stats(handle);
685                 if (status) {
686                         dev_err(&hdev->pdev->dev,
687                                 "Update TQPS stats fail, status = %d.\n",
688                                 status);
689                 }
690         }
691
692         status = hclge_mac_update_stats(hdev);
693         if (status)
694                 dev_err(&hdev->pdev->dev,
695                         "Update MAC stats fail, status = %d.\n", status);
696 }
697
698 static void hclge_update_stats(struct hnae3_handle *handle,
699                                struct net_device_stats *net_stats)
700 {
701         struct hclge_vport *vport = hclge_get_vport(handle);
702         struct hclge_dev *hdev = vport->back;
703         int status;
704
705         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
706                 return;
707
708         status = hclge_mac_update_stats(hdev);
709         if (status)
710                 dev_err(&hdev->pdev->dev,
711                         "Update MAC stats fail, status = %d.\n",
712                         status);
713
714         status = hclge_tqps_update_stats(handle);
715         if (status)
716                 dev_err(&hdev->pdev->dev,
717                         "Update TQPS stats fail, status = %d.\n",
718                         status);
719
720         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
721 }
722
723 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
724 {
725 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
726                 HNAE3_SUPPORT_PHY_LOOPBACK |\
727                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
728                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
729
730         struct hclge_vport *vport = hclge_get_vport(handle);
731         struct hclge_dev *hdev = vport->back;
732         int count = 0;
733
734         /* Loopback test support rules:
735          * mac: supported only in GE mode
736          * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
737          * phy: supported only when a PHY device exists on the board
738          */
739         if (stringset == ETH_SS_TEST) {
740                 /* clear loopback bit flags at first */
741                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
742                 if (hdev->pdev->revision >= 0x21 ||
743                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
744                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
745                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
746                         count += 1;
747                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
748                 }
749
750                 count += 2;
751                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
752                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
753
754                 if (hdev->hw.mac.phydev) {
755                         count += 1;
756                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
757                 }
758
759         } else if (stringset == ETH_SS_STATS) {
760                 count = ARRAY_SIZE(g_mac_stats_string) +
761                         hclge_tqps_get_sset_count(handle, stringset);
762         }
763
764         return count;
765 }
766
767 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
768                               u8 *data)
769 {
770         u8 *p = (char *)data;
771         int size;
772
773         if (stringset == ETH_SS_STATS) {
774                 size = ARRAY_SIZE(g_mac_stats_string);
775                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
776                                            size, p);
777                 p = hclge_tqps_get_strings(handle, p);
778         } else if (stringset == ETH_SS_TEST) {
779                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
780                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
781                                ETH_GSTRING_LEN);
782                         p += ETH_GSTRING_LEN;
783                 }
784                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
785                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
786                                ETH_GSTRING_LEN);
787                         p += ETH_GSTRING_LEN;
788                 }
789                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
790                         memcpy(p,
791                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
792                                ETH_GSTRING_LEN);
793                         p += ETH_GSTRING_LEN;
794                 }
795                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
796                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
797                                ETH_GSTRING_LEN);
798                         p += ETH_GSTRING_LEN;
799                 }
800         }
801 }
802
803 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
804 {
805         struct hclge_vport *vport = hclge_get_vport(handle);
806         struct hclge_dev *hdev = vport->back;
807         u64 *p;
808
809         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
810                                  ARRAY_SIZE(g_mac_stats_string), data);
811         p = hclge_tqps_get_stats(handle, p);
812 }
813
814 static void hclge_get_mac_stat(struct hnae3_handle *handle,
815                                struct hns3_mac_stats *mac_stats)
816 {
817         struct hclge_vport *vport = hclge_get_vport(handle);
818         struct hclge_dev *hdev = vport->back;
819
820         hclge_update_stats(handle, NULL);
821
822         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
823         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
824 }
825
826 static int hclge_parse_func_status(struct hclge_dev *hdev,
827                                    struct hclge_func_status_cmd *status)
828 {
829 #define HCLGE_MAC_ID_MASK       0xF
830
831         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
832                 return -EINVAL;
833
834         /* Set the pf to main pf */
835         if (status->pf_state & HCLGE_PF_STATE_MAIN)
836                 hdev->flag |= HCLGE_FLAG_MAIN;
837         else
838                 hdev->flag &= ~HCLGE_FLAG_MAIN;
839
840         hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
841         return 0;
842 }
843
844 static int hclge_query_function_status(struct hclge_dev *hdev)
845 {
846 #define HCLGE_QUERY_MAX_CNT     5
847
848         struct hclge_func_status_cmd *req;
849         struct hclge_desc desc;
850         int timeout = 0;
851         int ret;
852
853         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
854         req = (struct hclge_func_status_cmd *)desc.data;
855
856         do {
857                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
858                 if (ret) {
859                         dev_err(&hdev->pdev->dev,
860                                 "query function status failed %d.\n", ret);
861                         return ret;
862                 }
863
864                 /* Check if PF reset is done */
865                 if (req->pf_state)
866                         break;
867                 usleep_range(1000, 2000);
868         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
869
870         return hclge_parse_func_status(hdev, req);
871 }
872
873 static int hclge_query_pf_resource(struct hclge_dev *hdev)
874 {
875         struct hclge_pf_res_cmd *req;
876         struct hclge_desc desc;
877         int ret;
878
879         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
880         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
881         if (ret) {
882                 dev_err(&hdev->pdev->dev,
883                         "query pf resource failed %d.\n", ret);
884                 return ret;
885         }
886
887         req = (struct hclge_pf_res_cmd *)desc.data;
888         hdev->num_tqps = le16_to_cpu(req->tqp_num);
889         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
890
891         if (req->tx_buf_size)
892                 hdev->tx_buf_size =
893                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
894         else
895                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
896
897         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
898
899         if (req->dv_buf_size)
900                 hdev->dv_buf_size =
901                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
902         else
903                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
904
905         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
906
907         if (hnae3_dev_roce_supported(hdev)) {
908                 hdev->roce_base_msix_offset =
909                 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
910                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
911                 hdev->num_roce_msi =
912                 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
913                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
914
915                 /* the NIC's MSI-X vector count is always equal to RoCE's. */
916                 hdev->num_nic_msi = hdev->num_roce_msi;
917
918                 /* PF should have both NIC vectors and RoCE vectors;
919                  * NIC vectors are queued before RoCE vectors.
920                  */
921                 hdev->num_msi = hdev->num_roce_msi +
922                                 hdev->roce_base_msix_offset;
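                /* e.g. (hypothetical numbers): with roce_base_msix_offset = 64
                 * and num_roce_msi = 64, vectors 0..63 belong to the NIC and
                 * vectors 64..127 to RoCE, giving num_msi = 128
                 */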
923         } else {
924                 hdev->num_msi =
925                 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
926                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
927
928                 hdev->num_nic_msi = hdev->num_msi;
929         }
930
931         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
932                 dev_err(&hdev->pdev->dev,
933                         "Just %u msi resources, not enough for pf(min:2).\n",
934                         hdev->num_nic_msi);
935                 return -EINVAL;
936         }
937
938         return 0;
939 }
940
941 static int hclge_parse_speed(int speed_cmd, int *speed)
942 {
943         switch (speed_cmd) {
944         case 6:
945                 *speed = HCLGE_MAC_SPEED_10M;
946                 break;
947         case 7:
948                 *speed = HCLGE_MAC_SPEED_100M;
949                 break;
950         case 0:
951                 *speed = HCLGE_MAC_SPEED_1G;
952                 break;
953         case 1:
954                 *speed = HCLGE_MAC_SPEED_10G;
955                 break;
956         case 2:
957                 *speed = HCLGE_MAC_SPEED_25G;
958                 break;
959         case 3:
960                 *speed = HCLGE_MAC_SPEED_40G;
961                 break;
962         case 4:
963                 *speed = HCLGE_MAC_SPEED_50G;
964                 break;
965         case 5:
966                 *speed = HCLGE_MAC_SPEED_100G;
967                 break;
968         default:
969                 return -EINVAL;
970         }
971
972         return 0;
973 }
974
975 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
976 {
977         struct hclge_vport *vport = hclge_get_vport(handle);
978         struct hclge_dev *hdev = vport->back;
979         u32 speed_ability = hdev->hw.mac.speed_ability;
980         u32 speed_bit = 0;
981
982         switch (speed) {
983         case HCLGE_MAC_SPEED_10M:
984                 speed_bit = HCLGE_SUPPORT_10M_BIT;
985                 break;
986         case HCLGE_MAC_SPEED_100M:
987                 speed_bit = HCLGE_SUPPORT_100M_BIT;
988                 break;
989         case HCLGE_MAC_SPEED_1G:
990                 speed_bit = HCLGE_SUPPORT_1G_BIT;
991                 break;
992         case HCLGE_MAC_SPEED_10G:
993                 speed_bit = HCLGE_SUPPORT_10G_BIT;
994                 break;
995         case HCLGE_MAC_SPEED_25G:
996                 speed_bit = HCLGE_SUPPORT_25G_BIT;
997                 break;
998         case HCLGE_MAC_SPEED_40G:
999                 speed_bit = HCLGE_SUPPORT_40G_BIT;
1000                 break;
1001         case HCLGE_MAC_SPEED_50G:
1002                 speed_bit = HCLGE_SUPPORT_50G_BIT;
1003                 break;
1004         case HCLGE_MAC_SPEED_100G:
1005                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1006                 break;
1007         default:
1008                 return -EINVAL;
1009         }
1010
1011         if (speed_bit & speed_ability)
1012                 return 0;
1013
1014         return -EINVAL;
1015 }
1016
1017 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1018 {
1019         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1020                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1021                                  mac->supported);
1022         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1023                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1024                                  mac->supported);
1025         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1026                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1027                                  mac->supported);
1028         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1029                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1030                                  mac->supported);
1031         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1032                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1033                                  mac->supported);
1034 }
1035
1036 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1037 {
1038         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1039                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1040                                  mac->supported);
1041         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1042                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1043                                  mac->supported);
1044         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1046                                  mac->supported);
1047         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1048                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1049                                  mac->supported);
1050         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1051                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1052                                  mac->supported);
1053 }
1054
1055 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1056 {
1057         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1058                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1059                                  mac->supported);
1060         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1061                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1062                                  mac->supported);
1063         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1064                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1065                                  mac->supported);
1066         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1067                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1068                                  mac->supported);
1069         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1070                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1071                                  mac->supported);
1072 }
1073
1074 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1075 {
1076         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1077                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1078                                  mac->supported);
1079         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1080                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1081                                  mac->supported);
1082         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1083                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1084                                  mac->supported);
1085         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1086                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1087                                  mac->supported);
1088         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1089                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1090                                  mac->supported);
1091         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1092                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1093                                  mac->supported);
1094 }
1095
1096 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1097 {
1098         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1099         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1100
1101         switch (mac->speed) {
1102         case HCLGE_MAC_SPEED_10G:
1103         case HCLGE_MAC_SPEED_40G:
1104                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1105                                  mac->supported);
1106                 mac->fec_ability =
1107                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1108                 break;
1109         case HCLGE_MAC_SPEED_25G:
1110         case HCLGE_MAC_SPEED_50G:
1111                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1112                                  mac->supported);
1113                 mac->fec_ability =
1114                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1115                         BIT(HNAE3_FEC_AUTO);
1116                 break;
1117         case HCLGE_MAC_SPEED_100G:
1118                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1119                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1120                 break;
1121         default:
1122                 mac->fec_ability = 0;
1123                 break;
1124         }
1125 }
1126
1127 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1128                                         u8 speed_ability)
1129 {
1130         struct hclge_mac *mac = &hdev->hw.mac;
1131
1132         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1133                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1134                                  mac->supported);
1135
1136         hclge_convert_setting_sr(mac, speed_ability);
1137         hclge_convert_setting_lr(mac, speed_ability);
1138         hclge_convert_setting_cr(mac, speed_ability);
1139         if (hdev->pdev->revision >= 0x21)
1140                 hclge_convert_setting_fec(mac);
1141
1142         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1143         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1144         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1145 }
1146
1147 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1148                                             u8 speed_ability)
1149 {
1150         struct hclge_mac *mac = &hdev->hw.mac;
1151
1152         hclge_convert_setting_kr(mac, speed_ability);
1153         if (hdev->pdev->revision >= 0x21)
1154                 hclge_convert_setting_fec(mac);
1155         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1156         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1157         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1158 }
1159
1160 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1161                                          u8 speed_ability)
1162 {
1163         unsigned long *supported = hdev->hw.mac.supported;
1164
1165         /* default to supporting all speeds for a GE port */
1166         if (!speed_ability)
1167                 speed_ability = HCLGE_SUPPORT_GE;
1168
1169         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1171                                  supported);
1172
1173         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1174                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1175                                  supported);
1176                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1177                                  supported);
1178         }
1179
1180         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1181                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1182                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1183         }
1184
1185         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1186         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1187         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1188         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1189 }
1190
1191 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1192 {
1193         u8 media_type = hdev->hw.mac.media_type;
1194
1195         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1196                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1197         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1198                 hclge_parse_copper_link_mode(hdev, speed_ability);
1199         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1200                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1201 }
1202
1203 static u32 hclge_get_max_speed(u8 speed_ability)
1204 {
1205         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1206                 return HCLGE_MAC_SPEED_100G;
1207
1208         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1209                 return HCLGE_MAC_SPEED_50G;
1210
1211         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1212                 return HCLGE_MAC_SPEED_40G;
1213
1214         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1215                 return HCLGE_MAC_SPEED_25G;
1216
1217         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1218                 return HCLGE_MAC_SPEED_10G;
1219
1220         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1221                 return HCLGE_MAC_SPEED_1G;
1222
1223         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1224                 return HCLGE_MAC_SPEED_100M;
1225
1226         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1227                 return HCLGE_MAC_SPEED_10M;
1228
1229         return HCLGE_MAC_SPEED_1G;
1230 }
1231
1232 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1233 {
1234         struct hclge_cfg_param_cmd *req;
1235         u64 mac_addr_tmp_high;
1236         u64 mac_addr_tmp;
1237         unsigned int i;
1238
1239         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1240
1241         /* get the configuration */
1242         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243                                               HCLGE_CFG_VMDQ_M,
1244                                               HCLGE_CFG_VMDQ_S);
1245         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1246                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1247         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1248                                             HCLGE_CFG_TQP_DESC_N_M,
1249                                             HCLGE_CFG_TQP_DESC_N_S);
1250
1251         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1252                                         HCLGE_CFG_PHY_ADDR_M,
1253                                         HCLGE_CFG_PHY_ADDR_S);
1254         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1255                                           HCLGE_CFG_MEDIA_TP_M,
1256                                           HCLGE_CFG_MEDIA_TP_S);
1257         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1258                                           HCLGE_CFG_RX_BUF_LEN_M,
1259                                           HCLGE_CFG_RX_BUF_LEN_S);
1260         /* get mac_address */
1261         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1262         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1263                                             HCLGE_CFG_MAC_ADDR_H_M,
1264                                             HCLGE_CFG_MAC_ADDR_H_S);
1265
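        /* the low 32 bits of the MAC address come from param[2] and the
         * upper bits from param[3]; the two-step shift below is simply
         * equivalent to shifting mac_addr_tmp_high left by 32 bits
         */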
1266         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1267
1268         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1269                                              HCLGE_CFG_DEFAULT_SPEED_M,
1270                                              HCLGE_CFG_DEFAULT_SPEED_S);
1271         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1272                                             HCLGE_CFG_RSS_SIZE_M,
1273                                             HCLGE_CFG_RSS_SIZE_S);
1274
1275         for (i = 0; i < ETH_ALEN; i++)
1276                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1277
1278         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1279         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1280
1281         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282                                              HCLGE_CFG_SPEED_ABILITY_M,
1283                                              HCLGE_CFG_SPEED_ABILITY_S);
1284         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1285                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1286                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1287         if (!cfg->umv_space)
1288                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1289 }
1290
1291 /* hclge_get_cfg: query the static parameters from flash
1292  * @hdev: pointer to struct hclge_dev
1293  * @hcfg: the config structure to be filled
1294  */
1295 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1296 {
1297         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1298         struct hclge_cfg_param_cmd *req;
1299         unsigned int i;
1300         int ret;
1301
1302         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1303                 u32 offset = 0;
1304
1305                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1306                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1307                                            true);
1308                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1309                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1310                 /* The read length is expressed in units of 4 bytes when sent to hardware */
1311                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1312                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1313                 req->offset = cpu_to_le32(offset);
1314         }
1315
1316         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1317         if (ret) {
1318                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1319                 return ret;
1320         }
1321
1322         hclge_parse_cfg(hcfg, desc);
1323
1324         return 0;
1325 }
1326
1327 static int hclge_get_cap(struct hclge_dev *hdev)
1328 {
1329         int ret;
1330
1331         ret = hclge_query_function_status(hdev);
1332         if (ret) {
1333                 dev_err(&hdev->pdev->dev,
1334                         "query function status error %d.\n", ret);
1335                 return ret;
1336         }
1337
1338         /* get pf resource */
1339         return hclge_query_pf_resource(hdev);
1340 }
1341
1342 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1343 {
1344 #define HCLGE_MIN_TX_DESC       64
1345 #define HCLGE_MIN_RX_DESC       64
1346
1347         if (!is_kdump_kernel())
1348                 return;
1349
1350         dev_info(&hdev->pdev->dev,
1351                  "Running kdump kernel. Using minimal resources\n");
1352
1353         /* the minimal number of queue pairs equals the number of vports */
1354         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1355         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1356         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1357 }
1358
1359 static int hclge_configure(struct hclge_dev *hdev)
1360 {
1361         struct hclge_cfg cfg;
1362         unsigned int i;
1363         int ret;
1364
1365         ret = hclge_get_cfg(hdev, &cfg);
1366         if (ret)
1367                 return ret;
1368
1369         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1370         hdev->base_tqp_pid = 0;
1371         hdev->rss_size_max = cfg.rss_size_max;
1372         hdev->rx_buf_len = cfg.rx_buf_len;
1373         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1374         hdev->hw.mac.media_type = cfg.media_type;
1375         hdev->hw.mac.phy_addr = cfg.phy_addr;
1376         hdev->num_tx_desc = cfg.tqp_desc_num;
1377         hdev->num_rx_desc = cfg.tqp_desc_num;
1378         hdev->tm_info.num_pg = 1;
1379         hdev->tc_max = cfg.tc_num;
1380         hdev->tm_info.hw_pfc_map = 0;
1381         hdev->wanted_umv_size = cfg.umv_space;
1382
1383         if (hnae3_dev_fd_supported(hdev)) {
1384                 hdev->fd_en = true;
1385                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1386         }
1387
1388         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1389         if (ret) {
1390                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1391                 return ret;
1392         }
1393
1394         hclge_parse_link_mode(hdev, cfg.speed_ability);
1395
1396         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1397
1398         if (hdev->tc_max > HNAE3_MAX_TC ||
1399             hdev->tc_max < 1) {
1400                 dev_warn(&hdev->pdev->dev, "invalid TC num = %u, set to 1.\n",
1401                          hdev->tc_max);
1402                 hdev->tc_max = 1;
1403         }
1404
1405         /* Dev does not support DCB */
1406         if (!hnae3_dev_dcb_supported(hdev)) {
1407                 hdev->tc_max = 1;
1408                 hdev->pfc_max = 0;
1409         } else {
1410                 hdev->pfc_max = hdev->tc_max;
1411         }
1412
1413         hdev->tm_info.num_tc = 1;
1414
1415         /* non-contiguous TCs are currently not supported */
1416         for (i = 0; i < hdev->tm_info.num_tc; i++)
1417                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1418
1419         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1420
1421         hclge_init_kdump_kernel_config(hdev);
1422
1423         /* Set the initial affinity based on the PCI function number */
1424         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1425         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1426         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1427                         &hdev->affinity_mask);
1428
1429         return ret;
1430 }
1431
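/* Configure the TSO MSS range supported by the hardware; the minimum and
 * maximum MSS are carried in a single command descriptor.
 */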
1432 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1433                             u16 tso_mss_max)
1434 {
1435         struct hclge_cfg_tso_status_cmd *req;
1436         struct hclge_desc desc;
1437
1438         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1439
1440         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1441         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1442         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1443
1444         return hclge_cmd_send(&hdev->hw, &desc, 1);
1445 }
1446
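/* Enable or disable hardware GRO; returns 0 without sending the command
 * when the device does not support GRO.
 */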
1447 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1448 {
1449         struct hclge_cfg_gro_status_cmd *req;
1450         struct hclge_desc desc;
1451         int ret;
1452
1453         if (!hnae3_dev_gro_supported(hdev))
1454                 return 0;
1455
1456         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1457         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1458
1459         req->gro_en = cpu_to_le16(en ? 1 : 0);
1460
1461         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1462         if (ret)
1463                 dev_err(&hdev->pdev->dev,
1464                         "GRO hardware config cmd failed, ret = %d\n", ret);
1465
1466         return ret;
1467 }
1468
1469 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1470 {
1471         struct hclge_tqp *tqp;
1472         int i;
1473
1474         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1475                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1476         if (!hdev->htqp)
1477                 return -ENOMEM;
1478
1479         tqp = hdev->htqp;
1480
1481         for (i = 0; i < hdev->num_tqps; i++) {
1482                 tqp->dev = &hdev->pdev->dev;
1483                 tqp->index = i;
1484
1485                 tqp->q.ae_algo = &ae_algo;
1486                 tqp->q.buf_size = hdev->rx_buf_len;
1487                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1488                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1489                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1490                         i * HCLGE_TQP_REG_SIZE;
1491
1492                 tqp++;
1493         }
1494
1495         return 0;
1496 }
1497
1498 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1499                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1500 {
1501         struct hclge_tqp_map_cmd *req;
1502         struct hclge_desc desc;
1503         int ret;
1504
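        /* map physical queue 'tqp_pid' to virtual queue 'tqp_vid' of the
         * given function; the map-type bit marks VF mappings
         */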
1505         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1506
1507         req = (struct hclge_tqp_map_cmd *)desc.data;
1508         req->tqp_id = cpu_to_le16(tqp_pid);
1509         req->tqp_vf = func_id;
1510         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1511         if (!is_pf)
1512                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1513         req->tqp_vid = cpu_to_le16(tqp_vid);
1514
1515         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1516         if (ret)
1517                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1518
1519         return ret;
1520 }
1521
1522 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1523 {
1524         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1525         struct hclge_dev *hdev = vport->back;
1526         int i, alloced;
1527
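        /* hand the first 'num_tqps' queue pairs that are still unallocated
         * to this vport
         */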
1528         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1529              alloced < num_tqps; i++) {
1530                 if (!hdev->htqp[i].alloced) {
1531                         hdev->htqp[i].q.handle = &vport->nic;
1532                         hdev->htqp[i].q.tqp_index = alloced;
1533                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1534                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1535                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1536                         hdev->htqp[i].alloced = true;
1537                         alloced++;
1538                 }
1539         }
1540         vport->alloc_tqps = alloced;
1541         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1542                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1543
1544         /* ensure a one-to-one mapping between irq and queue by default */
1545         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1546                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1547
1548         return 0;
1549 }
1550
1551 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1552                             u16 num_tx_desc, u16 num_rx_desc)
1554 {
1555         struct hnae3_handle *nic = &vport->nic;
1556         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1557         struct hclge_dev *hdev = vport->back;
1558         int ret;
1559
1560         kinfo->num_tx_desc = num_tx_desc;
1561         kinfo->num_rx_desc = num_rx_desc;
1562
1563         kinfo->rx_buf_len = hdev->rx_buf_len;
1564
1565         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1566                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1567         if (!kinfo->tqp)
1568                 return -ENOMEM;
1569
1570         ret = hclge_assign_tqp(vport, num_tqps);
1571         if (ret)
1572                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1573
1574         return ret;
1575 }
1576
1577 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1578                                   struct hclge_vport *vport)
1579 {
1580         struct hnae3_handle *nic = &vport->nic;
1581         struct hnae3_knic_private_info *kinfo;
1582         u16 i;
1583
1584         kinfo = &nic->kinfo;
1585         for (i = 0; i < vport->alloc_tqps; i++) {
1586                 struct hclge_tqp *q =
1587                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1588                 bool is_pf;
1589                 int ret;
1590
1591                 is_pf = !(vport->vport_id);
1592                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1593                                              i, is_pf);
1594                 if (ret)
1595                         return ret;
1596         }
1597
1598         return 0;
1599 }
1600
1601 static int hclge_map_tqp(struct hclge_dev *hdev)
1602 {
1603         struct hclge_vport *vport = hdev->vport;
1604         u16 i, num_vport;
1605
1606         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1607         for (i = 0; i < num_vport; i++) {
1608                 int ret;
1609
1610                 ret = hclge_map_tqp_to_vport(hdev, vport);
1611                 if (ret)
1612                         return ret;
1613
1614                 vport++;
1615         }
1616
1617         return 0;
1618 }
1619
1620 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1621 {
1622         struct hnae3_handle *nic = &vport->nic;
1623         struct hclge_dev *hdev = vport->back;
1624         int ret;
1625
1626         nic->pdev = hdev->pdev;
1627         nic->ae_algo = &ae_algo;
1628         nic->numa_node_mask = hdev->numa_node_mask;
1629
1630         ret = hclge_knic_setup(vport, num_tqps,
1631                                hdev->num_tx_desc, hdev->num_rx_desc);
1632         if (ret)
1633                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1634
1635         return ret;
1636 }
1637
1638 static int hclge_alloc_vport(struct hclge_dev *hdev)
1639 {
1640         struct pci_dev *pdev = hdev->pdev;
1641         struct hclge_vport *vport;
1642         u32 tqp_main_vport;
1643         u32 tqp_per_vport;
1644         int num_vport, i;
1645         int ret;
1646
1647         /* We need to alloc a vport for the main NIC of the PF */
1648         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1649
1650         if (hdev->num_tqps < num_vport) {
1651                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1652                         hdev->num_tqps, num_vport);
1653                 return -EINVAL;
1654         }
1655
1656         /* Alloc the same number of TQPs for every vport */
1657         tqp_per_vport = hdev->num_tqps / num_vport;
1658         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1659
1660         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1661                              GFP_KERNEL);
1662         if (!vport)
1663                 return -ENOMEM;
1664
1665         hdev->vport = vport;
1666         hdev->num_alloc_vport = num_vport;
1667
1668         if (IS_ENABLED(CONFIG_PCI_IOV))
1669                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1670
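        /* vport 0 is the PF's own vport; it gets the per-vport share plus
         * any remainder TQPs
         */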
1671         for (i = 0; i < num_vport; i++) {
1672                 vport->back = hdev;
1673                 vport->vport_id = i;
1674                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1675                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1676                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1677                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1678                 INIT_LIST_HEAD(&vport->vlan_list);
1679                 INIT_LIST_HEAD(&vport->uc_mac_list);
1680                 INIT_LIST_HEAD(&vport->mc_mac_list);
1681                 spin_lock_init(&vport->mac_list_lock);
1682
1683                 if (i == 0)
1684                         ret = hclge_vport_setup(vport, tqp_main_vport);
1685                 else
1686                         ret = hclge_vport_setup(vport, tqp_per_vport);
1687                 if (ret) {
1688                         dev_err(&pdev->dev,
1689                                 "vport setup failed for vport %d, %d\n",
1690                                 i, ret);
1691                         return ret;
1692                 }
1693
1694                 vport++;
1695         }
1696
1697         return 0;
1698 }
1699
1700 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1701                                     struct hclge_pkt_buf_alloc *buf_alloc)
1702 {
1703 /* TX buffer size is in units of 128 bytes */
1704 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1705 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1706         struct hclge_tx_buff_alloc_cmd *req;
1707         struct hclge_desc desc;
1708         int ret;
1709         u8 i;
1710
1711         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1712
1713         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1714         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1715                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1716
1717                 req->tx_pkt_buff[i] =
1718                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1719                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1720         }
1721
1722         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1723         if (ret)
1724                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1725                         ret);
1726
1727         return ret;
1728 }
1729
1730 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1731                                  struct hclge_pkt_buf_alloc *buf_alloc)
1732 {
1733         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1734
1735         if (ret)
1736                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1737
1738         return ret;
1739 }
1740
1741 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1742 {
1743         unsigned int i;
1744         u32 cnt = 0;
1745
1746         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1747                 if (hdev->hw_tc_map & BIT(i))
1748                         cnt++;
1749         return cnt;
1750 }
1751
1752 /* Get the number of pfc enabled TCs that have a private buffer */
1753 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1754                                   struct hclge_pkt_buf_alloc *buf_alloc)
1755 {
1756         struct hclge_priv_buf *priv;
1757         unsigned int i;
1758         int cnt = 0;
1759
1760         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1761                 priv = &buf_alloc->priv_buf[i];
1762                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1763                     priv->enable)
1764                         cnt++;
1765         }
1766
1767         return cnt;
1768 }
1769
1770 /* Get the number of pfc disabled TCs that have a private buffer */
1771 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1772                                      struct hclge_pkt_buf_alloc *buf_alloc)
1773 {
1774         struct hclge_priv_buf *priv;
1775         unsigned int i;
1776         int cnt = 0;
1777
1778         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1779                 priv = &buf_alloc->priv_buf[i];
1780                 if (hdev->hw_tc_map & BIT(i) &&
1781                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1782                     priv->enable)
1783                         cnt++;
1784         }
1785
1786         return cnt;
1787 }
1788
1789 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1790 {
1791         struct hclge_priv_buf *priv;
1792         u32 rx_priv = 0;
1793         int i;
1794
1795         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1796                 priv = &buf_alloc->priv_buf[i];
1797                 if (priv->enable)
1798                         rx_priv += priv->buf_size;
1799         }
1800         return rx_priv;
1801 }
1802
1803 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1804 {
1805         u32 i, total_tx_size = 0;
1806
1807         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1808                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1809
1810         return total_tx_size;
1811 }
1812
1813 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1814                                 struct hclge_pkt_buf_alloc *buf_alloc,
1815                                 u32 rx_all)
1816 {
1817         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1818         u32 tc_num = hclge_get_tc_num(hdev);
1819         u32 shared_buf, aligned_mps;
1820         u32 rx_priv;
1821         int i;
1822
1823         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1824
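        /* minimum shared buffer: with DCB it must hold two aligned MPS plus
         * the DV buffer, otherwise one aligned MPS plus the additional and
         * DV buffers
         */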
1825         if (hnae3_dev_dcb_supported(hdev))
1826                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1827                                         hdev->dv_buf_size;
1828         else
1829                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1830                                         + hdev->dv_buf_size;
1831
1832         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1833         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1834                              HCLGE_BUF_SIZE_UNIT);
1835
1836         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1837         if (rx_all < rx_priv + shared_std)
1838                 return false;
1839
1840         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1841         buf_alloc->s_buf.buf_size = shared_buf;
1842         if (hnae3_dev_dcb_supported(hdev)) {
1843                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1844                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1845                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1846                                   HCLGE_BUF_SIZE_UNIT);
1847         } else {
1848                 buf_alloc->s_buf.self.high = aligned_mps +
1849                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1850                 buf_alloc->s_buf.self.low = aligned_mps;
1851         }
1852
1853         if (hnae3_dev_dcb_supported(hdev)) {
1854                 hi_thrd = shared_buf - hdev->dv_buf_size;
1855
1856                 if (tc_num <= NEED_RESERVE_TC_NUM)
1857                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1858                                         / BUF_MAX_PERCENT;
1859
1860                 if (tc_num)
1861                         hi_thrd = hi_thrd / tc_num;
1862
1863                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1864                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1865                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1866         } else {
1867                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1868                 lo_thrd = aligned_mps;
1869         }
1870
1871         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1872                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1873                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1874         }
1875
1876         return true;
1877 }
1878
1879 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1880                                 struct hclge_pkt_buf_alloc *buf_alloc)
1881 {
1882         u32 i, total_size;
1883
1884         total_size = hdev->pkt_buf_size;
1885
1886         /* alloc tx buffer for all enabled tc */
1887         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1888                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1889
1890                 if (hdev->hw_tc_map & BIT(i)) {
1891                         if (total_size < hdev->tx_buf_size)
1892                                 return -ENOMEM;
1893
1894                         priv->tx_buf_size = hdev->tx_buf_size;
1895                 } else {
1896                         priv->tx_buf_size = 0;
1897                 }
1898
1899                 total_size -= priv->tx_buf_size;
1900         }
1901
1902         return 0;
1903 }
1904
1905 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1906                                   struct hclge_pkt_buf_alloc *buf_alloc)
1907 {
1908         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1909         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1910         unsigned int i;
1911
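        /* set up private buffer and waterlines for every enabled TC;
         * 'max' selects the larger waterline scheme
         */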
1912         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1913                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1914
1915                 priv->enable = 0;
1916                 priv->wl.low = 0;
1917                 priv->wl.high = 0;
1918                 priv->buf_size = 0;
1919
1920                 if (!(hdev->hw_tc_map & BIT(i)))
1921                         continue;
1922
1923                 priv->enable = 1;
1924
1925                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1926                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1927                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1928                                                 HCLGE_BUF_SIZE_UNIT);
1929                 } else {
1930                         priv->wl.low = 0;
1931                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1932                                         aligned_mps;
1933                 }
1934
1935                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1936         }
1937
1938         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1939 }
1940
1941 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1942                                           struct hclge_pkt_buf_alloc *buf_alloc)
1943 {
1944         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1945         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1946         int i;
1947
1948         /* clear the TCs starting from the last one */
1949         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1950                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1951                 unsigned int mask = BIT((unsigned int)i);
1952
1953                 if (hdev->hw_tc_map & mask &&
1954                     !(hdev->tm_info.hw_pfc_map & mask)) {
1955                         /* Clear the private buffer of the TC without pfc */
1956                         priv->wl.low = 0;
1957                         priv->wl.high = 0;
1958                         priv->buf_size = 0;
1959                         priv->enable = 0;
1960                         no_pfc_priv_num--;
1961                 }
1962
1963                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1964                     no_pfc_priv_num == 0)
1965                         break;
1966         }
1967
1968         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1969 }
1970
1971 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1972                                         struct hclge_pkt_buf_alloc *buf_alloc)
1973 {
1974         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1975         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1976         int i;
1977
1978         /* clear the TCs starting from the last one */
1979         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1980                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1981                 unsigned int mask = BIT((unsigned int)i);
1982
1983                 if (hdev->hw_tc_map & mask &&
1984                     hdev->tm_info.hw_pfc_map & mask) {
1985                         /* Reduce the number of pfc TCs with a private buffer */
1986                         priv->wl.low = 0;
1987                         priv->enable = 0;
1988                         priv->wl.high = 0;
1989                         priv->buf_size = 0;
1990                         pfc_priv_num--;
1991                 }
1992
1993                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1994                     pfc_priv_num == 0)
1995                         break;
1996         }
1997
1998         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1999 }
2000
2001 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2002                                        struct hclge_pkt_buf_alloc *buf_alloc)
2003 {
2004 #define COMPENSATE_BUFFER       0x3C00
2005 #define COMPENSATE_HALF_MPS_NUM 5
2006 #define PRIV_WL_GAP             0x1800
2007
2008         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2009         u32 tc_num = hclge_get_tc_num(hdev);
2010         u32 half_mps = hdev->mps >> 1;
2011         u32 min_rx_priv;
2012         unsigned int i;
2013
2014         if (tc_num)
2015                 rx_priv = rx_priv / tc_num;
2016
2017         if (tc_num <= NEED_RESERVE_TC_NUM)
2018                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2019
2020         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2021                         COMPENSATE_HALF_MPS_NUM * half_mps;
2022         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2023         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2024
2025         if (rx_priv < min_rx_priv)
2026                 return false;
2027
2028         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2029                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2030
2031                 priv->enable = 0;
2032                 priv->wl.low = 0;
2033                 priv->wl.high = 0;
2034                 priv->buf_size = 0;
2035
2036                 if (!(hdev->hw_tc_map & BIT(i)))
2037                         continue;
2038
2039                 priv->enable = 1;
2040                 priv->buf_size = rx_priv;
2041                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2042                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2043         }
2044
2045         buf_alloc->s_buf.buf_size = 0;
2046
2047         return true;
2048 }
2049
2050 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2051  * @hdev: pointer to struct hclge_dev
2052  * @buf_alloc: pointer to buffer calculation data
2053  * @return: 0: calculation successful, negative: fail
2054  */
2055 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2056                                 struct hclge_pkt_buf_alloc *buf_alloc)
2057 {
2058         /* When DCB is not supported, rx private buffer is not allocated. */
2059         if (!hnae3_dev_dcb_supported(hdev)) {
2060                 u32 rx_all = hdev->pkt_buf_size;
2061
2062                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2063                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2064                         return -ENOMEM;
2065
2066                 return 0;
2067         }
2068
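        /* try each allocation strategy below in turn until one fits within
         * the available rx buffer
         */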
2069         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2070                 return 0;
2071
2072         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2073                 return 0;
2074
2075         /* try to decrease the buffer size */
2076         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2077                 return 0;
2078
2079         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2080                 return 0;
2081
2082         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2083                 return 0;
2084
2085         return -ENOMEM;
2086 }
2087
2088 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2089                                    struct hclge_pkt_buf_alloc *buf_alloc)
2090 {
2091         struct hclge_rx_priv_buff_cmd *req;
2092         struct hclge_desc desc;
2093         int ret;
2094         int i;
2095
2096         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2097         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2098
2099         /* Alloc private buffer for each TC */
2100         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2101                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2102
2103                 req->buf_num[i] =
2104                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2105                 req->buf_num[i] |=
2106                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2107         }
2108
2109         req->shared_buf =
2110                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2111                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2112
2113         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2114         if (ret)
2115                 dev_err(&hdev->pdev->dev,
2116                         "rx private buffer alloc cmd failed %d\n", ret);
2117
2118         return ret;
2119 }
2120
2121 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2122                                    struct hclge_pkt_buf_alloc *buf_alloc)
2123 {
2124         struct hclge_rx_priv_wl_buf *req;
2125         struct hclge_priv_buf *priv;
2126         struct hclge_desc desc[2];
2127         int i, j;
2128         int ret;
2129
2130         for (i = 0; i < 2; i++) {
2131                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2132                                            false);
2133                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2134
2135                 /* The first descriptor sets the NEXT bit to 1 */
2136                 if (i == 0)
2137                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2138                 else
2139                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2140
2141                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2142                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2143
2144                         priv = &buf_alloc->priv_buf[idx];
2145                         req->tc_wl[j].high =
2146                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2147                         req->tc_wl[j].high |=
2148                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2149                         req->tc_wl[j].low =
2150                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2151                         req->tc_wl[j].low |=
2152                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2153                 }
2154         }
2155
2156         /* Send 2 descriptors at one time */
2157         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2158         if (ret)
2159                 dev_err(&hdev->pdev->dev,
2160                         "rx private waterline config cmd failed %d\n",
2161                         ret);
2162         return ret;
2163 }
2164
2165 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2166                                     struct hclge_pkt_buf_alloc *buf_alloc)
2167 {
2168         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2169         struct hclge_rx_com_thrd *req;
2170         struct hclge_desc desc[2];
2171         struct hclge_tc_thrd *tc;
2172         int i, j;
2173         int ret;
2174
2175         for (i = 0; i < 2; i++) {
2176                 hclge_cmd_setup_basic_desc(&desc[i],
2177                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2178                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2179
2180                 /* The first descriptor sets the NEXT bit to 1 */
2181                 if (i == 0)
2182                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2183                 else
2184                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2185
2186                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2187                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2188
2189                         req->com_thrd[j].high =
2190                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2191                         req->com_thrd[j].high |=
2192                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2193                         req->com_thrd[j].low =
2194                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2195                         req->com_thrd[j].low |=
2196                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2197                 }
2198         }
2199
2200         /* Send 2 descriptors at one time */
2201         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2202         if (ret)
2203                 dev_err(&hdev->pdev->dev,
2204                         "common threshold config cmd failed %d\n", ret);
2205         return ret;
2206 }
2207
2208 static int hclge_common_wl_config(struct hclge_dev *hdev,
2209                                   struct hclge_pkt_buf_alloc *buf_alloc)
2210 {
2211         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2212         struct hclge_rx_com_wl *req;
2213         struct hclge_desc desc;
2214         int ret;
2215
2216         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2217
2218         req = (struct hclge_rx_com_wl *)desc.data;
2219         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2220         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2221
2222         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2223         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2224
2225         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2226         if (ret)
2227                 dev_err(&hdev->pdev->dev,
2228                         "common waterline config cmd failed %d\n", ret);
2229
2230         return ret;
2231 }
2232
2233 int hclge_buffer_alloc(struct hclge_dev *hdev)
2234 {
2235         struct hclge_pkt_buf_alloc *pkt_buf;
2236         int ret;
2237
2238         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2239         if (!pkt_buf)
2240                 return -ENOMEM;
2241
2242         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2243         if (ret) {
2244                 dev_err(&hdev->pdev->dev,
2245                         "could not calc tx buffer size for all TCs %d\n", ret);
2246                 goto out;
2247         }
2248
2249         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2250         if (ret) {
2251                 dev_err(&hdev->pdev->dev,
2252                         "could not alloc tx buffers %d\n", ret);
2253                 goto out;
2254         }
2255
2256         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2257         if (ret) {
2258                 dev_err(&hdev->pdev->dev,
2259                         "could not calc rx priv buffer size for all TCs %d\n",
2260                         ret);
2261                 goto out;
2262         }
2263
2264         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2265         if (ret) {
2266                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2267                         ret);
2268                 goto out;
2269         }
2270
2271         if (hnae3_dev_dcb_supported(hdev)) {
2272                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2273                 if (ret) {
2274                         dev_err(&hdev->pdev->dev,
2275                                 "could not configure rx private waterline %d\n",
2276                                 ret);
2277                         goto out;
2278                 }
2279
2280                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2281                 if (ret) {
2282                         dev_err(&hdev->pdev->dev,
2283                                 "could not configure common threshold %d\n",
2284                                 ret);
2285                         goto out;
2286                 }
2287         }
2288
2289         ret = hclge_common_wl_config(hdev, pkt_buf);
2290         if (ret)
2291                 dev_err(&hdev->pdev->dev,
2292                         "could not configure common waterline %d\n", ret);
2293
2294 out:
2295         kfree(pkt_buf);
2296         return ret;
2297 }
2298
2299 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2300 {
2301         struct hnae3_handle *roce = &vport->roce;
2302         struct hnae3_handle *nic = &vport->nic;
2303
2304         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2305
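        /* make sure enough MSI vectors remain for the RoCE client */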
2306         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2307             vport->back->num_msi_left == 0)
2308                 return -EINVAL;
2309
2310         roce->rinfo.base_vector = vport->back->roce_base_vector;
2311
2312         roce->rinfo.netdev = nic->kinfo.netdev;
2313         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2314
2315         roce->pdev = nic->pdev;
2316         roce->ae_algo = nic->ae_algo;
2317         roce->numa_node_mask = nic->numa_node_mask;
2318
2319         return 0;
2320 }
2321
2322 static int hclge_init_msi(struct hclge_dev *hdev)
2323 {
2324         struct pci_dev *pdev = hdev->pdev;
2325         int vectors;
2326         int i;
2327
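        /* request up to num_msi vectors, accepting anything down to the
         * driver minimum
         */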
2328         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2329                                         hdev->num_msi,
2330                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2331         if (vectors < 0) {
2332                 dev_err(&pdev->dev,
2333                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2334                         vectors);
2335                 return vectors;
2336         }
2337         if (vectors < hdev->num_msi)
2338                 dev_warn(&hdev->pdev->dev,
2339                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2340                          hdev->num_msi, vectors);
2341
2342         hdev->num_msi = vectors;
2343         hdev->num_msi_left = vectors;
2344
2345         hdev->base_msi_vector = pdev->irq;
2346         hdev->roce_base_vector = hdev->base_msi_vector +
2347                                 hdev->roce_base_msix_offset;
2348
2349         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2350                                            sizeof(u16), GFP_KERNEL);
2351         if (!hdev->vector_status) {
2352                 pci_free_irq_vectors(pdev);
2353                 return -ENOMEM;
2354         }
2355
2356         for (i = 0; i < hdev->num_msi; i++)
2357                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2358
2359         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2360                                         sizeof(int), GFP_KERNEL);
2361         if (!hdev->vector_irq) {
2362                 pci_free_irq_vectors(pdev);
2363                 return -ENOMEM;
2364         }
2365
2366         return 0;
2367 }
2368
2369 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2370 {
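        /* only 10M and 100M links can run at half duplex, force full
         * duplex otherwise
         */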
2371         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2372                 duplex = HCLGE_MAC_FULL;
2373
2374         return duplex;
2375 }
2376
2377 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2378                                       u8 duplex)
2379 {
2380         struct hclge_config_mac_speed_dup_cmd *req;
2381         struct hclge_desc desc;
2382         int ret;
2383
2384         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2385
2386         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2387
2388         if (duplex)
2389                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2390
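        /* translate the MAC speed into the encoding used by the
         * speed/duplex config command
         */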
2391         switch (speed) {
2392         case HCLGE_MAC_SPEED_10M:
2393                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2394                                 HCLGE_CFG_SPEED_S, 6);
2395                 break;
2396         case HCLGE_MAC_SPEED_100M:
2397                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2398                                 HCLGE_CFG_SPEED_S, 7);
2399                 break;
2400         case HCLGE_MAC_SPEED_1G:
2401                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2402                                 HCLGE_CFG_SPEED_S, 0);
2403                 break;
2404         case HCLGE_MAC_SPEED_10G:
2405                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2406                                 HCLGE_CFG_SPEED_S, 1);
2407                 break;
2408         case HCLGE_MAC_SPEED_25G:
2409                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2410                                 HCLGE_CFG_SPEED_S, 2);
2411                 break;
2412         case HCLGE_MAC_SPEED_40G:
2413                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2414                                 HCLGE_CFG_SPEED_S, 3);
2415                 break;
2416         case HCLGE_MAC_SPEED_50G:
2417                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2418                                 HCLGE_CFG_SPEED_S, 4);
2419                 break;
2420         case HCLGE_MAC_SPEED_100G:
2421                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2422                                 HCLGE_CFG_SPEED_S, 5);
2423                 break;
2424         default:
2425                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2426                 return -EINVAL;
2427         }
2428
2429         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2430                       1);
2431
2432         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2433         if (ret) {
2434                 dev_err(&hdev->pdev->dev,
2435                         "mac speed/duplex config cmd failed %d.\n", ret);
2436                 return ret;
2437         }
2438
2439         return 0;
2440 }
2441
2442 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2443 {
2444         struct hclge_mac *mac = &hdev->hw.mac;
2445         int ret;
2446
2447         duplex = hclge_check_speed_dup(duplex, speed);
2448         if (!mac->support_autoneg && mac->speed == speed &&
2449             mac->duplex == duplex)
2450                 return 0;
2451
2452         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2453         if (ret)
2454                 return ret;
2455
2456         hdev->hw.mac.speed = speed;
2457         hdev->hw.mac.duplex = duplex;
2458
2459         return 0;
2460 }
2461
2462 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2463                                      u8 duplex)
2464 {
2465         struct hclge_vport *vport = hclge_get_vport(handle);
2466         struct hclge_dev *hdev = vport->back;
2467
2468         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2469 }
2470
2471 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2472 {
2473         struct hclge_config_auto_neg_cmd *req;
2474         struct hclge_desc desc;
2475         u32 flag = 0;
2476         int ret;
2477
2478         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2479
2480         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2481         if (enable)
2482                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2483         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2484
2485         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2486         if (ret)
2487                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2488                         ret);
2489
2490         return ret;
2491 }
2492
2493 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2494 {
2495         struct hclge_vport *vport = hclge_get_vport(handle);
2496         struct hclge_dev *hdev = vport->back;
2497
2498         if (!hdev->hw.mac.support_autoneg) {
2499                 if (enable) {
2500                         dev_err(&hdev->pdev->dev,
2501                                 "autoneg is not supported by current port\n");
2502                         return -EOPNOTSUPP;
2503                 } else {
2504                         return 0;
2505                 }
2506         }
2507
2508         return hclge_set_autoneg_en(hdev, enable);
2509 }
2510
2511 static int hclge_get_autoneg(struct hnae3_handle *handle)
2512 {
2513         struct hclge_vport *vport = hclge_get_vport(handle);
2514         struct hclge_dev *hdev = vport->back;
2515         struct phy_device *phydev = hdev->hw.mac.phydev;
2516
2517         if (phydev)
2518                 return phydev->autoneg;
2519
2520         return hdev->hw.mac.autoneg;
2521 }
2522
2523 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2524 {
2525         struct hclge_vport *vport = hclge_get_vport(handle);
2526         struct hclge_dev *hdev = vport->back;
2527         int ret;
2528
2529         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2530
2531         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2532         if (ret)
2533                 return ret;
2534         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2535 }
2536
2537 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2538 {
2539         struct hclge_vport *vport = hclge_get_vport(handle);
2540         struct hclge_dev *hdev = vport->back;
2541
2542         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2543                 return hclge_set_autoneg_en(hdev, !halt);
2544
2545         return 0;
2546 }
2547
2548 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2549 {
2550         struct hclge_config_fec_cmd *req;
2551         struct hclge_desc desc;
2552         int ret;
2553
2554         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2555
2556         req = (struct hclge_config_fec_cmd *)desc.data;
2557         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2558                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2559         if (fec_mode & BIT(HNAE3_FEC_RS))
2560                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2561                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2562         if (fec_mode & BIT(HNAE3_FEC_BASER))
2563                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2564                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2565
2566         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2567         if (ret)
2568                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2569
2570         return ret;
2571 }
2572
2573 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2574 {
2575         struct hclge_vport *vport = hclge_get_vport(handle);
2576         struct hclge_dev *hdev = vport->back;
2577         struct hclge_mac *mac = &hdev->hw.mac;
2578         int ret;
2579
2580         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2581                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2582                 return -EINVAL;
2583         }
2584
2585         ret = hclge_set_fec_hw(hdev, fec_mode);
2586         if (ret)
2587                 return ret;
2588
2589         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2590         return 0;
2591 }
2592
2593 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2594                           u8 *fec_mode)
2595 {
2596         struct hclge_vport *vport = hclge_get_vport(handle);
2597         struct hclge_dev *hdev = vport->back;
2598         struct hclge_mac *mac = &hdev->hw.mac;
2599
2600         if (fec_ability)
2601                 *fec_ability = mac->fec_ability;
2602         if (fec_mode)
2603                 *fec_mode = mac->fec_mode;
2604 }
2605
2606 static int hclge_mac_init(struct hclge_dev *hdev)
2607 {
2608         struct hclge_mac *mac = &hdev->hw.mac;
2609         int ret;
2610
2611         hdev->support_sfp_query = true;
2612         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2613         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2614                                          hdev->hw.mac.duplex);
2615         if (ret)
2616                 return ret;
2617
2618         if (hdev->hw.mac.support_autoneg) {
2619                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2620                 if (ret)
2621                         return ret;
2622         }
2623
2624         mac->link = 0;
2625
2626         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2627                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2628                 if (ret)
2629                         return ret;
2630         }
2631
2632         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2633         if (ret) {
2634                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2635                 return ret;
2636         }
2637
2638         ret = hclge_set_default_loopback(hdev);
2639         if (ret)
2640                 return ret;
2641
2642         ret = hclge_buffer_alloc(hdev);
2643         if (ret)
2644                 dev_err(&hdev->pdev->dev,
2645                         "allocate buffer fail, ret=%d\n", ret);
2646
2647         return ret;
2648 }
2649
2650 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2651 {
2652         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2653             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2654                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2655                                     hclge_wq, &hdev->service_task, 0);
2656 }
2657
2658 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2659 {
2660         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2661             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2662                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2663                                     hclge_wq, &hdev->service_task, 0);
2664 }
2665
2666 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2667 {
2668         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2669             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2670                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2671                                     hclge_wq, &hdev->service_task,
2672                                     delay_time);
2673 }
2674
2675 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2676 {
2677         struct hclge_link_status_cmd *req;
2678         struct hclge_desc desc;
2679         int link_status;
2680         int ret;
2681
2682         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2683         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2684         if (ret) {
2685                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2686                         ret);
2687                 return ret;
2688         }
2689
2690         req = (struct hclge_link_status_cmd *)desc.data;
2691         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2692
2693         return !!link_status;
2694 }
2695
2696 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2697 {
2698         unsigned int mac_state;
2699         int link_stat;
2700
2701         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2702                 return 0;
2703
2704         mac_state = hclge_get_mac_link_status(hdev);
2705
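        /* with a PHY attached, report link up only when the PHY is running
         * and both the MAC and the PHY see link up
         */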
2706         if (hdev->hw.mac.phydev) {
2707                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2708                         link_stat = mac_state &
2709                                 hdev->hw.mac.phydev->link;
2710                 else
2711                         link_stat = 0;
2712
2713         } else {
2714                 link_stat = mac_state;
2715         }
2716
2717         return !!link_stat;
2718 }
2719
2720 static void hclge_update_link_status(struct hclge_dev *hdev)
2721 {
2722         struct hnae3_client *rclient = hdev->roce_client;
2723         struct hnae3_client *client = hdev->nic_client;
2724         struct hnae3_handle *rhandle;
2725         struct hnae3_handle *handle;
2726         int state;
2727         int i;
2728
2729         if (!client)
2730                 return;
2731
2732         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2733                 return;
2734
2735         state = hclge_get_mac_phy_link(hdev);
2736         if (state != hdev->hw.mac.link) {
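                /* notify the NIC client of every vport about the new link
                 * state, and the RoCE client if registered
                 */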
2737                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2738                         handle = &hdev->vport[i].nic;
2739                         client->ops->link_status_change(handle, state);
2740                         hclge_config_mac_tnl_int(hdev, state);
2741                         rhandle = &hdev->vport[i].roce;
2742                         if (rclient && rclient->ops->link_status_change)
2743                                 rclient->ops->link_status_change(rhandle,
2744                                                                  state);
2745                 }
2746                 hdev->hw.mac.link = state;
2747         }
2748
2749         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2750 }
2751
2752 static void hclge_update_port_capability(struct hclge_mac *mac)
2753 {
2754         /* update fec ability by speed */
2755         hclge_convert_setting_fec(mac);
2756
2757         /* firmware cannot identify the backplane type; the media type
2758          * read from the configuration can help to handle it
2759          */
2760         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2761             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2762                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2763         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2764                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2765
2766         if (mac->support_autoneg) {
2767                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2768                 linkmode_copy(mac->advertising, mac->supported);
2769         } else {
2770                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2771                                    mac->supported);
2772                 linkmode_zero(mac->advertising);
2773         }
2774 }
2775
2776 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2777 {
2778         struct hclge_sfp_info_cmd *resp;
2779         struct hclge_desc desc;
2780         int ret;
2781
2782         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2783         resp = (struct hclge_sfp_info_cmd *)desc.data;
2784         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2785         if (ret == -EOPNOTSUPP) {
2786                 dev_warn(&hdev->pdev->dev,
2787                          "IMP does not support getting SFP speed %d\n", ret);
2788                 return ret;
2789         } else if (ret) {
2790                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2791                 return ret;
2792         }
2793
2794         *speed = le32_to_cpu(resp->speed);
2795
2796         return 0;
2797 }
2798
2799 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2800 {
2801         struct hclge_sfp_info_cmd *resp;
2802         struct hclge_desc desc;
2803         int ret;
2804
2805         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2806         resp = (struct hclge_sfp_info_cmd *)desc.data;
2807
2808         resp->query_type = QUERY_ACTIVE_SPEED;
2809
2810         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2811         if (ret == -EOPNOTSUPP) {
2812                 dev_warn(&hdev->pdev->dev,
2813                          "IMP does not support getting SFP info %d\n", ret);
2814                 return ret;
2815         } else if (ret) {
2816                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2817                 return ret;
2818         }
2819
2820         /* In some cases, the mac speed got from IMP may be 0, it shouldn't
2821          * be set to mac->speed.
2822          */
2823         if (!le32_to_cpu(resp->speed))
2824                 return 0;
2825
2826         mac->speed = le32_to_cpu(resp->speed);
2827         /* if resp->speed_ability is 0, it means it's an old firmware
2828          * version, so do not update these parameters
2829          */
2830         if (resp->speed_ability) {
2831                 mac->module_type = le32_to_cpu(resp->module_type);
2832                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2833                 mac->autoneg = resp->autoneg;
2834                 mac->support_autoneg = resp->autoneg_ability;
2835                 mac->speed_type = QUERY_ACTIVE_SPEED;
2836                 if (!resp->active_fec)
2837                         mac->fec_mode = 0;
2838                 else
2839                         mac->fec_mode = BIT(resp->active_fec);
2840         } else {
2841                 mac->speed_type = QUERY_SFP_SPEED;
2842         }
2843
2844         return 0;
2845 }
2846
2847 static int hclge_update_port_info(struct hclge_dev *hdev)
2848 {
2849         struct hclge_mac *mac = &hdev->hw.mac;
2850         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2851         int ret;
2852
2853         /* get the port info from SFP cmd if not copper port */
2854         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2855                 return 0;
2856
2857         /* if IMP does not support getting SFP/qSFP info, return directly */
2858         if (!hdev->support_sfp_query)
2859                 return 0;
2860
2861         if (hdev->pdev->revision >= 0x21)
2862                 ret = hclge_get_sfp_info(hdev, mac);
2863         else
2864                 ret = hclge_get_sfp_speed(hdev, &speed);
2865
2866         if (ret == -EOPNOTSUPP) {
2867                 hdev->support_sfp_query = false;
2868                 return ret;
2869         } else if (ret) {
2870                 return ret;
2871         }
2872
2873         if (hdev->pdev->revision >= 0x21) {
2874                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2875                         hclge_update_port_capability(mac);
2876                         return 0;
2877                 }
2878                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2879                                                HCLGE_MAC_FULL);
2880         } else {
2881                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2882                         return 0; /* do nothing if no SFP */
2883
2884                 /* must configure full duplex for SFP */
2885                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2886         }
2887 }
2888
2889 static int hclge_get_status(struct hnae3_handle *handle)
2890 {
2891         struct hclge_vport *vport = hclge_get_vport(handle);
2892         struct hclge_dev *hdev = vport->back;
2893
2894         hclge_update_link_status(hdev);
2895
2896         return hdev->hw.mac.link;
2897 }
2898
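/* Editor's note on the function below: map a zero-based VF index to its
 * vport; vport 0 belongs to the PF, so VF vports start at
 * HCLGE_VF_VPORT_START_NUM.
 */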
2899 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2900 {
2901         if (!pci_num_vf(hdev->pdev)) {
2902                 dev_err(&hdev->pdev->dev,
2903                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2904                 return NULL;
2905         }
2906
2907         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2908                 dev_err(&hdev->pdev->dev,
2909                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
2910                         vf, pci_num_vf(hdev->pdev));
2911                 return NULL;
2912         }
2913
2914         /* VF vport index starts from 1 */
2915         vf += HCLGE_VF_VPORT_START_NUM;
2916         return &hdev->vport[vf];
2917 }
2918
2919 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2920                                struct ifla_vf_info *ivf)
2921 {
2922         struct hclge_vport *vport = hclge_get_vport(handle);
2923         struct hclge_dev *hdev = vport->back;
2924
2925         vport = hclge_get_vf_vport(hdev, vf);
2926         if (!vport)
2927                 return -EINVAL;
2928
2929         ivf->vf = vf;
2930         ivf->linkstate = vport->vf_info.link_state;
2931         ivf->spoofchk = vport->vf_info.spoofchk;
2932         ivf->trusted = vport->vf_info.trusted;
2933         ivf->min_tx_rate = 0;
2934         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2935         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2936         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2937         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2938         ether_addr_copy(ivf->mac, vport->vf_info.mac);
2939
2940         return 0;
2941 }
2942
2943 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2944                                    int link_state)
2945 {
2946         struct hclge_vport *vport = hclge_get_vport(handle);
2947         struct hclge_dev *hdev = vport->back;
2948
2949         vport = hclge_get_vf_vport(hdev, vf);
2950         if (!vport)
2951                 return -EINVAL;
2952
2953         vport->vf_info.link_state = link_state;
2954
2955         return 0;
2956 }
2957
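/* Editor's note on the function below: decode the vector0 interrupt cause.
 * Events are checked in priority order: IMP reset, global reset, MSI-X
 * (hardware error), mailbox (CMDQ RX) and finally "other". *clearval returns
 * the bits the caller should clear.
 */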
2958 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2959 {
2960         u32 cmdq_src_reg, msix_src_reg;
2961
2962         /* fetch the events from their corresponding regs */
2963         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2964         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2965
2966         /* Assumption: if by any chance reset and mailbox events are reported
2967          * together, then we will only process the reset event in this go and
2968          * will defer the processing of the mailbox events. Since we would not
2969          * have cleared the RX CMDQ event this time, we would receive another
2970          * interrupt from H/W just for the mailbox.
2971          *
2972          * check for vector0 reset event sources
2973          */
2974         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
2975                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2976                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2977                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2978                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2979                 hdev->rst_stats.imp_rst_cnt++;
2980                 return HCLGE_VECTOR0_EVENT_RST;
2981         }
2982
2983         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
2984                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2985                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2986                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2987                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2988                 hdev->rst_stats.global_rst_cnt++;
2989                 return HCLGE_VECTOR0_EVENT_RST;
2990         }
2991
2992         /* check for vector0 msix event source */
2993         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2994                 *clearval = msix_src_reg;
2995                 return HCLGE_VECTOR0_EVENT_ERR;
2996         }
2997
2998         /* check for vector0 mailbox(=CMDQ RX) event source */
2999         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3000                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3001                 *clearval = cmdq_src_reg;
3002                 return HCLGE_VECTOR0_EVENT_MBX;
3003         }
3004
3005         /* print other vector0 event source */
3006         dev_info(&hdev->pdev->dev,
3007                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3008                  cmdq_src_reg, msix_src_reg);
3009         *clearval = msix_src_reg;
3010
3011         return HCLGE_VECTOR0_EVENT_OTHER;
3012 }
3013
3014 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3015                                     u32 regclr)
3016 {
3017         switch (event_type) {
3018         case HCLGE_VECTOR0_EVENT_RST:
3019                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3020                 break;
3021         case HCLGE_VECTOR0_EVENT_MBX:
3022                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3023                 break;
3024         default:
3025                 break;
3026         }
3027 }
3028
3029 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3030 {
3031         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3032                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3033                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3034                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3035         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3036 }
3037
3038 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3039 {
3040         writel(enable ? 1 : 0, vector->addr);
3041 }
3042
3043 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3044 {
3045         struct hclge_dev *hdev = data;
3046         u32 clearval = 0;
3047         u32 event_cause;
3048
3049         hclge_enable_vector(&hdev->misc_vector, false);
3050         event_cause = hclge_check_event_cause(hdev, &clearval);
3051
3052         /* vector 0 interrupt is shared with reset and mailbox source events. */
3053         switch (event_cause) {
3054         case HCLGE_VECTOR0_EVENT_ERR:
3055                 /* we do not know what type of reset is required now. This could
3056                  * only be decided after we fetch the type of errors which
3057                  * caused this event. Therefore, we will do the below for now:
3058                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3059                  *    have deferred the type of reset to be used.
3060                  * 2. Schedule the reset service task.
3061                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type,
3062                  *    it will fetch the correct type of reset. This would be
3063                  *    done by first decoding the types of errors.
3064                  */
3065                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3066                 /* fall through */
3067         case HCLGE_VECTOR0_EVENT_RST:
3068                 hclge_reset_task_schedule(hdev);
3069                 break;
3070         case HCLGE_VECTOR0_EVENT_MBX:
3071                 /* If we are here, then
3072                  * 1. either we are not handling any mbx task and no mbx task
3073                  *    is scheduled either,
3074                  *                        OR
3075                  * 2. we could be handling an mbx task but nothing more is
3076                  *    scheduled.
3077                  * In both cases, we should schedule the mbx task as there are
3078                  * more mbx messages reported by this interrupt.
3079                  */
3080                 hclge_mbx_task_schedule(hdev);
3081                 break;
3082         default:
3083                 dev_warn(&hdev->pdev->dev,
3084                          "received unknown or unhandled event of vector0\n");
3085                 break;
3086         }
3087
3088         hclge_clear_event_cause(hdev, event_cause, clearval);
3089
3090         /* Enable the interrupt if it is not caused by reset. And when
3091          * clearval equals 0, it means the interrupt status may have been
3092          * cleared by hardware before the driver reads the status register.
3093          * In this case, the vector0 interrupt should also be enabled.
3094          */
3095         if (!clearval ||
3096             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3097                 hclge_enable_vector(&hdev->misc_vector, true);
3098         }
3099
3100         return IRQ_HANDLED;
3101 }
3102
3103 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3104 {
3105         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3106                 dev_warn(&hdev->pdev->dev,
3107                          "vector(vector_id %d) has been freed.\n", vector_id);
3108                 return;
3109         }
3110
3111         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3112         hdev->num_msi_left += 1;
3113         hdev->num_msi_used -= 1;
3114 }
3115
3116 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3117 {
3118         struct hclge_misc_vector *vector = &hdev->misc_vector;
3119
3120         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3121
3122         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3123         hdev->vector_status[0] = 0;
3124
3125         hdev->num_msi_left -= 1;
3126         hdev->num_msi_used += 1;
3127 }
3128
3129 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3130                                       const cpumask_t *mask)
3131 {
3132         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3133                                               affinity_notify);
3134
3135         cpumask_copy(&hdev->affinity_mask, mask);
3136 }
3137
3138 static void hclge_irq_affinity_release(struct kref *ref)
3139 {
3140 }
3141
3142 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3143 {
3144         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3145                               &hdev->affinity_mask);
3146
3147         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3148         hdev->affinity_notify.release = hclge_irq_affinity_release;
3149         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3150                                   &hdev->affinity_notify);
3151 }
3152
3153 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3154 {
3155         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3156         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3157 }
3158
3159 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3160 {
3161         int ret;
3162
3163         hclge_get_misc_vector(hdev);
3164
3165         /* this would be explicitly freed in the end */
3166         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3167                  HCLGE_NAME, pci_name(hdev->pdev));
3168         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3169                           0, hdev->misc_vector.name, hdev);
3170         if (ret) {
3171                 hclge_free_vector(hdev, 0);
3172                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3173                         hdev->misc_vector.vector_irq);
3174         }
3175
3176         return ret;
3177 }
3178
3179 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3180 {
3181         free_irq(hdev->misc_vector.vector_irq, hdev);
3182         hclge_free_vector(hdev, 0);
3183 }
3184
3185 int hclge_notify_client(struct hclge_dev *hdev,
3186                         enum hnae3_reset_notify_type type)
3187 {
3188         struct hnae3_client *client = hdev->nic_client;
3189         u16 i;
3190
3191         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3192                 return 0;
3193
3194         if (!client->ops->reset_notify)
3195                 return -EOPNOTSUPP;
3196
3197         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3198                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3199                 int ret;
3200
3201                 ret = client->ops->reset_notify(handle, type);
3202                 if (ret) {
3203                         dev_err(&hdev->pdev->dev,
3204                                 "notify nic client failed %d(%d)\n", type, ret);
3205                         return ret;
3206                 }
3207         }
3208
3209         return 0;
3210 }
3211
3212 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3213                                     enum hnae3_reset_notify_type type)
3214 {
3215         struct hnae3_client *client = hdev->roce_client;
3216         int ret = 0;
3217         u16 i;
3218
3219         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3220                 return 0;
3221
3222         if (!client->ops->reset_notify)
3223                 return -EOPNOTSUPP;
3224
3225         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3226                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3227
3228                 ret = client->ops->reset_notify(handle, type);
3229                 if (ret) {
3230                         dev_err(&hdev->pdev->dev,
3231                                 "notify roce client failed %d(%d)",
3232                                 type, ret);
3233                         return ret;
3234                 }
3235         }
3236
3237         return ret;
3238 }
3239
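/* Editor's note on the function below: poll the reset status register that
 * matches the current reset type until hardware clears the in-progress bit,
 * sleeping between reads and giving up with -EBUSY after
 * HCLGE_RESET_WAIT_CNT attempts.
 */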
3240 static int hclge_reset_wait(struct hclge_dev *hdev)
3241 {
3242 #define HCLGE_RESET_WAIT_MS     100
3243 #define HCLGE_RESET_WAIT_CNT    350
3244
3245         u32 val, reg, reg_bit;
3246         u32 cnt = 0;
3247
3248         switch (hdev->reset_type) {
3249         case HNAE3_IMP_RESET:
3250                 reg = HCLGE_GLOBAL_RESET_REG;
3251                 reg_bit = HCLGE_IMP_RESET_BIT;
3252                 break;
3253         case HNAE3_GLOBAL_RESET:
3254                 reg = HCLGE_GLOBAL_RESET_REG;
3255                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3256                 break;
3257         case HNAE3_FUNC_RESET:
3258                 reg = HCLGE_FUN_RST_ING;
3259                 reg_bit = HCLGE_FUN_RST_ING_B;
3260                 break;
3261         default:
3262                 dev_err(&hdev->pdev->dev,
3263                         "Wait for unsupported reset type: %d\n",
3264                         hdev->reset_type);
3265                 return -EINVAL;
3266         }
3267
3268         val = hclge_read_dev(&hdev->hw, reg);
3269         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3270                 msleep(HCLGE_RESET_WAIT_MS);
3271                 val = hclge_read_dev(&hdev->hw, reg);
3272                 cnt++;
3273         }
3274
3275         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3276                 dev_warn(&hdev->pdev->dev,
3277                          "Wait for reset timeout: %d\n", hdev->reset_type);
3278                 return -EBUSY;
3279         }
3280
3281         return 0;
3282 }
3283
3284 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3285 {
3286         struct hclge_vf_rst_cmd *req;
3287         struct hclge_desc desc;
3288
3289         req = (struct hclge_vf_rst_cmd *)desc.data;
3290         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3291         req->dest_vfid = func_id;
3292
3293         if (reset)
3294                 req->vf_rst = 0x1;
3295
3296         return hclge_cmd_send(&hdev->hw, &desc, 1);
3297 }
3298
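/* Editor's note on the function below: set or clear FUNC_RST_ING for every
 * VF vport and, when asserting reset, inform each alive VF so its driver can
 * prepare; informing may fail with only a warning because the VF driver
 * might not be loaded.
 */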
3299 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3300 {
3301         int i;
3302
3303         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3304                 struct hclge_vport *vport = &hdev->vport[i];
3305                 int ret;
3306
3307                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3308                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3309                 if (ret) {
3310                         dev_err(&hdev->pdev->dev,
3311                                 "set vf(%u) rst failed %d!\n",
3312                                 vport->vport_id, ret);
3313                         return ret;
3314                 }
3315
3316                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3317                         continue;
3318
3319                 /* Inform VF to process the reset.
3320                  * hclge_inform_reset_assert_to_vf may fail if VF
3321                  * driver is not loaded.
3322                  */
3323                 ret = hclge_inform_reset_assert_to_vf(vport);
3324                 if (ret)
3325                         dev_warn(&hdev->pdev->dev,
3326                                  "inform reset to vf(%u) failed %d!\n",
3327                                  vport->vport_id, ret);
3328         }
3329
3330         return 0;
3331 }
3332
3333 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3334 {
3335         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3336             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3337             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3338                 return;
3339
3340         hclge_mbx_handler(hdev);
3341
3342         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3343 }
3344
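/* Editor's note on the function below: before asserting a PF or FLR reset,
 * poll HCLGE_OPC_QUERY_VF_RST_RDY until all VFs report that they have
 * stopped IO, servicing pending mailbox requests in between so the VFs can
 * actually bring their netdevs down.
 */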
3345 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3346 {
3347         struct hclge_pf_rst_sync_cmd *req;
3348         struct hclge_desc desc;
3349         int cnt = 0;
3350         int ret;
3351
3352         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3353         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3354
3355         do {
3356                 /* VF needs to bring down its netdev by mbx during PF or FLR reset */
3357                 hclge_mailbox_service_task(hdev);
3358
3359                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3360                 /* for compatibility with old firmware, wait
3361                  * 100 ms for the VF to stop IO
3362                  */
3363                 if (ret == -EOPNOTSUPP) {
3364                         msleep(HCLGE_RESET_SYNC_TIME);
3365                         return;
3366                 } else if (ret) {
3367                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3368                                  ret);
3369                         return;
3370                 } else if (req->all_vf_ready) {
3371                         return;
3372                 }
3373                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3374                 hclge_cmd_reuse_desc(&desc, true);
3375         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3376
3377         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3378 }
3379
3380 void hclge_report_hw_error(struct hclge_dev *hdev,
3381                            enum hnae3_hw_error_type type)
3382 {
3383         struct hnae3_client *client = hdev->nic_client;
3384         u16 i;
3385
3386         if (!client || !client->ops->process_hw_error ||
3387             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3388                 return;
3389
3390         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3391                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3392 }
3393
3394 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3395 {
3396         u32 reg_val;
3397
3398         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3399         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3400                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3401                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3402                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3403         }
3404
3405         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3406                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3407                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3408                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3409         }
3410 }
3411
3412 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3413 {
3414         struct hclge_desc desc;
3415         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3416         int ret;
3417
3418         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3419         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3420         req->fun_reset_vfid = func_id;
3421
3422         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3423         if (ret)
3424                 dev_err(&hdev->pdev->dev,
3425                         "send function reset cmd fail, status =%d\n", ret);
3426
3427         return ret;
3428 }
3429
3430 static void hclge_do_reset(struct hclge_dev *hdev)
3431 {
3432         struct hnae3_handle *handle = &hdev->vport[0].nic;
3433         struct pci_dev *pdev = hdev->pdev;
3434         u32 val;
3435
3436         if (hclge_get_hw_reset_stat(handle)) {
3437                 dev_info(&pdev->dev, "hardware reset not finish\n");
3438                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3439                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3440                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3441                 return;
3442         }
3443
3444         switch (hdev->reset_type) {
3445         case HNAE3_GLOBAL_RESET:
3446                 dev_info(&pdev->dev, "global reset requested\n");
3447                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3448                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3449                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3450                 break;
3451         case HNAE3_FUNC_RESET:
3452                 dev_info(&pdev->dev, "PF reset requested\n");
3453                 /* schedule again to check later */
3454                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3455                 hclge_reset_task_schedule(hdev);
3456                 break;
3457         default:
3458                 dev_warn(&pdev->dev,
3459                          "unsupported reset type: %d\n", hdev->reset_type);
3460                 break;
3461         }
3462 }
3463
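/* Editor's note on the function below: pick the highest-priority pending
 * reset level from *addr (IMP > global > func > FLR), clearing that level
 * and any lower ones it supersedes; an UNKNOWN reset is first resolved by
 * decoding the MSI-X error sources.
 */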
3464 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3465                                                    unsigned long *addr)
3466 {
3467         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3468         struct hclge_dev *hdev = ae_dev->priv;
3469
3470         /* first, resolve any unknown reset type to the known type(s) */
3471         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3472                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3473                                         HCLGE_MISC_VECTOR_INT_STS);
3474                 /* we will intentionally ignore any errors from this function
3475                  * as we will end up in *some* reset request in any case
3476                  */
3477                 if (hclge_handle_hw_msix_error(hdev, addr))
3478                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3479                                  msix_sts_reg);
3480
3481                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3482                 /* We deferred the clearing of the error event which caused
3483                  * the interrupt since it was not possible to do that in
3484                  * interrupt context (and this is the reason we introduced the
3485                  * new UNKNOWN reset type). Now that the errors have been
3486                  * handled and cleared in hardware, we can safely enable
3487                  * interrupts. This is an exception to the norm.
3488                  */
3489                 hclge_enable_vector(&hdev->misc_vector, true);
3490         }
3491
3492         /* return the highest priority reset level amongst all */
3493         if (test_bit(HNAE3_IMP_RESET, addr)) {
3494                 rst_level = HNAE3_IMP_RESET;
3495                 clear_bit(HNAE3_IMP_RESET, addr);
3496                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3497                 clear_bit(HNAE3_FUNC_RESET, addr);
3498         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3499                 rst_level = HNAE3_GLOBAL_RESET;
3500                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3501                 clear_bit(HNAE3_FUNC_RESET, addr);
3502         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3503                 rst_level = HNAE3_FUNC_RESET;
3504                 clear_bit(HNAE3_FUNC_RESET, addr);
3505         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3506                 rst_level = HNAE3_FLR_RESET;
3507                 clear_bit(HNAE3_FLR_RESET, addr);
3508         }
3509
3510         if (hdev->reset_type != HNAE3_NONE_RESET &&
3511             rst_level < hdev->reset_type)
3512                 return HNAE3_NONE_RESET;
3513
3514         return rst_level;
3515 }
3516
3517 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3518 {
3519         u32 clearval = 0;
3520
3521         switch (hdev->reset_type) {
3522         case HNAE3_IMP_RESET:
3523                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3524                 break;
3525         case HNAE3_GLOBAL_RESET:
3526                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3527                 break;
3528         default:
3529                 break;
3530         }
3531
3532         if (!clearval)
3533                 return;
3534
3535         /* For revision 0x20, the reset interrupt source
3536          * can only be cleared after the hardware reset is done
3537          */
3538         if (hdev->pdev->revision == 0x20)
3539                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3540                                 clearval);
3541
3542         hclge_enable_vector(&hdev->misc_vector, true);
3543 }
3544
3545 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3546 {
3547         u32 reg_val;
3548
3549         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3550         if (enable)
3551                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3552         else
3553                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3554
3555         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3556 }
3557
3558 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3559 {
3560         int ret;
3561
3562         ret = hclge_set_all_vf_rst(hdev, true);
3563         if (ret)
3564                 return ret;
3565
3566         hclge_func_reset_sync_vf(hdev);
3567
3568         return 0;
3569 }
3570
3571 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3572 {
3573         u32 reg_val;
3574         int ret = 0;
3575
3576         switch (hdev->reset_type) {
3577         case HNAE3_FUNC_RESET:
3578                 ret = hclge_func_reset_notify_vf(hdev);
3579                 if (ret)
3580                         return ret;
3581
3582                 ret = hclge_func_reset_cmd(hdev, 0);
3583                 if (ret) {
3584                         dev_err(&hdev->pdev->dev,
3585                                 "asserting function reset fail %d!\n", ret);
3586                         return ret;
3587                 }
3588
3589                 /* After performing PF reset, it is not necessary to do the
3590                  * mailbox handling or send any command to firmware, because
3591                  * any mailbox handling or command to firmware is only valid
3592                  * after hclge_cmd_init is called.
3593                  */
3594                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3595                 hdev->rst_stats.pf_rst_cnt++;
3596                 break;
3597         case HNAE3_FLR_RESET:
3598                 ret = hclge_func_reset_notify_vf(hdev);
3599                 if (ret)
3600                         return ret;
3601                 break;
3602         case HNAE3_IMP_RESET:
3603                 hclge_handle_imp_error(hdev);
3604                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3605                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3606                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3607                 break;
3608         default:
3609                 break;
3610         }
3611
3612         /* inform hardware that preparatory work is done */
3613         msleep(HCLGE_RESET_SYNC_TIME);
3614         hclge_reset_handshake(hdev, true);
3615         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3616
3617         return ret;
3618 }
3619
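/* Editor's note on the function below: decide how to proceed after a failed
 * reset: keep retrying while another reset is still pending, give way if a
 * new reset interrupt has been raised, re-schedule the same reset up to
 * MAX_RESET_FAIL_CNT times, and otherwise mark the device as reset-failed.
 */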
3620 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3621 {
3622 #define MAX_RESET_FAIL_CNT 5
3623
3624         if (hdev->reset_pending) {
3625                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3626                          hdev->reset_pending);
3627                 return true;
3628         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3629                    HCLGE_RESET_INT_M) {
3630                 dev_info(&hdev->pdev->dev,
3631                          "reset failed because new reset interrupt\n");
3632                 hclge_clear_reset_cause(hdev);
3633                 return false;
3634         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3635                 hdev->rst_stats.reset_fail_cnt++;
3636                 set_bit(hdev->reset_type, &hdev->reset_pending);
3637                 dev_info(&hdev->pdev->dev,
3638                          "re-schedule reset task(%u)\n",
3639                          hdev->rst_stats.reset_fail_cnt);
3640                 return true;
3641         }
3642
3643         hclge_clear_reset_cause(hdev);
3644
3645         /* recover the handshake status when reset fails */
3646         hclge_reset_handshake(hdev, true);
3647
3648         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3649
3650         hclge_dbg_dump_rst_info(hdev);
3651
3652         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3653
3654         return false;
3655 }
3656
3657 static int hclge_set_rst_done(struct hclge_dev *hdev)
3658 {
3659         struct hclge_pf_rst_done_cmd *req;
3660         struct hclge_desc desc;
3661         int ret;
3662
3663         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3664         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3665         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3666
3667         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3668         /* To be compatible with the old firmware, which does not support
3669          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3670          * return success
3671          */
3672         if (ret == -EOPNOTSUPP) {
3673                 dev_warn(&hdev->pdev->dev,
3674                          "current firmware does not support command(0x%x)!\n",
3675                          HCLGE_OPC_PF_RST_DONE);
3676                 return 0;
3677         } else if (ret) {
3678                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3679                         ret);
3680         }
3681
3682         return ret;
3683 }
3684
3685 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3686 {
3687         int ret = 0;
3688
3689         switch (hdev->reset_type) {
3690         case HNAE3_FUNC_RESET:
3691                 /* fall through */
3692         case HNAE3_FLR_RESET:
3693                 ret = hclge_set_all_vf_rst(hdev, false);
3694                 break;
3695         case HNAE3_GLOBAL_RESET:
3696                 /* fall through */
3697         case HNAE3_IMP_RESET:
3698                 ret = hclge_set_rst_done(hdev);
3699                 break;
3700         default:
3701                 break;
3702         }
3703
3704         /* clear up the handshake status after re-initialization is done */
3705         hclge_reset_handshake(hdev, false);
3706
3707         return ret;
3708 }
3709
3710 static int hclge_reset_stack(struct hclge_dev *hdev)
3711 {
3712         int ret;
3713
3714         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3715         if (ret)
3716                 return ret;
3717
3718         ret = hclge_reset_ae_dev(hdev->ae_dev);
3719         if (ret)
3720                 return ret;
3721
3722         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3723 }
3724
3725 static int hclge_reset_prepare(struct hclge_dev *hdev)
3726 {
3727         int ret;
3728
3729         hdev->rst_stats.reset_cnt++;
3730         /* perform reset of the stack & ae device for a client */
3731         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3732         if (ret)
3733                 return ret;
3734
3735         rtnl_lock();
3736         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3737         rtnl_unlock();
3738         if (ret)
3739                 return ret;
3740
3741         return hclge_reset_prepare_wait(hdev);
3742 }
3743
3744 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3745 {
3746         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3747         enum hnae3_reset_type reset_level;
3748         int ret;
3749
3750         hdev->rst_stats.hw_reset_done_cnt++;
3751
3752         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3753         if (ret)
3754                 return ret;
3755
3756         rtnl_lock();
3757         ret = hclge_reset_stack(hdev);
3758         rtnl_unlock();
3759         if (ret)
3760                 return ret;
3761
3762         hclge_clear_reset_cause(hdev);
3763
3764         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3765         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3766          * times
3767          */
3768         if (ret &&
3769             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3770                 return ret;
3771
3772         ret = hclge_reset_prepare_up(hdev);
3773         if (ret)
3774                 return ret;
3775
3776         rtnl_lock();
3777         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3778         rtnl_unlock();
3779         if (ret)
3780                 return ret;
3781
3782         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3783         if (ret)
3784                 return ret;
3785
3786         hdev->last_reset_time = jiffies;
3787         hdev->rst_stats.reset_fail_cnt = 0;
3788         hdev->rst_stats.reset_done_cnt++;
3789         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3790
3791         /* if default_reset_request has a higher level reset request,
3792          * it should be handled as soon as possible, since some errors
3793          * need this kind of reset to be fixed.
3794          */
3795         reset_level = hclge_get_reset_level(ae_dev,
3796                                             &hdev->default_reset_request);
3797         if (reset_level != HNAE3_NONE_RESET)
3798                 set_bit(reset_level, &hdev->reset_request);
3799
3800         return 0;
3801 }
3802
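/* Editor's note on the function below: top-level reset flow: prepare (notify
 * clients down and assert the reset), wait for hardware to finish, then
 * rebuild the stack; on failure the error handler decides whether to
 * re-schedule the reset task.
 */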
3803 static void hclge_reset(struct hclge_dev *hdev)
3804 {
3805         if (hclge_reset_prepare(hdev))
3806                 goto err_reset;
3807
3808         if (hclge_reset_wait(hdev))
3809                 goto err_reset;
3810
3811         if (hclge_reset_rebuild(hdev))
3812                 goto err_reset;
3813
3814         return;
3815
3816 err_reset:
3817         if (hclge_reset_err_handle(hdev))
3818                 hclge_reset_task_schedule(hdev);
3819 }
3820
3821 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3822 {
3823         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3824         struct hclge_dev *hdev = ae_dev->priv;
3825
3826         /* We might end up getting called broadly because of the 2 cases below:
3827          * 1. A recoverable error was conveyed through APEI and the only way
3828          *    to bring back normalcy is to reset.
3829          * 2. A new reset request from the stack due to timeout
3830          *
3831          * For the first case, the error event might not have an ae handle
3832          * available. Check if this is a new reset request and we are not here
3833          * just because the last reset attempt did not succeed and the watchdog
3834          * hit us again. We will know this if the last reset request did not
3835          * occur very recently (watchdog timer = 5*HZ, let us check after a
3836          * sufficiently large time, say 4*5*HZ). In case of a new request, we
3837          * reset the "reset level" to PF reset. And if it is a repeat reset
3838          * request of the most recent one, then we want to throttle the reset
3839          * request. Therefore, we will not allow it again before 3*HZ times.
3840          */
3841         if (!handle)
3842                 handle = &hdev->vport[0].nic;
3843
3844         if (time_before(jiffies, (hdev->last_reset_time +
3845                                   HCLGE_RESET_INTERVAL))) {
3846                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3847                 return;
3848         } else if (hdev->default_reset_request) {
3849                 hdev->reset_level =
3850                         hclge_get_reset_level(ae_dev,
3851                                               &hdev->default_reset_request);
3852         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3853                 hdev->reset_level = HNAE3_FUNC_RESET;
3854         }
3855
3856         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3857                  hdev->reset_level);
3858
3859         /* request reset & schedule reset task */
3860         set_bit(hdev->reset_level, &hdev->reset_request);
3861         hclge_reset_task_schedule(hdev);
3862
3863         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3864                 hdev->reset_level++;
3865 }
3866
3867 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3868                                         enum hnae3_reset_type rst_type)
3869 {
3870         struct hclge_dev *hdev = ae_dev->priv;
3871
3872         set_bit(rst_type, &hdev->default_reset_request);
3873 }
3874
3875 static void hclge_reset_timer(struct timer_list *t)
3876 {
3877         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3878
3879         /* if default_reset_request has no value, it means that this reset
3880          * request has already been handled, so just return here
3881          */
3882         if (!hdev->default_reset_request)
3883                 return;
3884
3885         dev_info(&hdev->pdev->dev,
3886                  "triggering reset in reset timer\n");
3887         hclge_reset_event(hdev->pdev, NULL);
3888 }
3889
3890 static void hclge_reset_subtask(struct hclge_dev *hdev)
3891 {
3892         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3893
3894         /* check if there is any ongoing reset in the hardware. This status can
3895          * be checked from reset_pending. If there is, then we need to wait for
3896          * the hardware to complete the reset.
3897          *    a. If we are able to figure out in reasonable time that the
3898          *       hardware has fully reset, then we can proceed with the driver
3899          *       and client reset.
3900          *    b. Else, we can come back later to check this status, so
3901          *       re-schedule now.
3902          */
3903         hdev->last_reset_time = jiffies;
3904         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3905         if (hdev->reset_type != HNAE3_NONE_RESET)
3906                 hclge_reset(hdev);
3907
3908         /* check if we got any *new* reset requests to be honored */
3909         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3910         if (hdev->reset_type != HNAE3_NONE_RESET)
3911                 hclge_do_reset(hdev);
3912
3913         hdev->reset_type = HNAE3_NONE_RESET;
3914 }
3915
3916 static void hclge_reset_service_task(struct hclge_dev *hdev)
3917 {
3918         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3919                 return;
3920
3921         down(&hdev->reset_sem);
3922         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3923
3924         hclge_reset_subtask(hdev);
3925
3926         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3927         up(&hdev->reset_sem);
3928 }
3929
3930 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3931 {
3932         int i;
3933
3934         /* start from vport 1 since the PF is always alive */
3935         for (i = 1; i < hdev->num_alloc_vport; i++) {
3936                 struct hclge_vport *vport = &hdev->vport[i];
3937
3938                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3939                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3940
3941                 /* If vf is not alive, set to default value */
3942                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3943                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3944         }
3945 }
3946
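/* Editor's note on the function below: periodic housekeeping: link state,
 * MAC table and promisc mode are synced on every invocation, while the
 * heavier work (vport alive check, stats, port info, VLAN filter, ARFS
 * expiry) runs at most once per second; earlier invocations re-schedule
 * themselves for the remainder of the interval.
 */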
3947 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3948 {
3949         unsigned long delta = round_jiffies_relative(HZ);
3950
3951         /* Always handle the link updating to make sure link state is
3952          * updated when it is triggered by mbx.
3953          */
3954         hclge_update_link_status(hdev);
3955         hclge_sync_mac_table(hdev);
3956         hclge_sync_promisc_mode(hdev);
3957
3958         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3959                 delta = jiffies - hdev->last_serv_processed;
3960
3961                 if (delta < round_jiffies_relative(HZ)) {
3962                         delta = round_jiffies_relative(HZ) - delta;
3963                         goto out;
3964                 }
3965         }
3966
3967         hdev->serv_processed_cnt++;
3968         hclge_update_vport_alive(hdev);
3969
3970         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3971                 hdev->last_serv_processed = jiffies;
3972                 goto out;
3973         }
3974
3975         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3976                 hclge_update_stats_for_all(hdev);
3977
3978         hclge_update_port_info(hdev);
3979         hclge_sync_vlan_filter(hdev);
3980
3981         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3982                 hclge_rfs_filter_expire(hdev);
3983
3984         hdev->last_serv_processed = jiffies;
3985
3986 out:
3987         hclge_task_schedule(hdev, delta);
3988 }
3989
3990 static void hclge_service_task(struct work_struct *work)
3991 {
3992         struct hclge_dev *hdev =
3993                 container_of(work, struct hclge_dev, service_task.work);
3994
3995         hclge_reset_service_task(hdev);
3996         hclge_mailbox_service_task(hdev);
3997         hclge_periodic_service_task(hdev);
3998
3999         /* Handle reset and mbx again in case periodical task delays the
4000          * handling by calling hclge_task_schedule() in
4001          * hclge_periodic_service_task().
4002          */
4003         hclge_reset_service_task(hdev);
4004         hclge_mailbox_service_task(hdev);
4005 }
4006
4007 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4008 {
4009         /* VF handle has no client */
4010         if (!handle->client)
4011                 return container_of(handle, struct hclge_vport, nic);
4012         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4013                 return container_of(handle, struct hclge_vport, roce);
4014         else
4015                 return container_of(handle, struct hclge_vport, nic);
4016 }
4017
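/* Editor's note on the function below: allocate up to vector_num unused
 * MSI-X vectors to the requesting vport, filling in the irq number and the
 * vector's register address for each one; returns the number of vectors
 * actually allocated.
 */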
4018 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4019                             struct hnae3_vector_info *vector_info)
4020 {
4021         struct hclge_vport *vport = hclge_get_vport(handle);
4022         struct hnae3_vector_info *vector = vector_info;
4023         struct hclge_dev *hdev = vport->back;
4024         int alloc = 0;
4025         int i, j;
4026
4027         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4028         vector_num = min(hdev->num_msi_left, vector_num);
4029
4030         for (j = 0; j < vector_num; j++) {
4031                 for (i = 1; i < hdev->num_msi; i++) {
4032                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4033                                 vector->vector = pci_irq_vector(hdev->pdev, i);
4034                                 vector->io_addr = hdev->hw.io_base +
4035                                         HCLGE_VECTOR_REG_BASE +
4036                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4037                                         vport->vport_id *
4038                                         HCLGE_VECTOR_VF_OFFSET;
4039                                 hdev->vector_status[i] = vport->vport_id;
4040                                 hdev->vector_irq[i] = vector->vector;
4041
4042                                 vector++;
4043                                 alloc++;
4044
4045                                 break;
4046                         }
4047                 }
4048         }
4049         hdev->num_msi_left -= alloc;
4050         hdev->num_msi_used += alloc;
4051
4052         return alloc;
4053 }
4054
4055 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4056 {
4057         int i;
4058
4059         for (i = 0; i < hdev->num_msi; i++)
4060                 if (vector == hdev->vector_irq[i])
4061                         return i;
4062
4063         return -EINVAL;
4064 }
4065
4066 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4067 {
4068         struct hclge_vport *vport = hclge_get_vport(handle);
4069         struct hclge_dev *hdev = vport->back;
4070         int vector_id;
4071
4072         vector_id = hclge_get_vector_index(hdev, vector);
4073         if (vector_id < 0) {
4074                 dev_err(&hdev->pdev->dev,
4075                         "Get vector index fail. vector = %d\n", vector);
4076                 return vector_id;
4077         }
4078
4079         hclge_free_vector(hdev, vector_id);
4080
4081         return 0;
4082 }
4083
4084 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4085 {
4086         return HCLGE_RSS_KEY_SIZE;
4087 }
4088
4089 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4090 {
4091         return HCLGE_RSS_IND_TBL_SIZE;
4092 }
4093
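/* Editor's note on the function below: program the RSS hash algorithm and
 * hash key into hardware. The key is written HCLGE_RSS_HASH_KEY_NUM bytes at
 * a time, with the chunk index carried in the hash_config field of each
 * command descriptor.
 */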
4094 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4095                                   const u8 hfunc, const u8 *key)
4096 {
4097         struct hclge_rss_config_cmd *req;
4098         unsigned int key_offset = 0;
4099         struct hclge_desc desc;
4100         int key_counts;
4101         int key_size;
4102         int ret;
4103
4104         key_counts = HCLGE_RSS_KEY_SIZE;
4105         req = (struct hclge_rss_config_cmd *)desc.data;
4106
4107         while (key_counts) {
4108                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4109                                            false);
4110
4111                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4112                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4113
4114                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4115                 memcpy(req->hash_key,
4116                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4117
4118                 key_counts -= key_size;
4119                 key_offset++;
4120                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4121                 if (ret) {
4122                         dev_err(&hdev->pdev->dev,
4123                                 "Configure RSS config fail, status = %d\n",
4124                                 ret);
4125                         return ret;
4126                 }
4127         }
4128         return 0;
4129 }
4130
4131 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4132 {
4133         struct hclge_rss_indirection_table_cmd *req;
4134         struct hclge_desc desc;
4135         int i, j;
4136         int ret;
4137
4138         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4139
4140         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4141                 hclge_cmd_setup_basic_desc
4142                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4143
4144                 req->start_table_index =
4145                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4146                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4147
4148                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4149                         req->rss_result[j] =
4150                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4151
4152                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4153                 if (ret) {
4154                         dev_err(&hdev->pdev->dev,
4155                                 "Configure rss indir table fail,status = %d\n",
4156                                 ret);
4157                         return ret;
4158                 }
4159         }
4160         return 0;
4161 }
4162
4163 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4164                                  u16 *tc_size, u16 *tc_offset)
4165 {
4166         struct hclge_rss_tc_mode_cmd *req;
4167         struct hclge_desc desc;
4168         int ret;
4169         int i;
4170
4171         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4172         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4173
4174         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4175                 u16 mode = 0;
4176
4177                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4178                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4179                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4180                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4181                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4182
4183                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4184         }
4185
4186         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4187         if (ret)
4188                 dev_err(&hdev->pdev->dev,
4189                         "Configure rss tc mode fail, status = %d\n", ret);
4190
4191         return ret;
4192 }
4193
4194 static void hclge_get_rss_type(struct hclge_vport *vport)
4195 {
4196         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4197             vport->rss_tuple_sets.ipv4_udp_en ||
4198             vport->rss_tuple_sets.ipv4_sctp_en ||
4199             vport->rss_tuple_sets.ipv6_tcp_en ||
4200             vport->rss_tuple_sets.ipv6_udp_en ||
4201             vport->rss_tuple_sets.ipv6_sctp_en)
4202                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4203         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4204                  vport->rss_tuple_sets.ipv6_fragment_en)
4205                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4206         else
4207                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4208 }
4209
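/* Editor's note on the function below: push the PF's RSS tuple configuration
 * (which header fields feed the hash for each packet type) to hardware and
 * refresh the cached rss_type.
 */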
4210 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4211 {
4212         struct hclge_rss_input_tuple_cmd *req;
4213         struct hclge_desc desc;
4214         int ret;
4215
4216         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4217
4218         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4219
4220         /* Get the tuple cfg from the PF */
4221         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4222         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4223         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4224         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4225         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4226         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4227         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4228         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4229         hclge_get_rss_type(&hdev->vport[0]);
4230         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4231         if (ret)
4232                 dev_err(&hdev->pdev->dev,
4233                         "Configure rss input fail, status = %d\n", ret);
4234         return ret;
4235 }
4236
4237 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4238                          u8 *key, u8 *hfunc)
4239 {
4240         struct hclge_vport *vport = hclge_get_vport(handle);
4241         int i;
4242
4243         /* Get hash algorithm */
4244         if (hfunc) {
4245                 switch (vport->rss_algo) {
4246                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4247                         *hfunc = ETH_RSS_HASH_TOP;
4248                         break;
4249                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4250                         *hfunc = ETH_RSS_HASH_XOR;
4251                         break;
4252                 default:
4253                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4254                         break;
4255                 }
4256         }
4257
4258         /* Get the RSS Key required by the user */
4259         if (key)
4260                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4261
4262         /* Get indirect table */
4263         if (indir)
4264                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4265                         indir[i] = vport->rss_indirection_tbl[i];
4266
4267         return 0;
4268 }
4269
4270 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4271                          const u8 *key, const u8 hfunc)
4272 {
4273         struct hclge_vport *vport = hclge_get_vport(handle);
4274         struct hclge_dev *hdev = vport->back;
4275         u8 hash_algo;
4276         int ret, i;
4277
4278         /* Set the RSS Hash Key if specified by the user */
4279         if (key) {
4280                 switch (hfunc) {
4281                 case ETH_RSS_HASH_TOP:
4282                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4283                         break;
4284                 case ETH_RSS_HASH_XOR:
4285                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4286                         break;
4287                 case ETH_RSS_HASH_NO_CHANGE:
4288                         hash_algo = vport->rss_algo;
4289                         break;
4290                 default:
4291                         return -EINVAL;
4292                 }
4293
4294                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4295                 if (ret)
4296                         return ret;
4297
4298                 /* Update the shadow RSS key with the user-specified key */
4299                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4300                 vport->rss_algo = hash_algo;
4301         }
4302
4303         /* Update the shadow RSS table with user specified qids */
4304         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4305                 vport->rss_indirection_tbl[i] = indir[i];
4306
4307         /* Update the hardware */
4308         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4309 }
4310
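/* Translate the ethtool RXH_* flags into the hardware hash tuple bits. For
 * example (illustrative), nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3 maps to HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT; SCTP flows additionally set HCLGE_V_TAG_BIT.
 */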
4311 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4312 {
4313         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4314
4315         if (nfc->data & RXH_L4_B_2_3)
4316                 hash_sets |= HCLGE_D_PORT_BIT;
4317         else
4318                 hash_sets &= ~HCLGE_D_PORT_BIT;
4319
4320         if (nfc->data & RXH_IP_SRC)
4321                 hash_sets |= HCLGE_S_IP_BIT;
4322         else
4323                 hash_sets &= ~HCLGE_S_IP_BIT;
4324
4325         if (nfc->data & RXH_IP_DST)
4326                 hash_sets |= HCLGE_D_IP_BIT;
4327         else
4328                 hash_sets &= ~HCLGE_D_IP_BIT;
4329
4330         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4331                 hash_sets |= HCLGE_V_TAG_BIT;
4332
4333         return hash_sets;
4334 }
4335
4336 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4337                                struct ethtool_rxnfc *nfc)
4338 {
4339         struct hclge_vport *vport = hclge_get_vport(handle);
4340         struct hclge_dev *hdev = vport->back;
4341         struct hclge_rss_input_tuple_cmd *req;
4342         struct hclge_desc desc;
4343         u8 tuple_sets;
4344         int ret;
4345
4346         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4347                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4348                 return -EINVAL;
4349
4350         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4351         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4352
4353         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4354         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4355         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4356         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4357         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4358         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4359         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4360         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4361
4362         tuple_sets = hclge_get_rss_hash_bits(nfc);
4363         switch (nfc->flow_type) {
4364         case TCP_V4_FLOW:
4365                 req->ipv4_tcp_en = tuple_sets;
4366                 break;
4367         case TCP_V6_FLOW:
4368                 req->ipv6_tcp_en = tuple_sets;
4369                 break;
4370         case UDP_V4_FLOW:
4371                 req->ipv4_udp_en = tuple_sets;
4372                 break;
4373         case UDP_V6_FLOW:
4374                 req->ipv6_udp_en = tuple_sets;
4375                 break;
4376         case SCTP_V4_FLOW:
4377                 req->ipv4_sctp_en = tuple_sets;
4378                 break;
4379         case SCTP_V6_FLOW:
4380                 if ((nfc->data & RXH_L4_B_0_1) ||
4381                     (nfc->data & RXH_L4_B_2_3))
4382                         return -EINVAL;
4383
4384                 req->ipv6_sctp_en = tuple_sets;
4385                 break;
4386         case IPV4_FLOW:
4387                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4388                 break;
4389         case IPV6_FLOW:
4390                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4391                 break;
4392         default:
4393                 return -EINVAL;
4394         }
4395
4396         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4397         if (ret) {
4398                 dev_err(&hdev->pdev->dev,
4399                         "Set rss tuple fail, status = %d\n", ret);
4400                 return ret;
4401         }
4402
4403         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4404         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4405         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4406         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4407         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4408         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4409         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4410         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4411         hclge_get_rss_type(vport);
4412         return 0;
4413 }
4414
4415 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4416                                struct ethtool_rxnfc *nfc)
4417 {
4418         struct hclge_vport *vport = hclge_get_vport(handle);
4419         u8 tuple_sets;
4420
4421         nfc->data = 0;
4422
4423         switch (nfc->flow_type) {
4424         case TCP_V4_FLOW:
4425                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4426                 break;
4427         case UDP_V4_FLOW:
4428                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4429                 break;
4430         case TCP_V6_FLOW:
4431                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4432                 break;
4433         case UDP_V6_FLOW:
4434                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4435                 break;
4436         case SCTP_V4_FLOW:
4437                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4438                 break;
4439         case SCTP_V6_FLOW:
4440                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4441                 break;
4442         case IPV4_FLOW:
4443         case IPV6_FLOW:
4444                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4445                 break;
4446         default:
4447                 return -EINVAL;
4448         }
4449
4450         if (!tuple_sets)
4451                 return 0;
4452
4453         if (tuple_sets & HCLGE_D_PORT_BIT)
4454                 nfc->data |= RXH_L4_B_2_3;
4455         if (tuple_sets & HCLGE_S_PORT_BIT)
4456                 nfc->data |= RXH_L4_B_0_1;
4457         if (tuple_sets & HCLGE_D_IP_BIT)
4458                 nfc->data |= RXH_IP_DST;
4459         if (tuple_sets & HCLGE_S_IP_BIT)
4460                 nfc->data |= RXH_IP_SRC;
4461
4462         return 0;
4463 }
4464
4465 static int hclge_get_tc_size(struct hnae3_handle *handle)
4466 {
4467         struct hclge_vport *vport = hclge_get_vport(handle);
4468         struct hclge_dev *hdev = vport->back;
4469
4470         return hdev->rss_size_max;
4471 }
4472
4473 int hclge_rss_init_hw(struct hclge_dev *hdev)
4474 {
4475         struct hclge_vport *vport = hdev->vport;
4476         u8 *rss_indir = vport[0].rss_indirection_tbl;
4477         u16 rss_size = vport[0].alloc_rss_size;
4478         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4479         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4480         u8 *key = vport[0].rss_hash_key;
4481         u8 hfunc = vport[0].rss_algo;
4482         u16 tc_valid[HCLGE_MAX_TC_NUM];
4483         u16 roundup_size;
4484         unsigned int i;
4485         int ret;
4486
4487         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4488         if (ret)
4489                 return ret;
4490
4491         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4492         if (ret)
4493                 return ret;
4494
4495         ret = hclge_set_rss_input_tuple(hdev);
4496         if (ret)
4497                 return ret;
4498
4499         /* Each TC has the same queue size, and the tc_size written to
4500          * hardware is the log2 of the roundup power of two of rss_size;
4501          * the actual queue size is limited by the indirection table.
4502          */
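        /* Illustrative values only: rss_size = 6 -> roundup_pow_of_two() = 8
         * -> tc_size written to hardware = ilog2(8) = 3.
         */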
4503         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4504                 dev_err(&hdev->pdev->dev,
4505                         "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4506                         rss_size);
4507                 return -EINVAL;
4508         }
4509
4510         roundup_size = roundup_pow_of_two(rss_size);
4511         roundup_size = ilog2(roundup_size);
4512
4513         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4514                 tc_valid[i] = 0;
4515
4516                 if (!(hdev->hw_tc_map & BIT(i)))
4517                         continue;
4518
4519                 tc_valid[i] = 1;
4520                 tc_size[i] = roundup_size;
4521                 tc_offset[i] = rss_size * i;
4522         }
4523
4524         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4525 }
4526
4527 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4528 {
4529         struct hclge_vport *vport = hdev->vport;
4530         int i, j;
4531
4532         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4533                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4534                         vport[j].rss_indirection_tbl[i] =
4535                                 i % vport[j].alloc_rss_size;
4536         }
4537 }
4538
4539 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4540 {
4541         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4542         struct hclge_vport *vport = hdev->vport;
4543
4544         if (hdev->pdev->revision >= 0x21)
4545                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4546
4547         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4548                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4549                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4550                 vport[i].rss_tuple_sets.ipv4_udp_en =
4551                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4552                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4553                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4554                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4555                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4556                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4557                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4558                 vport[i].rss_tuple_sets.ipv6_udp_en =
4559                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4560                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4561                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4562                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4563                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4564
4565                 vport[i].rss_algo = rss_algo;
4566
4567                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4568                        HCLGE_RSS_KEY_SIZE);
4569         }
4570
4571         hclge_rss_indir_init_cfg(hdev);
4572 }
4573
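/* Ring-to-vector mappings are written in batches: each command descriptor
 * carries at most HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so a long ring
 * chain is split across several HCLGE_OPC_ADD/DEL_RING_TO_VECTOR commands.
 */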
4574 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4575                                 int vector_id, bool en,
4576                                 struct hnae3_ring_chain_node *ring_chain)
4577 {
4578         struct hclge_dev *hdev = vport->back;
4579         struct hnae3_ring_chain_node *node;
4580         struct hclge_desc desc;
4581         struct hclge_ctrl_vector_chain_cmd *req =
4582                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4583         enum hclge_cmd_status status;
4584         enum hclge_opcode_type op;
4585         u16 tqp_type_and_id;
4586         int i;
4587
4588         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4589         hclge_cmd_setup_basic_desc(&desc, op, false);
4590         req->int_vector_id = vector_id;
4591
4592         i = 0;
4593         for (node = ring_chain; node; node = node->next) {
4594                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4595                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4596                                 HCLGE_INT_TYPE_S,
4597                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4598                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4599                                 HCLGE_TQP_ID_S, node->tqp_index);
4600                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4601                                 HCLGE_INT_GL_IDX_S,
4602                                 hnae3_get_field(node->int_gl_idx,
4603                                                 HNAE3_RING_GL_IDX_M,
4604                                                 HNAE3_RING_GL_IDX_S));
4605                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4606                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4607                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4608                         req->vfid = vport->vport_id;
4609
4610                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4611                         if (status) {
4612                                 dev_err(&hdev->pdev->dev,
4613                                         "Map TQP fail, status is %d.\n",
4614                                         status);
4615                                 return -EIO;
4616                         }
4617                         i = 0;
4618
4619                         hclge_cmd_setup_basic_desc(&desc,
4620                                                    op,
4621                                                    false);
4622                         req->int_vector_id = vector_id;
4623                 }
4624         }
4625
4626         if (i > 0) {
4627                 req->int_cause_num = i;
4628                 req->vfid = vport->vport_id;
4629                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4630                 if (status) {
4631                         dev_err(&hdev->pdev->dev,
4632                                 "Map TQP fail, status is %d.\n", status);
4633                         return -EIO;
4634                 }
4635         }
4636
4637         return 0;
4638 }
4639
4640 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4641                                     struct hnae3_ring_chain_node *ring_chain)
4642 {
4643         struct hclge_vport *vport = hclge_get_vport(handle);
4644         struct hclge_dev *hdev = vport->back;
4645         int vector_id;
4646
4647         vector_id = hclge_get_vector_index(hdev, vector);
4648         if (vector_id < 0) {
4649                 dev_err(&hdev->pdev->dev,
4650                         "failed to get vector index. vector=%d\n", vector);
4651                 return vector_id;
4652         }
4653
4654         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4655 }
4656
4657 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4658                                        struct hnae3_ring_chain_node *ring_chain)
4659 {
4660         struct hclge_vport *vport = hclge_get_vport(handle);
4661         struct hclge_dev *hdev = vport->back;
4662         int vector_id, ret;
4663
4664         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4665                 return 0;
4666
4667         vector_id = hclge_get_vector_index(hdev, vector);
4668         if (vector_id < 0) {
4669                 dev_err(&handle->pdev->dev,
4670                         "Get vector index fail, ret = %d\n", vector_id);
4671                 return vector_id;
4672         }
4673
4674         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4675         if (ret)
4676                 dev_err(&handle->pdev->dev,
4677                         "Unmap ring from vector fail. vectorid=%d, ret = %d\n",
4678                         vector_id, ret);
4679
4680         return ret;
4681 }
4682
4683 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4684                                       struct hclge_promisc_param *param)
4685 {
4686         struct hclge_promisc_cfg_cmd *req;
4687         struct hclge_desc desc;
4688         int ret;
4689
4690         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4691
4692         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4693         req->vf_id = param->vf_id;
4694
4695         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4696          * pdev revision(0x20); newer revisions support them. Setting these two
4697          * fields does not cause an error when the driver sends the command to
4698          * the firmware on revision(0x20).
4699          */
4700         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4701                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4702
4703         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4704         if (ret)
4705                 dev_err(&hdev->pdev->dev,
4706                         "failed to set vport %d promisc mode, ret = %d.\n",
4707                         param->vf_id, ret);
4708
4709         return ret;
4710 }
4711
4712 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4713                                      bool en_uc, bool en_mc, bool en_bc,
4714                                      int vport_id)
4715 {
4716         if (!param)
4717                 return;
4718
4719         memset(param, 0, sizeof(struct hclge_promisc_param));
4720         if (en_uc)
4721                 param->enable = HCLGE_PROMISC_EN_UC;
4722         if (en_mc)
4723                 param->enable |= HCLGE_PROMISC_EN_MC;
4724         if (en_bc)
4725                 param->enable |= HCLGE_PROMISC_EN_BC;
4726         param->vf_id = vport_id;
4727 }
4728
4729 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4730                                  bool en_mc_pmc, bool en_bc_pmc)
4731 {
4732         struct hclge_dev *hdev = vport->back;
4733         struct hclge_promisc_param param;
4734
4735         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4736                                  vport->vport_id);
4737         return hclge_cmd_set_promisc_mode(hdev, &param);
4738 }
4739
4740 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4741                                   bool en_mc_pmc)
4742 {
4743         struct hclge_vport *vport = hclge_get_vport(handle);
4744         bool en_bc_pmc = true;
4745
4746         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4747          * is always bypassed. So broadcast promisc should be disabled until
4748          * the user enables promisc mode.
4749          */
4750         if (handle->pdev->revision == 0x20)
4751                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4752
4753         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4754                                             en_bc_pmc);
4755 }
4756
4757 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4758 {
4759         struct hclge_vport *vport = hclge_get_vport(handle);
4760         struct hclge_dev *hdev = vport->back;
4761
4762         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4763 }
4764
4765 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4766 {
4767         struct hclge_get_fd_mode_cmd *req;
4768         struct hclge_desc desc;
4769         int ret;
4770
4771         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4772
4773         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4774
4775         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4776         if (ret) {
4777                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4778                 return ret;
4779         }
4780
4781         *fd_mode = req->mode;
4782
4783         return ret;
4784 }
4785
4786 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4787                                    u32 *stage1_entry_num,
4788                                    u32 *stage2_entry_num,
4789                                    u16 *stage1_counter_num,
4790                                    u16 *stage2_counter_num)
4791 {
4792         struct hclge_get_fd_allocation_cmd *req;
4793         struct hclge_desc desc;
4794         int ret;
4795
4796         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4797
4798         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4799
4800         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4801         if (ret) {
4802                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4803                         ret);
4804                 return ret;
4805         }
4806
4807         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4808         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4809         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4810         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4811
4812         return ret;
4813 }
4814
4815 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4816                                    enum HCLGE_FD_STAGE stage_num)
4817 {
4818         struct hclge_set_fd_key_config_cmd *req;
4819         struct hclge_fd_key_cfg *stage;
4820         struct hclge_desc desc;
4821         int ret;
4822
4823         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4824
4825         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4826         stage = &hdev->fd_cfg.key_cfg[stage_num];
4827         req->stage = stage_num;
4828         req->key_select = stage->key_sel;
4829         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4830         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4831         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4832         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4833         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4834         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4835
4836         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4837         if (ret)
4838                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4839
4840         return ret;
4841 }
4842
4843 static int hclge_init_fd_config(struct hclge_dev *hdev)
4844 {
4845 #define LOW_2_WORDS             0x03
4846         struct hclge_fd_key_cfg *key_cfg;
4847         int ret;
4848
4849         if (!hnae3_dev_fd_supported(hdev))
4850                 return 0;
4851
4852         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4853         if (ret)
4854                 return ret;
4855
4856         switch (hdev->fd_cfg.fd_mode) {
4857         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4858                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4859                 break;
4860         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4861                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4862                 break;
4863         default:
4864                 dev_err(&hdev->pdev->dev,
4865                         "Unsupported flow director mode %u\n",
4866                         hdev->fd_cfg.fd_mode);
4867                 return -EOPNOTSUPP;
4868         }
4869
4870         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4871         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4872         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4873         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4874         key_cfg->outer_sipv6_word_en = 0;
4875         key_cfg->outer_dipv6_word_en = 0;
4876
4877         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4878                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4879                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4880                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4881
4882         /* If the max 400-bit key is used, MAC address tuples can also be supported */
4883         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
4884                 key_cfg->tuple_active |=
4885                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4886
4887         /* roce_type is used to filter roce frames
4888          * dst_vport is used to specify the rule
4889          */
4890         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4891
4892         ret = hclge_get_fd_allocation(hdev,
4893                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4894                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4895                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4896                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4897         if (ret)
4898                 return ret;
4899
4900         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4901 }
4902
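/* A flow director TCAM entry is written with three chained descriptors; the
 * key bytes are split in order across the tcam_data buffers of the three
 * descriptors.
 */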
4903 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4904                                 int loc, u8 *key, bool is_add)
4905 {
4906         struct hclge_fd_tcam_config_1_cmd *req1;
4907         struct hclge_fd_tcam_config_2_cmd *req2;
4908         struct hclge_fd_tcam_config_3_cmd *req3;
4909         struct hclge_desc desc[3];
4910         int ret;
4911
4912         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4913         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4914         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4915         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4916         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4917
4918         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4919         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4920         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4921
4922         req1->stage = stage;
4923         req1->xy_sel = sel_x ? 1 : 0;
4924         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4925         req1->index = cpu_to_le32(loc);
4926         req1->entry_vld = sel_x ? is_add : 0;
4927
4928         if (key) {
4929                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4930                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4931                        sizeof(req2->tcam_data));
4932                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4933                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4934         }
4935
4936         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4937         if (ret)
4938                 dev_err(&hdev->pdev->dev,
4939                         "config tcam key fail, ret=%d\n",
4940                         ret);
4941
4942         return ret;
4943 }
4944
4945 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4946                               struct hclge_fd_ad_data *action)
4947 {
4948         struct hclge_fd_ad_config_cmd *req;
4949         struct hclge_desc desc;
4950         u64 ad_data = 0;
4951         int ret;
4952
4953         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4954
4955         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4956         req->index = cpu_to_le32(loc);
4957         req->stage = stage;
4958
4959         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4960                       action->write_rule_id_to_bd);
4961         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4962                         action->rule_id);
4963         ad_data <<= 32;
4964         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4965         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4966                       action->forward_to_direct_queue);
4967         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4968                         action->queue_id);
4969         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4970         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4971                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4972         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4973         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4974                         action->counter_id);
4975
4976         req->ad_data = cpu_to_le64(ad_data);
4977         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4978         if (ret)
4979                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4980
4981         return ret;
4982 }
4983
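/* Convert one tuple of the rule into its TCAM key_x/key_y form. The key pair
 * is derived from the tuple value and its mask via calc_x()/calc_y(). Returns
 * true when the tuple occupies key space (the bytes stay zero for an unused
 * tuple), false for tuple bits this function does not handle.
 */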
4984 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4985                                    struct hclge_fd_rule *rule)
4986 {
4987         u16 tmp_x_s, tmp_y_s;
4988         u32 tmp_x_l, tmp_y_l;
4989         int i;
4990
4991         if (rule->unused_tuple & tuple_bit)
4992                 return true;
4993
4994         switch (tuple_bit) {
4995         case BIT(INNER_DST_MAC):
4996                 for (i = 0; i < ETH_ALEN; i++) {
4997                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4998                                rule->tuples_mask.dst_mac[i]);
4999                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5000                                rule->tuples_mask.dst_mac[i]);
5001                 }
5002
5003                 return true;
5004         case BIT(INNER_SRC_MAC):
5005                 for (i = 0; i < ETH_ALEN; i++) {
5006                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5007                                rule->tuples_mask.src_mac[i]);
5008                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5009                                rule->tuples_mask.src_mac[i]);
5010                 }
5011
5012                 return true;
5013         case BIT(INNER_VLAN_TAG_FST):
5014                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5015                        rule->tuples_mask.vlan_tag1);
5016                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5017                        rule->tuples_mask.vlan_tag1);
5018                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5019                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5020
5021                 return true;
5022         case BIT(INNER_ETH_TYPE):
5023                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5024                        rule->tuples_mask.ether_proto);
5025                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5026                        rule->tuples_mask.ether_proto);
5027                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5028                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5029
5030                 return true;
5031         case BIT(INNER_IP_TOS):
5032                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5033                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5034
5035                 return true;
5036         case BIT(INNER_IP_PROTO):
5037                 calc_x(*key_x, rule->tuples.ip_proto,
5038                        rule->tuples_mask.ip_proto);
5039                 calc_y(*key_y, rule->tuples.ip_proto,
5040                        rule->tuples_mask.ip_proto);
5041
5042                 return true;
5043         case BIT(INNER_SRC_IP):
5044                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5045                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5046                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5047                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5048                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5049                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5050
5051                 return true;
5052         case BIT(INNER_DST_IP):
5053                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5054                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5055                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5056                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5057                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5058                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5059
5060                 return true;
5061         case BIT(INNER_SRC_PORT):
5062                 calc_x(tmp_x_s, rule->tuples.src_port,
5063                        rule->tuples_mask.src_port);
5064                 calc_y(tmp_y_s, rule->tuples.src_port,
5065                        rule->tuples_mask.src_port);
5066                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5067                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5068
5069                 return true;
5070         case BIT(INNER_DST_PORT):
5071                 calc_x(tmp_x_s, rule->tuples.dst_port,
5072                        rule->tuples_mask.dst_port);
5073                 calc_y(tmp_y_s, rule->tuples.dst_port,
5074                        rule->tuples_mask.dst_port);
5075                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5076                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5077
5078                 return true;
5079         default:
5080                 return false;
5081         }
5082 }
5083
5084 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5085                                  u8 vf_id, u8 network_port_id)
5086 {
5087         u32 port_number = 0;
5088
5089         if (port_type == HOST_PORT) {
5090                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5091                                 pf_id);
5092                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5093                                 vf_id);
5094                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5095         } else {
5096                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5097                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5098                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5099         }
5100
5101         return port_number;
5102 }
5103
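/* Pack the active meta data fields (packet type and destination vport number)
 * into a 32-bit word, convert it to key_x/key_y with a full mask, and shift
 * the result so that it occupies the most significant bits of the meta data
 * region of the key.
 */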
5104 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5105                                        __le32 *key_x, __le32 *key_y,
5106                                        struct hclge_fd_rule *rule)
5107 {
5108         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5109         u8 cur_pos = 0, tuple_size, shift_bits;
5110         unsigned int i;
5111
5112         for (i = 0; i < MAX_META_DATA; i++) {
5113                 tuple_size = meta_data_key_info[i].key_length;
5114                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5115
5116                 switch (tuple_bit) {
5117                 case BIT(ROCE_TYPE):
5118                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5119                         cur_pos += tuple_size;
5120                         break;
5121                 case BIT(DST_VPORT):
5122                         port_number = hclge_get_port_number(HOST_PORT, 0,
5123                                                             rule->vf_id, 0);
5124                         hnae3_set_field(meta_data,
5125                                         GENMASK(cur_pos + tuple_size, cur_pos),
5126                                         cur_pos, port_number);
5127                         cur_pos += tuple_size;
5128                         break;
5129                 default:
5130                         break;
5131                 }
5132         }
5133
5134         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5135         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5136         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5137
5138         *key_x = cpu_to_le32(tmp_x << shift_bits);
5139         *key_y = cpu_to_le32(tmp_y << shift_bits);
5140 }
5141
5142 /* A complete key consists of a meta data key and a tuple key.
5143  * The meta data key is stored at the MSB region, the tuple key is stored at
5144  * the LSB region, and unused bits are filled with 0.
5145  */
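/* Illustrative layout of the key buffer (sizes not to scale):
 *   [ tuple key (LSB side) | zero padding | meta data key (MSB side) ]
 */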
5146 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5147                             struct hclge_fd_rule *rule)
5148 {
5149         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5150         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5151         u8 *cur_key_x, *cur_key_y;
5152         u8 meta_data_region;
5153         u8 tuple_size;
5154         int ret;
5155         u32 i;
5156
5157         memset(key_x, 0, sizeof(key_x));
5158         memset(key_y, 0, sizeof(key_y));
5159         cur_key_x = key_x;
5160         cur_key_y = key_y;
5161
5162         for (i = 0; i < MAX_TUPLE; i++) {
5163                 bool tuple_valid;
5164                 u32 check_tuple;
5165
5166                 tuple_size = tuple_key_info[i].key_length / 8;
5167                 check_tuple = key_cfg->tuple_active & BIT(i);
5168
5169                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5170                                                      cur_key_y, rule);
5171                 if (tuple_valid) {
5172                         cur_key_x += tuple_size;
5173                         cur_key_y += tuple_size;
5174                 }
5175         }
5176
5177         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5178                         MAX_META_DATA_LENGTH / 8;
5179
5180         hclge_fd_convert_meta_data(key_cfg,
5181                                    (__le32 *)(key_x + meta_data_region),
5182                                    (__le32 *)(key_y + meta_data_region),
5183                                    rule);
5184
5185         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5186                                    true);
5187         if (ret) {
5188                 dev_err(&hdev->pdev->dev,
5189                         "fd key_y config fail, loc=%u, ret=%d\n",
5190                         rule->location, ret);
5191                 return ret;
5192         }
5193
5194         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5195                                    true);
5196         if (ret)
5197                 dev_err(&hdev->pdev->dev,
5198                         "fd key_x config fail, loc=%u, ret=%d\n",
5199                         rule->location, ret);
5200         return ret;
5201 }
5202
5203 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5204                                struct hclge_fd_rule *rule)
5205 {
5206         struct hclge_fd_ad_data ad_data;
5207
5208         ad_data.ad_id = rule->location;
5209
5210         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5211                 ad_data.drop_packet = true;
5212                 ad_data.forward_to_direct_queue = false;
5213                 ad_data.queue_id = 0;
5214         } else {
5215                 ad_data.drop_packet = false;
5216                 ad_data.forward_to_direct_queue = true;
5217                 ad_data.queue_id = rule->queue_id;
5218         }
5219
5220         ad_data.use_counter = false;
5221         ad_data.counter_id = 0;
5222
5223         ad_data.use_next_stage = false;
5224         ad_data.next_input_key = 0;
5225
5226         ad_data.write_rule_id_to_bd = true;
5227         ad_data.rule_id = rule->location;
5228
5229         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5230 }
5231
5232 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5233                                        u32 *unused_tuple)
5234 {
5235         if (!spec || !unused_tuple)
5236                 return -EINVAL;
5237
5238         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5239
5240         if (!spec->ip4src)
5241                 *unused_tuple |= BIT(INNER_SRC_IP);
5242
5243         if (!spec->ip4dst)
5244                 *unused_tuple |= BIT(INNER_DST_IP);
5245
5246         if (!spec->psrc)
5247                 *unused_tuple |= BIT(INNER_SRC_PORT);
5248
5249         if (!spec->pdst)
5250                 *unused_tuple |= BIT(INNER_DST_PORT);
5251
5252         if (!spec->tos)
5253                 *unused_tuple |= BIT(INNER_IP_TOS);
5254
5255         return 0;
5256 }
5257
5258 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5259                                     u32 *unused_tuple)
5260 {
5261         if (!spec || !unused_tuple)
5262                 return -EINVAL;
5263
5264         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5265                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5266
5267         if (!spec->ip4src)
5268                 *unused_tuple |= BIT(INNER_SRC_IP);
5269
5270         if (!spec->ip4dst)
5271                 *unused_tuple |= BIT(INNER_DST_IP);
5272
5273         if (!spec->tos)
5274                 *unused_tuple |= BIT(INNER_IP_TOS);
5275
5276         if (!spec->proto)
5277                 *unused_tuple |= BIT(INNER_IP_PROTO);
5278
5279         if (spec->l4_4_bytes)
5280                 return -EOPNOTSUPP;
5281
5282         if (spec->ip_ver != ETH_RX_NFC_IP4)
5283                 return -EOPNOTSUPP;
5284
5285         return 0;
5286 }
5287
5288 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5289                                        u32 *unused_tuple)
5290 {
5291         if (!spec || !unused_tuple)
5292                 return -EINVAL;
5293
5294         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5295                 BIT(INNER_IP_TOS);
5296
5297         /* check whether the src/dst ip address is used */
5298         if (!spec->ip6src[0] && !spec->ip6src[1] &&
5299             !spec->ip6src[2] && !spec->ip6src[3])
5300                 *unused_tuple |= BIT(INNER_SRC_IP);
5301
5302         if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5303             !spec->ip6dst[2] && !spec->ip6dst[3])
5304                 *unused_tuple |= BIT(INNER_DST_IP);
5305
5306         if (!spec->psrc)
5307                 *unused_tuple |= BIT(INNER_SRC_PORT);
5308
5309         if (!spec->pdst)
5310                 *unused_tuple |= BIT(INNER_DST_PORT);
5311
5312         if (spec->tclass)
5313                 return -EOPNOTSUPP;
5314
5315         return 0;
5316 }
5317
5318 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5319                                     u32 *unused_tuple)
5320 {
5321         if (!spec || !unused_tuple)
5322                 return -EINVAL;
5323
5324         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5325                 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5326
5327         /* check whether the src/dst ip address is used */
5328         if (!spec->ip6src[0] && !spec->ip6src[1] &&
5329             !spec->ip6src[2] && !spec->ip6src[3])
5330                 *unused_tuple |= BIT(INNER_SRC_IP);
5331
5332         if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5333             !spec->ip6dst[2] && !spec->ip6dst[3])
5334                 *unused_tuple |= BIT(INNER_DST_IP);
5335
5336         if (!spec->l4_proto)
5337                 *unused_tuple |= BIT(INNER_IP_PROTO);
5338
5339         if (spec->tclass)
5340                 return -EOPNOTSUPP;
5341
5342         if (spec->l4_4_bytes)
5343                 return -EOPNOTSUPP;
5344
5345         return 0;
5346 }
5347
5348 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5349 {
5350         if (!spec || !unused_tuple)
5351                 return -EINVAL;
5352
5353         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5354                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5355                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5356
5357         if (is_zero_ether_addr(spec->h_source))
5358                 *unused_tuple |= BIT(INNER_SRC_MAC);
5359
5360         if (is_zero_ether_addr(spec->h_dest))
5361                 *unused_tuple |= BIT(INNER_DST_MAC);
5362
5363         if (!spec->h_proto)
5364                 *unused_tuple |= BIT(INNER_ETH_TYPE);
5365
5366         return 0;
5367 }
5368
5369 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5370                                     struct ethtool_rx_flow_spec *fs,
5371                                     u32 *unused_tuple)
5372 {
5373         if (fs->flow_type & FLOW_EXT) {
5374                 if (fs->h_ext.vlan_etype) {
5375                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5376                         return -EOPNOTSUPP;
5377                 }
5378
5379                 if (!fs->h_ext.vlan_tci)
5380                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5381
5382                 if (fs->m_ext.vlan_tci &&
5383                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5384                         dev_err(&hdev->pdev->dev,
5385                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5386                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5387                         return -EINVAL;
5388                 }
5389         } else {
5390                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5391         }
5392
5393         if (fs->flow_type & FLOW_MAC_EXT) {
5394                 if (hdev->fd_cfg.fd_mode !=
5395                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5396                         dev_err(&hdev->pdev->dev,
5397                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5398                         return -EOPNOTSUPP;
5399                 }
5400
5401                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5402                         *unused_tuple |= BIT(INNER_DST_MAC);
5403                 else
5404                         *unused_tuple &= ~BIT(INNER_DST_MAC);
5405         }
5406
5407         return 0;
5408 }
5409
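/* Validate an ethtool flow spec before it is turned into a rule: check the
 * rule location, reject unsupported user-defined extension bytes, and collect
 * the bitmap of tuples the rule leaves unused.
 */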
5410 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5411                                struct ethtool_rx_flow_spec *fs,
5412                                u32 *unused_tuple)
5413 {
5414         u32 flow_type;
5415         int ret;
5416
5417         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5418                 dev_err(&hdev->pdev->dev,
5419                         "failed to config fd rules, invalid rule location: %u, max is %u.\n",
5420                         fs->location,
5421                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5422                 return -EINVAL;
5423         }
5424
5425         if ((fs->flow_type & FLOW_EXT) &&
5426             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5427                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5428                 return -EOPNOTSUPP;
5429         }
5430
5431         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5432         switch (flow_type) {
5433         case SCTP_V4_FLOW:
5434         case TCP_V4_FLOW:
5435         case UDP_V4_FLOW:
5436                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5437                                                   unused_tuple);
5438                 break;
5439         case IP_USER_FLOW:
5440                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5441                                                unused_tuple);
5442                 break;
5443         case SCTP_V6_FLOW:
5444         case TCP_V6_FLOW:
5445         case UDP_V6_FLOW:
5446                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5447                                                   unused_tuple);
5448                 break;
5449         case IPV6_USER_FLOW:
5450                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5451                                                unused_tuple);
5452                 break;
5453         case ETHER_FLOW:
5454                 if (hdev->fd_cfg.fd_mode !=
5455                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5456                         dev_err(&hdev->pdev->dev,
5457                                 "ETHER_FLOW is not supported in current fd mode!\n");
5458                         return -EOPNOTSUPP;
5459                 }
5460
5461                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5462                                                  unused_tuple);
5463                 break;
5464         default:
5465                 dev_err(&hdev->pdev->dev,
5466                         "unsupported protocol type, protocol type = %#x\n",
5467                         flow_type);
5468                 return -EOPNOTSUPP;
5469         }
5470
5471         if (ret) {
5472                 dev_err(&hdev->pdev->dev,
5473                         "failed to check flow union tuple, ret = %d\n",
5474                         ret);
5475                 return ret;
5476         }
5477
5478         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5479 }
5480
5481 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5482 {
5483         struct hclge_fd_rule *rule = NULL;
5484         struct hlist_node *node2;
5485
5486         spin_lock_bh(&hdev->fd_rule_lock);
5487         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5488                 if (rule->location >= location)
5489                         break;
5490         }
5491
5492         spin_unlock_bh(&hdev->fd_rule_lock);
5493
5494         return rule && rule->location == location;
5495 }
5496
5497 /* The caller must hold fd_rule_lock before calling this function */
5498 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5499                                      struct hclge_fd_rule *new_rule,
5500                                      u16 location,
5501                                      bool is_add)
5502 {
5503         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5504         struct hlist_node *node2;
5505
5506         if (is_add && !new_rule)
5507                 return -EINVAL;
5508
5509         hlist_for_each_entry_safe(rule, node2,
5510                                   &hdev->fd_rule_list, rule_node) {
5511                 if (rule->location >= location)
5512                         break;
5513                 parent = rule;
5514         }
5515
5516         if (rule && rule->location == location) {
5517                 hlist_del(&rule->rule_node);
5518                 kfree(rule);
5519                 hdev->hclge_fd_rule_num--;
5520
5521                 if (!is_add) {
5522                         if (!hdev->hclge_fd_rule_num)
5523                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5524                         clear_bit(location, hdev->fd_bmap);
5525
5526                         return 0;
5527                 }
5528         } else if (!is_add) {
5529                 dev_err(&hdev->pdev->dev,
5530                         "delete fail, rule %u does not exist\n",
5531                         location);
5532                 return -EINVAL;
5533         }
5534
5535         INIT_HLIST_NODE(&new_rule->rule_node);
5536
5537         if (parent)
5538                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5539         else
5540                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5541
5542         set_bit(location, hdev->fd_bmap);
5543         hdev->hclge_fd_rule_num++;
5544         hdev->fd_active_type = new_rule->rule_type;
5545
5546         return 0;
5547 }
5548
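/* Copy the user-supplied ethtool flow spec into the rule's tuple values and
 * masks, converting from big endian; the second switch below fills in the
 * implicit L4 protocol for TCP/UDP/SCTP flow types.
 */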
5549 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5550                               struct ethtool_rx_flow_spec *fs,
5551                               struct hclge_fd_rule *rule)
5552 {
5553         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5554
5555         switch (flow_type) {
5556         case SCTP_V4_FLOW:
5557         case TCP_V4_FLOW:
5558         case UDP_V4_FLOW:
5559                 rule->tuples.src_ip[IPV4_INDEX] =
5560                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5561                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5562                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5563
5564                 rule->tuples.dst_ip[IPV4_INDEX] =
5565                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5566                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5567                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5568
5569                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5570                 rule->tuples_mask.src_port =
5571                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5572
5573                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5574                 rule->tuples_mask.dst_port =
5575                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5576
5577                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5578                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5579
5580                 rule->tuples.ether_proto = ETH_P_IP;
5581                 rule->tuples_mask.ether_proto = 0xFFFF;
5582
5583                 break;
5584         case IP_USER_FLOW:
5585                 rule->tuples.src_ip[IPV4_INDEX] =
5586                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5587                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5588                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5589
5590                 rule->tuples.dst_ip[IPV4_INDEX] =
5591                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5592                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5593                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5594
5595                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5596                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5597
5598                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5599                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5600
5601                 rule->tuples.ether_proto = ETH_P_IP;
5602                 rule->tuples_mask.ether_proto = 0xFFFF;
5603
5604                 break;
5605         case SCTP_V6_FLOW:
5606         case TCP_V6_FLOW:
5607         case UDP_V6_FLOW:
5608                 be32_to_cpu_array(rule->tuples.src_ip,
5609                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5610                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5611                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5612
5613                 be32_to_cpu_array(rule->tuples.dst_ip,
5614                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5615                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5616                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5617
5618                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5619                 rule->tuples_mask.src_port =
5620                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5621
5622                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5623                 rule->tuples_mask.dst_port =
5624                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5625
5626                 rule->tuples.ether_proto = ETH_P_IPV6;
5627                 rule->tuples_mask.ether_proto = 0xFFFF;
5628
5629                 break;
5630         case IPV6_USER_FLOW:
5631                 be32_to_cpu_array(rule->tuples.src_ip,
5632                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5633                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5634                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5635
5636                 be32_to_cpu_array(rule->tuples.dst_ip,
5637                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5638                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5639                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5640
5641                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5642                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5643
5644                 rule->tuples.ether_proto = ETH_P_IPV6;
5645                 rule->tuples_mask.ether_proto = 0xFFFF;
5646
5647                 break;
5648         case ETHER_FLOW:
5649                 ether_addr_copy(rule->tuples.src_mac,
5650                                 fs->h_u.ether_spec.h_source);
5651                 ether_addr_copy(rule->tuples_mask.src_mac,
5652                                 fs->m_u.ether_spec.h_source);
5653
5654                 ether_addr_copy(rule->tuples.dst_mac,
5655                                 fs->h_u.ether_spec.h_dest);
5656                 ether_addr_copy(rule->tuples_mask.dst_mac,
5657                                 fs->m_u.ether_spec.h_dest);
5658
5659                 rule->tuples.ether_proto =
5660                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5661                 rule->tuples_mask.ether_proto =
5662                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5663
5664                 break;
5665         default:
5666                 return -EOPNOTSUPP;
5667         }
5668
5669         switch (flow_type) {
5670         case SCTP_V4_FLOW:
5671         case SCTP_V6_FLOW:
5672                 rule->tuples.ip_proto = IPPROTO_SCTP;
5673                 rule->tuples_mask.ip_proto = 0xFF;
5674                 break;
5675         case TCP_V4_FLOW:
5676         case TCP_V6_FLOW:
5677                 rule->tuples.ip_proto = IPPROTO_TCP;
5678                 rule->tuples_mask.ip_proto = 0xFF;
5679                 break;
5680         case UDP_V4_FLOW:
5681         case UDP_V6_FLOW:
5682                 rule->tuples.ip_proto = IPPROTO_UDP;
5683                 rule->tuples_mask.ip_proto = 0xFF;
5684                 break;
5685         default:
5686                 break;
5687         }
5688
5689         if (fs->flow_type & FLOW_EXT) {
5690                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5691                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5692         }
5693
5694         if (fs->flow_type & FLOW_MAC_EXT) {
5695                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5696                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5697         }
5698
5699         return 0;
5700 }
5701
5702 /* make sure to be called with fd_rule_lock held */
5703 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5704                                 struct hclge_fd_rule *rule)
5705 {
5706         int ret;
5707
5708         if (!rule) {
5709                 dev_err(&hdev->pdev->dev,
5710                         "The flow director rule is NULL\n");
5711                 return -EINVAL;
5712         }
5713
5714         /* it never fails here, so there is no need to check the return value */
5715         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5716
5717         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5718         if (ret)
5719                 goto clear_rule;
5720
5721         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5722         if (ret)
5723                 goto clear_rule;
5724
5725         return 0;
5726
5727 clear_rule:
5728         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5729         return ret;
5730 }
5731
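/* Add a flow director rule configured via ethtool. The ring_cookie encodes
 * the action: RX_CLS_FLOW_DISC means drop the packet; otherwise the low bits
 * select the queue and ethtool_get_flow_spec_ring_vf() extracts the VF index,
 * a non-zero VF redirecting the flow to that VF's vport.
 */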
5732 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5733                               struct ethtool_rxnfc *cmd)
5734 {
5735         struct hclge_vport *vport = hclge_get_vport(handle);
5736         struct hclge_dev *hdev = vport->back;
5737         u16 dst_vport_id = 0, q_index = 0;
5738         struct ethtool_rx_flow_spec *fs;
5739         struct hclge_fd_rule *rule;
5740         u32 unused = 0;
5741         u8 action;
5742         int ret;
5743
5744         if (!hnae3_dev_fd_supported(hdev)) {
5745                 dev_err(&hdev->pdev->dev,
5746                         "flow director is not supported\n");
5747                 return -EOPNOTSUPP;
5748         }
5749
5750         if (!hdev->fd_en) {
5751                 dev_err(&hdev->pdev->dev,
5752                         "please enable flow director first\n");
5753                 return -EOPNOTSUPP;
5754         }
5755
5756         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5757
5758         ret = hclge_fd_check_spec(hdev, fs, &unused);
5759         if (ret)
5760                 return ret;
5761
5762         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5763                 action = HCLGE_FD_ACTION_DROP_PACKET;
5764         } else {
5765                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5766                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5767                 u16 tqps;
5768
5769                 if (vf > hdev->num_req_vfs) {
5770                         dev_err(&hdev->pdev->dev,
5771                                 "Error: vf id (%u) > max vf num (%u)\n",
5772                                 vf, hdev->num_req_vfs);
5773                         return -EINVAL;
5774                 }
5775
5776                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5777                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5778
5779                 if (ring >= tqps) {
5780                         dev_err(&hdev->pdev->dev,
5781                                 "Error: queue id (%u) > max tqp num (%u)\n",
5782                                 ring, tqps - 1);
5783                         return -EINVAL;
5784                 }
5785
5786                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5787                 q_index = ring;
5788         }
5789
5790         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5791         if (!rule)
5792                 return -ENOMEM;
5793
5794         ret = hclge_fd_get_tuple(hdev, fs, rule);
5795         if (ret) {
5796                 kfree(rule);
5797                 return ret;
5798         }
5799
5800         rule->flow_type = fs->flow_type;
5801         rule->location = fs->location;
5802         rule->unused_tuple = unused;
5803         rule->vf_id = dst_vport_id;
5804         rule->queue_id = q_index;
5805         rule->action = action;
5806         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5807
5808         /* to avoid rule conflict, when the user configures a rule by ethtool,
5809          * we need to clear all arfs rules
5810          */
5811         hclge_clear_arfs_rules(handle);
5812
5813         spin_lock_bh(&hdev->fd_rule_lock);
5814         ret = hclge_fd_config_rule(hdev, rule);
5815
5816         spin_unlock_bh(&hdev->fd_rule_lock);
5817
5818         return ret;
5819 }
5820
5821 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5822                               struct ethtool_rxnfc *cmd)
5823 {
5824         struct hclge_vport *vport = hclge_get_vport(handle);
5825         struct hclge_dev *hdev = vport->back;
5826         struct ethtool_rx_flow_spec *fs;
5827         int ret;
5828
5829         if (!hnae3_dev_fd_supported(hdev))
5830                 return -EOPNOTSUPP;
5831
5832         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5833
5834         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5835                 return -EINVAL;
5836
5837         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5838                 dev_err(&hdev->pdev->dev,
5839                         "Delete failed, rule %u does not exist\n", fs->location);
5840                 return -ENOENT;
5841         }
5842
5843         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5844                                    NULL, false);
5845         if (ret)
5846                 return ret;
5847
5848         spin_lock_bh(&hdev->fd_rule_lock);
5849         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5850
5851         spin_unlock_bh(&hdev->fd_rule_lock);
5852
5853         return ret;
5854 }
5855
5856 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5857                                      bool clear_list)
5858 {
5859         struct hclge_vport *vport = hclge_get_vport(handle);
5860         struct hclge_dev *hdev = vport->back;
5861         struct hclge_fd_rule *rule;
5862         struct hlist_node *node;
5863         u16 location;
5864
5865         if (!hnae3_dev_fd_supported(hdev))
5866                 return;
5867
5868         spin_lock_bh(&hdev->fd_rule_lock);
5869         for_each_set_bit(location, hdev->fd_bmap,
5870                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5871                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5872                                      NULL, false);
5873
5874         if (clear_list) {
5875                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5876                                           rule_node) {
5877                         hlist_del(&rule->rule_node);
5878                         kfree(rule);
5879                 }
5880                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5881                 hdev->hclge_fd_rule_num = 0;
5882                 bitmap_zero(hdev->fd_bmap,
5883                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5884         }
5885
5886         spin_unlock_bh(&hdev->fd_rule_lock);
5887 }
5888
5889 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5890 {
5891         struct hclge_vport *vport = hclge_get_vport(handle);
5892         struct hclge_dev *hdev = vport->back;
5893         struct hclge_fd_rule *rule;
5894         struct hlist_node *node;
5895         int ret;
5896
5897         /* Return ok here, because reset error handling will check this
5898          * return value. If error is returned here, the reset process will
5899          * fail.
5900          */
5901         if (!hnae3_dev_fd_supported(hdev))
5902                 return 0;
5903
5904         /* if fd is disabled, there is no need to restore the rules during reset */
5905         if (!hdev->fd_en)
5906                 return 0;
5907
5908         spin_lock_bh(&hdev->fd_rule_lock);
5909         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5910                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5911                 if (!ret)
5912                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5913
5914                 if (ret) {
5915                         dev_warn(&hdev->pdev->dev,
5916                                  "Restore rule %u failed, remove it\n",
5917                                  rule->location);
5918                         clear_bit(rule->location, hdev->fd_bmap);
5919                         hlist_del(&rule->rule_node);
5920                         kfree(rule);
5921                         hdev->hclge_fd_rule_num--;
5922                 }
5923         }
5924
5925         if (hdev->hclge_fd_rule_num)
5926                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5927
5928         spin_unlock_bh(&hdev->fd_rule_lock);
5929
5930         return 0;
5931 }
5932
5933 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5934                                  struct ethtool_rxnfc *cmd)
5935 {
5936         struct hclge_vport *vport = hclge_get_vport(handle);
5937         struct hclge_dev *hdev = vport->back;
5938
5939         if (!hnae3_dev_fd_supported(hdev))
5940                 return -EOPNOTSUPP;
5941
5942         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5943         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5944
5945         return 0;
5946 }
5947
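/* The *_get_*_info() helpers below convert a stored rule back into an
 * ethtool flow spec. A tuple flagged in rule->unused_tuple is reported with
 * a zero mask, i.e. "not used for matching"; otherwise the saved mask from
 * rule->tuples_mask is returned.
 */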
5948 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
5949                                      struct ethtool_tcpip4_spec *spec,
5950                                      struct ethtool_tcpip4_spec *spec_mask)
5951 {
5952         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5953         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5954                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5955
5956         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5957         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5958                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5959
5960         spec->psrc = cpu_to_be16(rule->tuples.src_port);
5961         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5962                         0 : cpu_to_be16(rule->tuples_mask.src_port);
5963
5964         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
5965         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
5966                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
5967
5968         spec->tos = rule->tuples.ip_tos;
5969         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5970                         0 : rule->tuples_mask.ip_tos;
5971 }
5972
5973 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
5974                                   struct ethtool_usrip4_spec *spec,
5975                                   struct ethtool_usrip4_spec *spec_mask)
5976 {
5977         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5978         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5979                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5980
5981         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5982         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5983                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5984
5985         spec->tos = rule->tuples.ip_tos;
5986         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5987                         0 : rule->tuples_mask.ip_tos;
5988
5989         spec->proto = rule->tuples.ip_proto;
5990         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5991                         0 : rule->tuples_mask.ip_proto;
5992
5993         spec->ip_ver = ETH_RX_NFC_IP4;
5994 }
5995
5996 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
5997                                      struct ethtool_tcpip6_spec *spec,
5998                                      struct ethtool_tcpip6_spec *spec_mask)
5999 {
6000         cpu_to_be32_array(spec->ip6src,
6001                           rule->tuples.src_ip, IPV6_SIZE);
6002         cpu_to_be32_array(spec->ip6dst,
6003                           rule->tuples.dst_ip, IPV6_SIZE);
6004         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6005                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6006         else
6007                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6008                                   IPV6_SIZE);
6009
6010         if (rule->unused_tuple & BIT(INNER_DST_IP))
6011                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6012         else
6013                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6014                                   IPV6_SIZE);
6015
6016         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6017         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6018                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6019
6020         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6021         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6022                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6023 }
6024
6025 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6026                                   struct ethtool_usrip6_spec *spec,
6027                                   struct ethtool_usrip6_spec *spec_mask)
6028 {
6029         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6030         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6031         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6032                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6033         else
6034                 cpu_to_be32_array(spec_mask->ip6src,
6035                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6036
6037         if (rule->unused_tuple & BIT(INNER_DST_IP))
6038                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6039         else
6040                 cpu_to_be32_array(spec_mask->ip6dst,
6041                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6042
6043         spec->l4_proto = rule->tuples.ip_proto;
6044         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6045                         0 : rule->tuples_mask.ip_proto;
6046 }
6047
6048 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6049                                     struct ethhdr *spec,
6050                                     struct ethhdr *spec_mask)
6051 {
6052         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6053         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6054
6055         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6056                 eth_zero_addr(spec_mask->h_source);
6057         else
6058                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6059
6060         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6061                 eth_zero_addr(spec_mask->h_dest);
6062         else
6063                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6064
6065         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6066         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6067                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6068 }
6069
6070 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6071                                   struct hclge_fd_rule *rule)
6072 {
6073         if (fs->flow_type & FLOW_EXT) {
6074                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6075                 fs->m_ext.vlan_tci =
6076                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6077                                 cpu_to_be16(VLAN_VID_MASK) :
6078                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6079         }
6080
6081         if (fs->flow_type & FLOW_MAC_EXT) {
6082                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6083                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6084                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6085                 else
6086                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6087                                         rule->tuples_mask.dst_mac);
6088         }
6089 }
6090
6091 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6092                                   struct ethtool_rxnfc *cmd)
6093 {
6094         struct hclge_vport *vport = hclge_get_vport(handle);
6095         struct hclge_fd_rule *rule = NULL;
6096         struct hclge_dev *hdev = vport->back;
6097         struct ethtool_rx_flow_spec *fs;
6098         struct hlist_node *node2;
6099
6100         if (!hnae3_dev_fd_supported(hdev))
6101                 return -EOPNOTSUPP;
6102
6103         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6104
6105         spin_lock_bh(&hdev->fd_rule_lock);
6106
6107         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6108                 if (rule->location >= fs->location)
6109                         break;
6110         }
6111
6112         if (!rule || fs->location != rule->location) {
6113                 spin_unlock_bh(&hdev->fd_rule_lock);
6114
6115                 return -ENOENT;
6116         }
6117
6118         fs->flow_type = rule->flow_type;
6119         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6120         case SCTP_V4_FLOW:
6121         case TCP_V4_FLOW:
6122         case UDP_V4_FLOW:
6123                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6124                                          &fs->m_u.tcp_ip4_spec);
6125                 break;
6126         case IP_USER_FLOW:
6127                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6128                                       &fs->m_u.usr_ip4_spec);
6129                 break;
6130         case SCTP_V6_FLOW:
6131         case TCP_V6_FLOW:
6132         case UDP_V6_FLOW:
6133                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6134                                          &fs->m_u.tcp_ip6_spec);
6135                 break;
6136         case IPV6_USER_FLOW:
6137                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6138                                       &fs->m_u.usr_ip6_spec);
6139                 break;
6140         /* The flow type of the fd rule has been checked before adding it to the
6141          * rule list. As the other flow types have been handled, it must be
6142          * ETHER_FLOW for the default case
6143          */
6144         default:
6145                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6146                                         &fs->m_u.ether_spec);
6147                 break;
6148         }
6149
6150         hclge_fd_get_ext_info(fs, rule);
6151
6152         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6153                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6154         } else {
6155                 u64 vf_id;
6156
6157                 fs->ring_cookie = rule->queue_id;
6158                 vf_id = rule->vf_id;
6159                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6160                 fs->ring_cookie |= vf_id;
6161         }
6162
6163         spin_unlock_bh(&hdev->fd_rule_lock);
6164
6165         return 0;
6166 }
6167
6168 static int hclge_get_all_rules(struct hnae3_handle *handle,
6169                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6170 {
6171         struct hclge_vport *vport = hclge_get_vport(handle);
6172         struct hclge_dev *hdev = vport->back;
6173         struct hclge_fd_rule *rule;
6174         struct hlist_node *node2;
6175         int cnt = 0;
6176
6177         if (!hnae3_dev_fd_supported(hdev))
6178                 return -EOPNOTSUPP;
6179
6180         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6181
6182         spin_lock_bh(&hdev->fd_rule_lock);
6183         hlist_for_each_entry_safe(rule, node2,
6184                                   &hdev->fd_rule_list, rule_node) {
6185                 if (cnt == cmd->rule_cnt) {
6186                         spin_unlock_bh(&hdev->fd_rule_lock);
6187                         return -EMSGSIZE;
6188                 }
6189
6190                 rule_locs[cnt] = rule->location;
6191                 cnt++;
6192         }
6193
6194         spin_unlock_bh(&hdev->fd_rule_lock);
6195
6196         cmd->rule_cnt = cnt;
6197
6198         return 0;
6199 }
6200
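/* aRFS support: the helpers below build flow director rules from the
 * flow_keys dissected by the stack. hclge_add_fd_entry_by_arfs() is assumed
 * to be wired up as the ndo_rx_flow_steer callback through the hnae3 layer.
 */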
6201 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6202                                      struct hclge_fd_rule_tuples *tuples)
6203 {
6204 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6205 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6206
6207         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6208         tuples->ip_proto = fkeys->basic.ip_proto;
6209         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6210
6211         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6212                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6213                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6214         } else {
6215                 int i;
6216
6217                 for (i = 0; i < IPV6_SIZE; i++) {
6218                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6219                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6220                 }
6221         }
6222 }
6223
6224 /* traverse all rules to check whether an existing rule has the same tuples */
6225 static struct hclge_fd_rule *
6226 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6227                           const struct hclge_fd_rule_tuples *tuples)
6228 {
6229         struct hclge_fd_rule *rule = NULL;
6230         struct hlist_node *node;
6231
6232         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6233                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6234                         return rule;
6235         }
6236
6237         return NULL;
6238 }
6239
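/* Build an aRFS rule from the dissected tuples: MAC addresses, VLAN tag,
 * TOS and the source port are marked unused, so matching is done on the IP
 * addresses, destination port and protocol; the stored mask is all ones.
 */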
6240 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6241                                      struct hclge_fd_rule *rule)
6242 {
6243         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6244                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6245                              BIT(INNER_SRC_PORT);
6246         rule->action = 0;
6247         rule->vf_id = 0;
6248         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6249         if (tuples->ether_proto == ETH_P_IP) {
6250                 if (tuples->ip_proto == IPPROTO_TCP)
6251                         rule->flow_type = TCP_V4_FLOW;
6252                 else
6253                         rule->flow_type = UDP_V4_FLOW;
6254         } else {
6255                 if (tuples->ip_proto == IPPROTO_TCP)
6256                         rule->flow_type = TCP_V6_FLOW;
6257                 else
6258                         rule->flow_type = UDP_V6_FLOW;
6259         }
6260         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6261         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6262 }
6263
6264 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6265                                       u16 flow_id, struct flow_keys *fkeys)
6266 {
6267         struct hclge_vport *vport = hclge_get_vport(handle);
6268         struct hclge_fd_rule_tuples new_tuples;
6269         struct hclge_dev *hdev = vport->back;
6270         struct hclge_fd_rule *rule;
6271         u16 tmp_queue_id;
6272         u16 bit_id;
6273         int ret;
6274
6275         if (!hnae3_dev_fd_supported(hdev))
6276                 return -EOPNOTSUPP;
6277
6278         memset(&new_tuples, 0, sizeof(new_tuples));
6279         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6280
6281         spin_lock_bh(&hdev->fd_rule_lock);
6282
6283         /* when an fd rule added by the user already exists,
6284          * arfs should not work
6285          */
6286         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6287                 spin_unlock_bh(&hdev->fd_rule_lock);
6288                 return -EOPNOTSUPP;
6289         }
6290
6291         /* check whether a flow director filter exists for this flow:
6292          * if not, create a new filter for it;
6293          * if a filter exists with a different queue id, modify the filter;
6294          * if a filter exists with the same queue id, do nothing
6295          */
6296         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6297         if (!rule) {
6298                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6299                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6300                         spin_unlock_bh(&hdev->fd_rule_lock);
6301                         return -ENOSPC;
6302                 }
6303
6304                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6305                 if (!rule) {
6306                         spin_unlock_bh(&hdev->fd_rule_lock);
6307                         return -ENOMEM;
6308                 }
6309
6310                 set_bit(bit_id, hdev->fd_bmap);
6311                 rule->location = bit_id;
6312                 rule->flow_id = flow_id;
6313                 rule->queue_id = queue_id;
6314                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6315                 ret = hclge_fd_config_rule(hdev, rule);
6316
6317                 spin_unlock_bh(&hdev->fd_rule_lock);
6318
6319                 if (ret)
6320                         return ret;
6321
6322                 return rule->location;
6323         }
6324
6325         spin_unlock_bh(&hdev->fd_rule_lock);
6326
6327         if (rule->queue_id == queue_id)
6328                 return rule->location;
6329
6330         tmp_queue_id = rule->queue_id;
6331         rule->queue_id = queue_id;
6332         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6333         if (ret) {
6334                 rule->queue_id = tmp_queue_id;
6335                 return ret;
6336         }
6337
6338         return rule->location;
6339 }
6340
6341 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6342 {
6343 #ifdef CONFIG_RFS_ACCEL
6344         struct hnae3_handle *handle = &hdev->vport[0].nic;
6345         struct hclge_fd_rule *rule;
6346         struct hlist_node *node;
6347         HLIST_HEAD(del_list);
6348
6349         spin_lock_bh(&hdev->fd_rule_lock);
6350         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6351                 spin_unlock_bh(&hdev->fd_rule_lock);
6352                 return;
6353         }
6354         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6355                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6356                                         rule->flow_id, rule->location)) {
6357                         hlist_del_init(&rule->rule_node);
6358                         hlist_add_head(&rule->rule_node, &del_list);
6359                         hdev->hclge_fd_rule_num--;
6360                         clear_bit(rule->location, hdev->fd_bmap);
6361                 }
6362         }
6363         spin_unlock_bh(&hdev->fd_rule_lock);
6364
6365         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6366                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6367                                      rule->location, NULL, false);
6368                 kfree(rule);
6369         }
6370 #endif
6371 }
6372
6373 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6374 {
6375 #ifdef CONFIG_RFS_ACCEL
6376         struct hclge_vport *vport = hclge_get_vport(handle);
6377         struct hclge_dev *hdev = vport->back;
6378
6379         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6380                 hclge_del_all_fd_entries(handle, true);
6381 #endif
6382 }
6383
6384 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6385 {
6386         struct hclge_vport *vport = hclge_get_vport(handle);
6387         struct hclge_dev *hdev = vport->back;
6388
6389         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6390                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6391 }
6392
6393 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6394 {
6395         struct hclge_vport *vport = hclge_get_vport(handle);
6396         struct hclge_dev *hdev = vport->back;
6397
6398         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6399 }
6400
6401 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6402 {
6403         struct hclge_vport *vport = hclge_get_vport(handle);
6404         struct hclge_dev *hdev = vport->back;
6405
6406         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6407 }
6408
6409 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6410 {
6411         struct hclge_vport *vport = hclge_get_vport(handle);
6412         struct hclge_dev *hdev = vport->back;
6413
6414         return hdev->rst_stats.hw_reset_done_cnt;
6415 }
6416
6417 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6418 {
6419         struct hclge_vport *vport = hclge_get_vport(handle);
6420         struct hclge_dev *hdev = vport->back;
6421         bool clear;
6422
6423         hdev->fd_en = enable;
6424         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6425         if (!enable)
6426                 hclge_del_all_fd_entries(handle, clear);
6427         else
6428                 hclge_restore_fd_entries(handle);
6429 }
6430
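/* Enable or disable the whole MAC: when enabling, TX/RX, padding, FCS
 * insertion/stripping, oversize truncation and the TX under-minimum-size
 * handling are all switched on through the loop_en bit field; when
 * disabling, the field is left as all zeroes.
 */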
6431 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6432 {
6433         struct hclge_desc desc;
6434         struct hclge_config_mac_mode_cmd *req =
6435                 (struct hclge_config_mac_mode_cmd *)desc.data;
6436         u32 loop_en = 0;
6437         int ret;
6438
6439         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6440
6441         if (enable) {
6442                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6443                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6444                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6445                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6446                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6447                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6448                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6449                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6450                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6451                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6452         }
6453
6454         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6455
6456         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6457         if (ret)
6458                 dev_err(&hdev->pdev->dev,
6459                         "mac enable fail, ret =%d.\n", ret);
6460 }
6461
6462 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6463                                      u8 switch_param, u8 param_mask)
6464 {
6465         struct hclge_mac_vlan_switch_cmd *req;
6466         struct hclge_desc desc;
6467         u32 func_id;
6468         int ret;
6469
6470         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6471         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6472
6473         /* read current config parameter */
6474         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6475                                    true);
6476         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6477         req->func_id = cpu_to_le32(func_id);
6478
6479         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6480         if (ret) {
6481                 dev_err(&hdev->pdev->dev,
6482                         "read mac vlan switch parameter fail, ret = %d\n", ret);
6483                 return ret;
6484         }
6485
6486         /* modify and write new config parameter */
6487         hclge_cmd_reuse_desc(&desc, false);
6488         req->switch_param = (req->switch_param & param_mask) | switch_param;
6489         req->param_mask = param_mask;
6490
6491         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6492         if (ret)
6493                 dev_err(&hdev->pdev->dev,
6494                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6495         return ret;
6496 }
6497
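/* Link status polling below is bounded: the PHY wait polls up to 200 times
 * and the MAC wait up to 100 times, each sleeping HCLGE_LINK_STATUS_MS
 * (10 ms) per iteration, i.e. roughly 2s and 1s worst case respectively.
 */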
6498 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6499                                        int link_ret)
6500 {
6501 #define HCLGE_PHY_LINK_STATUS_NUM  200
6502
6503         struct phy_device *phydev = hdev->hw.mac.phydev;
6504         int i = 0;
6505         int ret;
6506
6507         do {
6508                 ret = phy_read_status(phydev);
6509                 if (ret) {
6510                         dev_err(&hdev->pdev->dev,
6511                                 "phy update link status fail, ret = %d\n", ret);
6512                         return;
6513                 }
6514
6515                 if (phydev->link == link_ret)
6516                         break;
6517
6518                 msleep(HCLGE_LINK_STATUS_MS);
6519         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6520 }
6521
6522 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6523 {
6524 #define HCLGE_MAC_LINK_STATUS_NUM  100
6525
6526         int i = 0;
6527         int ret;
6528
6529         do {
6530                 ret = hclge_get_mac_link_status(hdev);
6531                 if (ret < 0)
6532                         return ret;
6533                 else if (ret == link_ret)
6534                         return 0;
6535
6536                 msleep(HCLGE_LINK_STATUS_MS);
6537         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6538         return -EBUSY;
6539 }
6540
6541 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6542                                           bool is_phy)
6543 {
6544 #define HCLGE_LINK_STATUS_DOWN 0
6545 #define HCLGE_LINK_STATUS_UP   1
6546
6547         int link_ret;
6548
6549         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6550
6551         if (is_phy)
6552                 hclge_phy_link_status_wait(hdev, link_ret);
6553
6554         return hclge_mac_link_status_wait(hdev, link_ret);
6555 }
6556
6557 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6558 {
6559         struct hclge_config_mac_mode_cmd *req;
6560         struct hclge_desc desc;
6561         u32 loop_en;
6562         int ret;
6563
6564         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6565         /* 1 Read out the MAC mode config at first */
6566         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6567         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6568         if (ret) {
6569                 dev_err(&hdev->pdev->dev,
6570                         "mac loopback get fail, ret =%d.\n", ret);
6571                 return ret;
6572         }
6573
6574         /* 2 Then setup the loopback flag */
6575         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6576         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6577
6578         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6579
6580         /* 3 Config mac work mode with loopback flag
6581          * and its original configuration parameters
6582          */
6583         hclge_cmd_reuse_desc(&desc, false);
6584         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6585         if (ret)
6586                 dev_err(&hdev->pdev->dev,
6587                         "mac loopback set fail, ret =%d.\n", ret);
6588         return ret;
6589 }
6590
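/* Serdes loopback is configured in two steps: write the enable/mask bits,
 * then poll the same command until the firmware reports
 * HCLGE_CMD_SERDES_DONE_B, retrying up to 100 times at 10 ms intervals
 * (about 1s) before giving up with -EBUSY.
 */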
6591 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6592                                      enum hnae3_loop loop_mode)
6593 {
6594 #define HCLGE_SERDES_RETRY_MS   10
6595 #define HCLGE_SERDES_RETRY_NUM  100
6596
6597         struct hclge_serdes_lb_cmd *req;
6598         struct hclge_desc desc;
6599         int ret, i = 0;
6600         u8 loop_mode_b;
6601
6602         req = (struct hclge_serdes_lb_cmd *)desc.data;
6603         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6604
6605         switch (loop_mode) {
6606         case HNAE3_LOOP_SERIAL_SERDES:
6607                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6608                 break;
6609         case HNAE3_LOOP_PARALLEL_SERDES:
6610                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6611                 break;
6612         default:
6613                 dev_err(&hdev->pdev->dev,
6614                         "unsupported serdes loopback mode %d\n", loop_mode);
6615                 return -ENOTSUPP;
6616         }
6617
6618         if (en) {
6619                 req->enable = loop_mode_b;
6620                 req->mask = loop_mode_b;
6621         } else {
6622                 req->mask = loop_mode_b;
6623         }
6624
6625         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6626         if (ret) {
6627                 dev_err(&hdev->pdev->dev,
6628                         "serdes loopback set fail, ret = %d\n", ret);
6629                 return ret;
6630         }
6631
6632         do {
6633                 msleep(HCLGE_SERDES_RETRY_MS);
6634                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6635                                            true);
6636                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6637                 if (ret) {
6638                         dev_err(&hdev->pdev->dev,
6639                                 "serdes loopback get fail, ret = %d\n", ret);
6640                         return ret;
6641                 }
6642         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6643                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6644
6645         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6646                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6647                 return -EBUSY;
6648         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6649                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6650                 return -EIO;
6651         }
6652         return ret;
6653 }
6654
6655 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6656                                      enum hnae3_loop loop_mode)
6657 {
6658         int ret;
6659
6660         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6661         if (ret)
6662                 return ret;
6663
6664         hclge_cfg_mac_mode(hdev, en);
6665
6666         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6667         if (ret)
6668                 dev_err(&hdev->pdev->dev,
6669                         "serdes loopback config mac mode timeout\n");
6670
6671         return ret;
6672 }
6673
6674 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6675                                      struct phy_device *phydev)
6676 {
6677         int ret;
6678
6679         if (!phydev->suspended) {
6680                 ret = phy_suspend(phydev);
6681                 if (ret)
6682                         return ret;
6683         }
6684
6685         ret = phy_resume(phydev);
6686         if (ret)
6687                 return ret;
6688
6689         return phy_loopback(phydev, true);
6690 }
6691
6692 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6693                                       struct phy_device *phydev)
6694 {
6695         int ret;
6696
6697         ret = phy_loopback(phydev, false);
6698         if (ret)
6699                 return ret;
6700
6701         return phy_suspend(phydev);
6702 }
6703
6704 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6705 {
6706         struct phy_device *phydev = hdev->hw.mac.phydev;
6707         int ret;
6708
6709         if (!phydev)
6710                 return -ENOTSUPP;
6711
6712         if (en)
6713                 ret = hclge_enable_phy_loopback(hdev, phydev);
6714         else
6715                 ret = hclge_disable_phy_loopback(hdev, phydev);
6716         if (ret) {
6717                 dev_err(&hdev->pdev->dev,
6718                         "set phy loopback fail, ret = %d\n", ret);
6719                 return ret;
6720         }
6721
6722         hclge_cfg_mac_mode(hdev, en);
6723
6724         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6725         if (ret)
6726                 dev_err(&hdev->pdev->dev,
6727                         "phy loopback config mac mode timeout\n");
6728
6729         return ret;
6730 }
6731
6732 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6733                             int stream_id, bool enable)
6734 {
6735         struct hclge_desc desc;
6736         struct hclge_cfg_com_tqp_queue_cmd *req =
6737                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6738         int ret;
6739
6740         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6741         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6742         req->stream_id = cpu_to_le16(stream_id);
6743         if (enable)
6744                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6745
6746         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6747         if (ret)
6748                 dev_err(&hdev->pdev->dev,
6749                         "Tqp enable fail, status =%d.\n", ret);
6750         return ret;
6751 }
6752
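/* hclge_set_loopback() is assumed to be reached from the hns3 ethtool
 * self-test path: it configures the requested loopback mode and then
 * enables or disables every TQP of the vport accordingly.
 */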
6753 static int hclge_set_loopback(struct hnae3_handle *handle,
6754                               enum hnae3_loop loop_mode, bool en)
6755 {
6756         struct hclge_vport *vport = hclge_get_vport(handle);
6757         struct hnae3_knic_private_info *kinfo;
6758         struct hclge_dev *hdev = vport->back;
6759         int i, ret;
6760
6761         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6762          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6763          * the same, the packets are looped back in the SSU. If SSU loopback
6764          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6765          */
6766         if (hdev->pdev->revision >= 0x21) {
6767                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6768
6769                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6770                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
6771                 if (ret)
6772                         return ret;
6773         }
6774
6775         switch (loop_mode) {
6776         case HNAE3_LOOP_APP:
6777                 ret = hclge_set_app_loopback(hdev, en);
6778                 break;
6779         case HNAE3_LOOP_SERIAL_SERDES:
6780         case HNAE3_LOOP_PARALLEL_SERDES:
6781                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6782                 break;
6783         case HNAE3_LOOP_PHY:
6784                 ret = hclge_set_phy_loopback(hdev, en);
6785                 break;
6786         default:
6787                 ret = -ENOTSUPP;
6788                 dev_err(&hdev->pdev->dev,
6789                         "loop_mode %d is not supported\n", loop_mode);
6790                 break;
6791         }
6792
6793         if (ret)
6794                 return ret;
6795
6796         kinfo = &vport->nic.kinfo;
6797         for (i = 0; i < kinfo->num_tqps; i++) {
6798                 ret = hclge_tqp_enable(hdev, i, 0, en);
6799                 if (ret)
6800                         return ret;
6801         }
6802
6803         return 0;
6804 }
6805
6806 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6807 {
6808         int ret;
6809
6810         ret = hclge_set_app_loopback(hdev, false);
6811         if (ret)
6812                 return ret;
6813
6814         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6815         if (ret)
6816                 return ret;
6817
6818         return hclge_cfg_serdes_loopback(hdev, false,
6819                                          HNAE3_LOOP_PARALLEL_SERDES);
6820 }
6821
6822 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6823 {
6824         struct hclge_vport *vport = hclge_get_vport(handle);
6825         struct hnae3_knic_private_info *kinfo;
6826         struct hnae3_queue *queue;
6827         struct hclge_tqp *tqp;
6828         int i;
6829
6830         kinfo = &vport->nic.kinfo;
6831         for (i = 0; i < kinfo->num_tqps; i++) {
6832                 queue = handle->kinfo.tqp[i];
6833                 tqp = container_of(queue, struct hclge_tqp, q);
6834                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6835         }
6836 }
6837
6838 static void hclge_flush_link_update(struct hclge_dev *hdev)
6839 {
6840 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
6841
6842         unsigned long last = hdev->serv_processed_cnt;
6843         int i = 0;
6844
6845         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6846                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6847                last == hdev->serv_processed_cnt)
6848                 usleep_range(1, 1);
6849 }
6850
6851 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6852 {
6853         struct hclge_vport *vport = hclge_get_vport(handle);
6854         struct hclge_dev *hdev = vport->back;
6855
6856         if (enable) {
6857                 hclge_task_schedule(hdev, 0);
6858         } else {
6859                 /* Set the DOWN flag here to disable link updating */
6860                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6861
6862                 /* flush memory to make sure DOWN is seen by service task */
6863                 smp_mb__before_atomic();
6864                 hclge_flush_link_update(hdev);
6865         }
6866 }
6867
6868 static int hclge_ae_start(struct hnae3_handle *handle)
6869 {
6870         struct hclge_vport *vport = hclge_get_vport(handle);
6871         struct hclge_dev *hdev = vport->back;
6872
6873         /* mac enable */
6874         hclge_cfg_mac_mode(hdev, true);
6875         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6876         hdev->hw.mac.link = 0;
6877
6878         /* reset tqp stats */
6879         hclge_reset_tqp_stats(handle);
6880
6881         hclge_mac_start_phy(hdev);
6882
6883         return 0;
6884 }
6885
6886 static void hclge_ae_stop(struct hnae3_handle *handle)
6887 {
6888         struct hclge_vport *vport = hclge_get_vport(handle);
6889         struct hclge_dev *hdev = vport->back;
6890         int i;
6891
6892         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6893
6894         hclge_clear_arfs_rules(handle);
6895
6896         /* If it is not PF reset, the firmware will disable the MAC,
6897          * so we only need to stop the PHY here.
6898          */
6899         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6900             hdev->reset_type != HNAE3_FUNC_RESET) {
6901                 hclge_mac_stop_phy(hdev);
6902                 hclge_update_link_status(hdev);
6903                 return;
6904         }
6905
6906         for (i = 0; i < handle->kinfo.num_tqps; i++)
6907                 hclge_reset_tqp(handle, i);
6908
6909         hclge_config_mac_tnl_int(hdev, false);
6910
6911         /* Mac disable */
6912         hclge_cfg_mac_mode(hdev, false);
6913
6914         hclge_mac_stop_phy(hdev);
6915
6916         /* reset tqp stats */
6917         hclge_reset_tqp_stats(handle);
6918         hclge_update_link_status(hdev);
6919 }
6920
6921 int hclge_vport_start(struct hclge_vport *vport)
6922 {
6923         struct hclge_dev *hdev = vport->back;
6924
6925         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6926         vport->last_active_jiffies = jiffies;
6927
6928         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
6929                 if (vport->vport_id) {
6930                         hclge_restore_mac_table_common(vport);
6931                         hclge_restore_vport_vlan_table(vport);
6932                 } else {
6933                         hclge_restore_hw_table(hdev);
6934                 }
6935         }
6936
6937         clear_bit(vport->vport_id, hdev->vport_config_block);
6938
6939         return 0;
6940 }
6941
6942 void hclge_vport_stop(struct hclge_vport *vport)
6943 {
6944         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6945 }
6946
6947 static int hclge_client_start(struct hnae3_handle *handle)
6948 {
6949         struct hclge_vport *vport = hclge_get_vport(handle);
6950
6951         return hclge_vport_start(vport);
6952 }
6953
6954 static void hclge_client_stop(struct hnae3_handle *handle)
6955 {
6956         struct hclge_vport *vport = hclge_get_vport(handle);
6957
6958         hclge_vport_stop(vport);
6959 }
6960
6961 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6962                                          u16 cmdq_resp, u8  resp_code,
6963                                          enum hclge_mac_vlan_tbl_opcode op)
6964 {
6965         struct hclge_dev *hdev = vport->back;
6966
6967         if (cmdq_resp) {
6968                 dev_err(&hdev->pdev->dev,
6969                         "cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
6970                         cmdq_resp);
6971                 return -EIO;
6972         }
6973
6974         if (op == HCLGE_MAC_VLAN_ADD) {
6975                 if (!resp_code || resp_code == 1)
6976                         return 0;
6977                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
6978                          resp_code == HCLGE_ADD_MC_OVERFLOW)
6979                         return -ENOSPC;
6980
6981                 dev_err(&hdev->pdev->dev,
6982                         "add mac addr failed for undefined, code=%u.\n",
6983                         resp_code);
6984                 return -EIO;
6985         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6986                 if (!resp_code) {
6987                         return 0;
6988                 } else if (resp_code == 1) {
6989                         dev_dbg(&hdev->pdev->dev,
6990                                 "remove mac addr failed for miss.\n");
6991                         return -ENOENT;
6992                 }
6993
6994                 dev_err(&hdev->pdev->dev,
6995                         "remove mac addr failed for undefined, code=%u.\n",
6996                         resp_code);
6997                 return -EIO;
6998         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6999                 if (!resp_code) {
7000                         return 0;
7001                 } else if (resp_code == 1) {
7002                         dev_dbg(&hdev->pdev->dev,
7003                                 "lookup mac addr failed for miss.\n");
7004                         return -ENOENT;
7005                 }
7006
7007                 dev_err(&hdev->pdev->dev,
7008                         "lookup mac addr failed for undefined, code=%u.\n",
7009                         resp_code);
7010                 return -EIO;
7011         }
7012
7013         dev_err(&hdev->pdev->dev,
7014                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7015
7016         return -EINVAL;
7017 }
7018
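/* Set or clear the bit of a function id in the MAC/VLAN table descriptors.
 * The first 192 function ids live in desc[1] and the rest in desc[2], e.g.
 * vfid 5   -> desc[1].data[0] bit 5,
 * vfid 200 -> desc[2].data[0] bit 8 (200 - 192 = 8).
 */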
7019 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7020 {
7021 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7022
7023         unsigned int word_num;
7024         unsigned int bit_num;
7025
7026         if (vfid > 255 || vfid < 0)
7027                 return -EIO;
7028
7029         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7030                 word_num = vfid / 32;
7031                 bit_num  = vfid % 32;
7032                 if (clr)
7033                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7034                 else
7035                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7036         } else {
7037                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7038                 bit_num  = vfid % 32;
7039                 if (clr)
7040                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7041                 else
7042                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7043         }
7044
7045         return 0;
7046 }
7047
7048 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7049 {
7050 #define HCLGE_DESC_NUMBER 3
7051 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7052         int i, j;
7053
7054         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7055                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7056                         if (desc[i].data[j])
7057                                 return false;
7058
7059         return true;
7060 }
7061
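/* Pack @addr into the hi32/lo16 fields of a MAC-VLAN table entry and mark the
 * entry valid; for multicast addresses the multicast entry bits are also set.
 */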
7062 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7063                                    const u8 *addr, bool is_mc)
7064 {
7065         const unsigned char *mac_addr = addr;
7066         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7067                        (mac_addr[0]) | (mac_addr[1] << 8);
7068         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7069
7070         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7071         if (is_mc) {
7072                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7073                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7074         }
7075
7076         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7077         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7078 }
7079
7080 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7081                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
7082 {
7083         struct hclge_dev *hdev = vport->back;
7084         struct hclge_desc desc;
7085         u8 resp_code;
7086         u16 retval;
7087         int ret;
7088
7089         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7090
7091         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7092
7093         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7094         if (ret) {
7095                 dev_err(&hdev->pdev->dev,
7096                         "del mac addr failed for cmd_send, ret =%d.\n",
7097                         ret);
7098                 return ret;
7099         }
7100         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7101         retval = le16_to_cpu(desc.retval);
7102
7103         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7104                                              HCLGE_MAC_VLAN_REMOVE);
7105 }
7106
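/* Look up @req in the hardware MAC-VLAN table. A multicast lookup uses three
 * chained descriptors (the entry carries a VF bitmap), a unicast lookup only
 * one. Returns 0 on hit, -ENOENT on miss, or another negative errno.
 */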
7107 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7108                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7109                                      struct hclge_desc *desc,
7110                                      bool is_mc)
7111 {
7112         struct hclge_dev *hdev = vport->back;
7113         u8 resp_code;
7114         u16 retval;
7115         int ret;
7116
7117         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7118         if (is_mc) {
7119                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7120                 memcpy(desc[0].data,
7121                        req,
7122                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7123                 hclge_cmd_setup_basic_desc(&desc[1],
7124                                            HCLGE_OPC_MAC_VLAN_ADD,
7125                                            true);
7126                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7127                 hclge_cmd_setup_basic_desc(&desc[2],
7128                                            HCLGE_OPC_MAC_VLAN_ADD,
7129                                            true);
7130                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7131         } else {
7132                 memcpy(desc[0].data,
7133                        req,
7134                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7135                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7136         }
7137         if (ret) {
7138                 dev_err(&hdev->pdev->dev,
7139                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7140                         ret);
7141                 return ret;
7142         }
7143         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7144         retval = le16_to_cpu(desc[0].retval);
7145
7146         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7147                                              HCLGE_MAC_VLAN_LKUP);
7148 }
7149
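/* Write a MAC-VLAN table entry. With @mc_desc NULL a single descriptor adds a
 * unicast entry; otherwise the three descriptors filled by the preceding
 * lookup are reused to add or update a multicast entry.
 */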
7150 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7151                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7152                                   struct hclge_desc *mc_desc)
7153 {
7154         struct hclge_dev *hdev = vport->back;
7155         int cfg_status;
7156         u8 resp_code;
7157         u16 retval;
7158         int ret;
7159
7160         if (!mc_desc) {
7161                 struct hclge_desc desc;
7162
7163                 hclge_cmd_setup_basic_desc(&desc,
7164                                            HCLGE_OPC_MAC_VLAN_ADD,
7165                                            false);
7166                 memcpy(desc.data, req,
7167                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7168                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7169                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7170                 retval = le16_to_cpu(desc.retval);
7171
7172                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7173                                                            resp_code,
7174                                                            HCLGE_MAC_VLAN_ADD);
7175         } else {
7176                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7177                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7178                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7179                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7180                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7181                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7182                 memcpy(mc_desc[0].data, req,
7183                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7184                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7185                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7186                 retval = le16_to_cpu(mc_desc[0].retval);
7187
7188                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7189                                                            resp_code,
7190                                                            HCLGE_MAC_VLAN_ADD);
7191         }
7192
7193         if (ret) {
7194                 dev_err(&hdev->pdev->dev,
7195                         "add mac addr failed for cmd_send, ret =%d.\n",
7196                         ret);
7197                 return ret;
7198         }
7199
7200         return cfg_status;
7201 }
7202
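/* Ask firmware for @space_size entries of UMV (unicast MAC-VLAN) space and
 * report the number actually granted through @allocated_size.
 */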
7203 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7204                                u16 *allocated_size)
7205 {
7206         struct hclge_umv_spc_alc_cmd *req;
7207         struct hclge_desc desc;
7208         int ret;
7209
7210         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7211         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7212
7213         req->space_size = cpu_to_le32(space_size);
7214
7215         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7216         if (ret) {
7217                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7218                         ret);
7219                 return ret;
7220         }
7221
7222         *allocated_size = le32_to_cpu(desc.data[1]);
7223
7224         return 0;
7225 }
7226
7227 static int hclge_init_umv_space(struct hclge_dev *hdev)
7228 {
7229         u16 allocated_size = 0;
7230         int ret;
7231
7232         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7233         if (ret)
7234                 return ret;
7235
7236         if (allocated_size < hdev->wanted_umv_size)
7237                 dev_warn(&hdev->pdev->dev,
7238                          "failed to alloc umv space, want %u, get %u\n",
7239                          hdev->wanted_umv_size, allocated_size);
7240
7241         hdev->max_umv_size = allocated_size;
7242         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7243         hdev->share_umv_size = hdev->priv_umv_size +
7244                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7245
7246         return 0;
7247 }
7248
7249 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7250 {
7251         struct hclge_vport *vport;
7252         int i;
7253
7254         for (i = 0; i < hdev->num_alloc_vport; i++) {
7255                 vport = &hdev->vport[i];
7256                 vport->used_umv_num = 0;
7257         }
7258
7259         mutex_lock(&hdev->vport_lock);
7260         hdev->share_umv_size = hdev->priv_umv_size +
7261                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7262         mutex_unlock(&hdev->vport_lock);
7263 }
7264
7265 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7266 {
7267         struct hclge_dev *hdev = vport->back;
7268         bool is_full;
7269
7270         if (need_lock)
7271                 mutex_lock(&hdev->vport_lock);
7272
7273         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7274                    hdev->share_umv_size == 0);
7275
7276         if (need_lock)
7277                 mutex_unlock(&hdev->vport_lock);
7278
7279         return is_full;
7280 }
7281
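/* Account for one UMV entry consumed or freed by @vport: usage beyond the
 * vport's private quota is charged to, or returned to, the shared pool. Both
 * call sites in this file hold hdev->vport_lock around the update.
 */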
7282 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7283 {
7284         struct hclge_dev *hdev = vport->back;
7285
7286         if (is_free) {
7287                 if (vport->used_umv_num > hdev->priv_umv_size)
7288                         hdev->share_umv_size++;
7289
7290                 if (vport->used_umv_num > 0)
7291                         vport->used_umv_num--;
7292         } else {
7293                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7294                     hdev->share_umv_size > 0)
7295                         hdev->share_umv_size--;
7296                 vport->used_umv_num++;
7297         }
7298 }
7299
7300 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7301                                                   const u8 *mac_addr)
7302 {
7303         struct hclge_mac_node *mac_node, *tmp;
7304
7305         list_for_each_entry_safe(mac_node, tmp, list, node)
7306                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7307                         return mac_node;
7308
7309         return NULL;
7310 }
7311
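/* Apply a newly requested @state to an existing mac_node, resolving add/delete
 * races: a pending TO_ADD cancelled by TO_DEL is freed immediately, while a
 * TO_DEL superseded by TO_ADD goes back to ACTIVE.
 */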
7312 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7313                                   enum HCLGE_MAC_NODE_STATE state)
7314 {
7315         switch (state) {
7316         /* from set_rx_mode or tmp_add_list */
7317         case HCLGE_MAC_TO_ADD:
7318                 if (mac_node->state == HCLGE_MAC_TO_DEL)
7319                         mac_node->state = HCLGE_MAC_ACTIVE;
7320                 break;
7321         /* only from set_rx_mode */
7322         case HCLGE_MAC_TO_DEL:
7323                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7324                         list_del(&mac_node->node);
7325                         kfree(mac_node);
7326                 } else {
7327                         mac_node->state = HCLGE_MAC_TO_DEL;
7328                 }
7329                 break;
7330         /* only from tmp_add_list; in this case the mac_node->state
7331          * cannot already be ACTIVE.
7332          */
7333         case HCLGE_MAC_ACTIVE:
7334                 if (mac_node->state == HCLGE_MAC_TO_ADD)
7335                         mac_node->state = HCLGE_MAC_ACTIVE;
7336
7337                 break;
7338         }
7339 }
7340
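/* Record a requested add or delete of @addr in the vport's uc/mc software mac
 * list and mark the vport for a later hardware sync; the MAC-VLAN table itself
 * is updated afterwards by the sync path (hclge_sync_vport_mac_table()).
 */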
7341 int hclge_update_mac_list(struct hclge_vport *vport,
7342                           enum HCLGE_MAC_NODE_STATE state,
7343                           enum HCLGE_MAC_ADDR_TYPE mac_type,
7344                           const unsigned char *addr)
7345 {
7346         struct hclge_dev *hdev = vport->back;
7347         struct hclge_mac_node *mac_node;
7348         struct list_head *list;
7349
7350         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7351                 &vport->uc_mac_list : &vport->mc_mac_list;
7352
7353         spin_lock_bh(&vport->mac_list_lock);
7354
7355         /* if the mac addr is already in the mac list, no need to add a new
7356          * one; just check the mac addr state and convert it to a new state,
7357          * remove it, or do nothing.
7358          */
7359         mac_node = hclge_find_mac_node(list, addr);
7360         if (mac_node) {
7361                 hclge_update_mac_node(mac_node, state);
7362                 spin_unlock_bh(&vport->mac_list_lock);
7363                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7364                 return 0;
7365         }
7366
7367         /* if this address was never added, there is nothing to delete */
7368         if (state == HCLGE_MAC_TO_DEL) {
7369                 spin_unlock_bh(&vport->mac_list_lock);
7370                 dev_err(&hdev->pdev->dev,
7371                         "failed to delete address %pM from mac list\n",
7372                         addr);
7373                 return -ENOENT;
7374         }
7375
7376         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7377         if (!mac_node) {
7378                 spin_unlock_bh(&vport->mac_list_lock);
7379                 return -ENOMEM;
7380         }
7381
7382         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7383
7384         mac_node->state = state;
7385         ether_addr_copy(mac_node->mac_addr, addr);
7386         list_add_tail(&mac_node->node, list);
7387
7388         spin_unlock_bh(&vport->mac_list_lock);
7389
7390         return 0;
7391 }
7392
7393 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7394                              const unsigned char *addr)
7395 {
7396         struct hclge_vport *vport = hclge_get_vport(handle);
7397
7398         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7399                                      addr);
7400 }
7401
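/* Program unicast @addr for @vport directly into the hardware MAC-VLAN table,
 * consuming UMV space on success. Returns 0 if the address was added or is
 * already present, -ENOSPC if no space is left, or another negative errno.
 */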
7402 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7403                              const unsigned char *addr)
7404 {
7405         struct hclge_dev *hdev = vport->back;
7406         struct hclge_mac_vlan_tbl_entry_cmd req;
7407         struct hclge_desc desc;
7408         u16 egress_port = 0;
7409         int ret;
7410
7411         /* mac addr check */
7412         if (is_zero_ether_addr(addr) ||
7413             is_broadcast_ether_addr(addr) ||
7414             is_multicast_ether_addr(addr)) {
7415                 dev_err(&hdev->pdev->dev,
7416                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7417                          addr, is_zero_ether_addr(addr),
7418                          is_broadcast_ether_addr(addr),
7419                          is_multicast_ether_addr(addr));
7420                 return -EINVAL;
7421         }
7422
7423         memset(&req, 0, sizeof(req));
7424
7425         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7426                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7427
7428         req.egress_port = cpu_to_le16(egress_port);
7429
7430         hclge_prepare_mac_addr(&req, addr, false);
7431
7432         /* Look up the mac address in the mac_vlan table, and add
7433          * it if the entry does not exist. Duplicate unicast entries
7434          * are not allowed in the mac vlan table.
7435          */
7436         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7437         if (ret == -ENOENT) {
7438                 mutex_lock(&hdev->vport_lock);
7439                 if (!hclge_is_umv_space_full(vport, false)) {
7440                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7441                         if (!ret)
7442                                 hclge_update_umv_space(vport, false);
7443                         mutex_unlock(&hdev->vport_lock);
7444                         return ret;
7445                 }
7446                 mutex_unlock(&hdev->vport_lock);
7447
7448                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7449                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7450                                 hdev->priv_umv_size);
7451
7452                 return -ENOSPC;
7453         }
7454
7455         /* check if we just hit the duplicate */
7456         if (!ret) {
7457                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7458                          vport->vport_id, addr);
7459                 return 0;
7460         }
7461
7462         dev_err(&hdev->pdev->dev,
7463                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7464                 addr);
7465
7466         return ret;
7467 }
7468
7469 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7470                             const unsigned char *addr)
7471 {
7472         struct hclge_vport *vport = hclge_get_vport(handle);
7473
7474         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7475                                      addr);
7476 }
7477
7478 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7479                             const unsigned char *addr)
7480 {
7481         struct hclge_dev *hdev = vport->back;
7482         struct hclge_mac_vlan_tbl_entry_cmd req;
7483         int ret;
7484
7485         /* mac addr check */
7486         if (is_zero_ether_addr(addr) ||
7487             is_broadcast_ether_addr(addr) ||
7488             is_multicast_ether_addr(addr)) {
7489                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7490                         addr);
7491                 return -EINVAL;
7492         }
7493
7494         memset(&req, 0, sizeof(req));
7495         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7496         hclge_prepare_mac_addr(&req, addr, false);
7497         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7498         if (!ret) {
7499                 mutex_lock(&hdev->vport_lock);
7500                 hclge_update_umv_space(vport, true);
7501                 mutex_unlock(&hdev->vport_lock);
7502         } else if (ret == -ENOENT) {
7503                 ret = 0;
7504         }
7505
7506         return ret;
7507 }
7508
7509 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7510                              const unsigned char *addr)
7511 {
7512         struct hclge_vport *vport = hclge_get_vport(handle);
7513
7514         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7515                                      addr);
7516 }
7517
7518 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7519                              const unsigned char *addr)
7520 {
7521         struct hclge_dev *hdev = vport->back;
7522         struct hclge_mac_vlan_tbl_entry_cmd req;
7523         struct hclge_desc desc[3];
7524         int status;
7525
7526         /* mac addr check */
7527         if (!is_multicast_ether_addr(addr)) {
7528                 dev_err(&hdev->pdev->dev,
7529                         "Add mc mac err! invalid mac:%pM.\n",
7530                          addr);
7531                 return -EINVAL;
7532         }
7533         memset(&req, 0, sizeof(req));
7534         hclge_prepare_mac_addr(&req, addr, true);
7535         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7536         if (status) {
7537                 /* This mac addr does not exist, add a new entry for it */
7538                 memset(desc[0].data, 0, sizeof(desc[0].data));
7539                 memset(desc[1].data, 0, sizeof(desc[0].data));
7540                 memset(desc[2].data, 0, sizeof(desc[0].data));
7541         }
7542         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7543         if (status)
7544                 return status;
7545         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7546
7547         /* if already overflowed, do not print each time */
7548         if (status == -ENOSPC &&
7549             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7550                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7551
7552         return status;
7553 }
7554
7555 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7556                             const unsigned char *addr)
7557 {
7558         struct hclge_vport *vport = hclge_get_vport(handle);
7559
7560         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7561                                      addr);
7562 }
7563
7564 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7565                             const unsigned char *addr)
7566 {
7567         struct hclge_dev *hdev = vport->back;
7568         struct hclge_mac_vlan_tbl_entry_cmd req;
7569         enum hclge_cmd_status status;
7570         struct hclge_desc desc[3];
7571
7572         /* mac addr check */
7573         if (!is_multicast_ether_addr(addr)) {
7574                 dev_dbg(&hdev->pdev->dev,
7575                         "Remove mc mac err! invalid mac:%pM.\n",
7576                          addr);
7577                 return -EINVAL;
7578         }
7579
7580         memset(&req, 0, sizeof(req));
7581         hclge_prepare_mac_addr(&req, addr, true);
7582         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7583         if (!status) {
7584                 /* This mac addr exists, remove this handle's VFID from it */
7585                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7586                 if (status)
7587                         return status;
7588
7589                 if (hclge_is_all_function_id_zero(desc))
7590                         /* All the vfids are zero, so delete this entry */
7591                         status = hclge_remove_mac_vlan_tbl(vport, &req);
7592                 else
7593                         /* Not all the vfids are zero, so update the vfid */
7594                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7595
7596         } else if (status == -ENOENT) {
7597                 status = 0;
7598         }
7599
7600         return status;
7601 }
7602
7603 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7604                                       struct list_head *list,
7605                                       int (*sync)(struct hclge_vport *,
7606                                                   const unsigned char *))
7607 {
7608         struct hclge_mac_node *mac_node, *tmp;
7609         int ret;
7610
7611         list_for_each_entry_safe(mac_node, tmp, list, node) {
7612                 ret = sync(vport, mac_node->mac_addr);
7613                 if (!ret) {
7614                         mac_node->state = HCLGE_MAC_ACTIVE;
7615                 } else {
7616                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7617                                 &vport->state);
7618                         break;
7619                 }
7620         }
7621 }
7622
7623 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7624                                         struct list_head *list,
7625                                         int (*unsync)(struct hclge_vport *,
7626                                                       const unsigned char *))
7627 {
7628         struct hclge_mac_node *mac_node, *tmp;
7629         int ret;
7630
7631         list_for_each_entry_safe(mac_node, tmp, list, node) {
7632                 ret = unsync(vport, mac_node->mac_addr);
7633                 if (!ret || ret == -ENOENT) {
7634                         list_del(&mac_node->node);
7635                         kfree(mac_node);
7636                 } else {
7637                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7638                                 &vport->state);
7639                         break;
7640                 }
7641         }
7642 }
7643
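/* Merge tmp_add_list back into the vport mac list after a hardware sync,
 * honouring any TO_DEL requests that arrived in the meantime. Returns true
 * only if every entry made it into the hardware table.
 */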
7644 static bool hclge_sync_from_add_list(struct list_head *add_list,
7645                                      struct list_head *mac_list)
7646 {
7647         struct hclge_mac_node *mac_node, *tmp, *new_node;
7648         bool all_added = true;
7649
7650         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7651                 if (mac_node->state == HCLGE_MAC_TO_ADD)
7652                         all_added = false;
7653
7654                 /* if the mac address from tmp_add_list is not in the
7655                  * uc/mc_mac_list, a TO_DEL request was received during the
7656                  * time window of adding the mac address into the mac
7657                  * table. If the mac_node state is ACTIVE, change it to
7658                  * TO_DEL so it will be removed next time. Otherwise it
7659                  * must be TO_ADD, meaning this address hasn't been added
7660                  * into the mac table, so just remove the mac node.
7661                  */
7662                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7663                 if (new_node) {
7664                         hclge_update_mac_node(new_node, mac_node->state);
7665                         list_del(&mac_node->node);
7666                         kfree(mac_node);
7667                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7668                         mac_node->state = HCLGE_MAC_TO_DEL;
7669                         list_del(&mac_node->node);
7670                         list_add_tail(&mac_node->node, mac_list);
7671                 } else {
7672                         list_del(&mac_node->node);
7673                         kfree(mac_node);
7674                 }
7675         }
7676
7677         return all_added;
7678 }
7679
7680 static void hclge_sync_from_del_list(struct list_head *del_list,
7681                                      struct list_head *mac_list)
7682 {
7683         struct hclge_mac_node *mac_node, *tmp, *new_node;
7684
7685         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7686                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7687                 if (new_node) {
7688                         /* If the mac addr exists in the mac list, a new
7689                          * TO_ADD request was received during the time window
7690                          * of configuring the mac address. The mac node
7691                          * state is TO_ADD and the address is still in the
7692                          * hardware (because the delete failed), so we only
7693                          * need to change the mac node state to ACTIVE.
7694                          */
7695                         new_node->state = HCLGE_MAC_ACTIVE;
7696                         list_del(&mac_node->node);
7697                         kfree(mac_node);
7698                 } else {
7699                         list_del(&mac_node->node);
7700                         list_add_tail(&mac_node->node, mac_list);
7701                 }
7702         }
7703 }
7704
7705 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7706                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
7707                                         bool is_all_added)
7708 {
7709         if (mac_type == HCLGE_MAC_ADDR_UC) {
7710                 if (is_all_added)
7711                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7712                 else
7713                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7714         } else {
7715                 if (is_all_added)
7716                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7717                 else
7718                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7719         }
7720 }
7721
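/* Sync one vport's uc or mc software mac list to hardware: pending deletes and
 * adds are snapshotted under the list lock, applied to the MAC-VLAN table
 * (deletes first, to free space), and any failures are queued for retry.
 */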
7722 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7723                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
7724 {
7725         struct hclge_mac_node *mac_node, *tmp, *new_node;
7726         struct list_head tmp_add_list, tmp_del_list;
7727         struct list_head *list;
7728         bool all_added;
7729
7730         INIT_LIST_HEAD(&tmp_add_list);
7731         INIT_LIST_HEAD(&tmp_del_list);
7732
7733         /* move the mac addrs to the tmp_add_list and tmp_del_list, so
7734          * they can be added/deleted outside the spin lock
7735          */
7736         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7737                 &vport->uc_mac_list : &vport->mc_mac_list;
7738
7739         spin_lock_bh(&vport->mac_list_lock);
7740
7741         list_for_each_entry_safe(mac_node, tmp, list, node) {
7742                 switch (mac_node->state) {
7743                 case HCLGE_MAC_TO_DEL:
7744                         list_del(&mac_node->node);
7745                         list_add_tail(&mac_node->node, &tmp_del_list);
7746                         break;
7747                 case HCLGE_MAC_TO_ADD:
7748                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7749                         if (!new_node)
7750                                 goto stop_traverse;
7751                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7752                         new_node->state = mac_node->state;
7753                         list_add_tail(&new_node->node, &tmp_add_list);
7754                         break;
7755                 default:
7756                         break;
7757                 }
7758         }
7759
7760 stop_traverse:
7761         spin_unlock_bh(&vport->mac_list_lock);
7762
7763         /* delete first, in order to get max mac table space for adding */
7764         if (mac_type == HCLGE_MAC_ADDR_UC) {
7765                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7766                                             hclge_rm_uc_addr_common);
7767                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7768                                           hclge_add_uc_addr_common);
7769         } else {
7770                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7771                                             hclge_rm_mc_addr_common);
7772                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7773                                           hclge_add_mc_addr_common);
7774         }
7775
7776         /* if some mac addresses failed to be added/deleted, move them
7777          * back to the mac_list and retry next time.
7778          */
7779         spin_lock_bh(&vport->mac_list_lock);
7780
7781         hclge_sync_from_del_list(&tmp_del_list, list);
7782         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7783
7784         spin_unlock_bh(&vport->mac_list_lock);
7785
7786         hclge_update_overflow_flags(vport, mac_type, all_added);
7787 }
7788
7789 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7790 {
7791         struct hclge_dev *hdev = vport->back;
7792
7793         if (test_bit(vport->vport_id, hdev->vport_config_block))
7794                 return false;
7795
7796         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7797                 return true;
7798
7799         return false;
7800 }
7801
7802 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7803 {
7804         int i;
7805
7806         for (i = 0; i < hdev->num_alloc_vport; i++) {
7807                 struct hclge_vport *vport = &hdev->vport[i];
7808
7809                 if (!hclge_need_sync_mac_table(vport))
7810                         continue;
7811
7812                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7813                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7814         }
7815 }
7816
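/* Remove all of a vport's uc or mc addresses from the hardware table. With
 * @is_del_list the software list entries are dropped as well; otherwise active
 * entries are kept (marked TO_ADD) so they can be restored after a VF reset.
 */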
7817 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7818                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7819 {
7820         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7821         struct hclge_mac_node *mac_cfg, *tmp;
7822         struct hclge_dev *hdev = vport->back;
7823         struct list_head tmp_del_list, *list;
7824         int ret;
7825
7826         if (mac_type == HCLGE_MAC_ADDR_UC) {
7827                 list = &vport->uc_mac_list;
7828                 unsync = hclge_rm_uc_addr_common;
7829         } else {
7830                 list = &vport->mc_mac_list;
7831                 unsync = hclge_rm_mc_addr_common;
7832         }
7833
7834         INIT_LIST_HEAD(&tmp_del_list);
7835
7836         if (!is_del_list)
7837                 set_bit(vport->vport_id, hdev->vport_config_block);
7838
7839         spin_lock_bh(&vport->mac_list_lock);
7840
7841         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7842                 switch (mac_cfg->state) {
7843                 case HCLGE_MAC_TO_DEL:
7844                 case HCLGE_MAC_ACTIVE:
7845                         list_del(&mac_cfg->node);
7846                         list_add_tail(&mac_cfg->node, &tmp_del_list);
7847                         break;
7848                 case HCLGE_MAC_TO_ADD:
7849                         if (is_del_list) {
7850                                 list_del(&mac_cfg->node);
7851                                 kfree(mac_cfg);
7852                         }
7853                         break;
7854                 }
7855         }
7856
7857         spin_unlock_bh(&vport->mac_list_lock);
7858
7859         list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7860                 ret = unsync(vport, mac_cfg->mac_addr);
7861                 if (!ret || ret == -ENOENT) {
7862                         /* clear all mac addrs from hardware, but keep them
7863                          * in the mac list, and restore them after the vf
7864                          * reset has finished.
7865                          */
7866                         if (!is_del_list &&
7867                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
7868                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
7869                         } else {
7870                                 list_del(&mac_cfg->node);
7871                                 kfree(mac_cfg);
7872                         }
7873                 } else if (is_del_list) {
7874                         mac_cfg->state = HCLGE_MAC_TO_DEL;
7875                 }
7876         }
7877
7878         spin_lock_bh(&vport->mac_list_lock);
7879
7880         hclge_sync_from_del_list(&tmp_del_list, list);
7881
7882         spin_unlock_bh(&vport->mac_list_lock);
7883 }
7884
7885 /* remove all mac addresses when uninitializing */
7886 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
7887                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
7888 {
7889         struct hclge_mac_node *mac_node, *tmp;
7890         struct hclge_dev *hdev = vport->back;
7891         struct list_head tmp_del_list, *list;
7892
7893         INIT_LIST_HEAD(&tmp_del_list);
7894
7895         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7896                 &vport->uc_mac_list : &vport->mc_mac_list;
7897
7898         spin_lock_bh(&vport->mac_list_lock);
7899
7900         list_for_each_entry_safe(mac_node, tmp, list, node) {
7901                 switch (mac_node->state) {
7902                 case HCLGE_MAC_TO_DEL:
7903                 case HCLGE_MAC_ACTIVE:
7904                         list_del(&mac_node->node);
7905                         list_add_tail(&mac_node->node, &tmp_del_list);
7906                         break;
7907                 case HCLGE_MAC_TO_ADD:
7908                         list_del(&mac_node->node);
7909                         kfree(mac_node);
7910                         break;
7911                 }
7912         }
7913
7914         spin_unlock_bh(&vport->mac_list_lock);
7915
7916         if (mac_type == HCLGE_MAC_ADDR_UC)
7917                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7918                                             hclge_rm_uc_addr_common);
7919         else
7920                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7921                                             hclge_rm_mc_addr_common);
7922
7923         if (!list_empty(&tmp_del_list))
7924                 dev_warn(&hdev->pdev->dev,
7925                          "uninit %s mac list for vport %u not completely.\n",
7926                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
7927                          vport->vport_id);
7928
7929         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
7930                 list_del(&mac_node->node);
7931                 kfree(mac_node);
7932         }
7933 }
7934
7935 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
7936 {
7937         struct hclge_vport *vport;
7938         int i;
7939
7940         for (i = 0; i < hdev->num_alloc_vport; i++) {
7941                 vport = &hdev->vport[i];
7942                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
7943                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
7944         }
7945 }
7946
7947 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7948                                               u16 cmdq_resp, u8 resp_code)
7949 {
7950 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
7951 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
7952 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
7953 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
7954
7955         int return_status;
7956
7957         if (cmdq_resp) {
7958                 dev_err(&hdev->pdev->dev,
7959                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7960                         cmdq_resp);
7961                 return -EIO;
7962         }
7963
7964         switch (resp_code) {
7965         case HCLGE_ETHERTYPE_SUCCESS_ADD:
7966         case HCLGE_ETHERTYPE_ALREADY_ADD:
7967                 return_status = 0;
7968                 break;
7969         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7970                 dev_err(&hdev->pdev->dev,
7971                         "add mac ethertype failed for manager table overflow.\n");
7972                 return_status = -EIO;
7973                 break;
7974         case HCLGE_ETHERTYPE_KEY_CONFLICT:
7975                 dev_err(&hdev->pdev->dev,
7976                         "add mac ethertype failed for key conflict.\n");
7977                 return_status = -EIO;
7978                 break;
7979         default:
7980                 dev_err(&hdev->pdev->dev,
7981                         "add mac ethertype failed for undefined, code=%u.\n",
7982                         resp_code);
7983                 return_status = -EIO;
7984         }
7985
7986         return return_status;
7987 }
7988
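/* Return true when @mac_addr is already taken: either the MAC-VLAN table
 * lookup does not miss, or another VF of this PF has the same address
 * configured.
 */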
7989 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7990                                      u8 *mac_addr)
7991 {
7992         struct hclge_mac_vlan_tbl_entry_cmd req;
7993         struct hclge_dev *hdev = vport->back;
7994         struct hclge_desc desc;
7995         u16 egress_port = 0;
7996         int i;
7997
7998         if (is_zero_ether_addr(mac_addr))
7999                 return false;
8000
8001         memset(&req, 0, sizeof(req));
8002         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8003                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8004         req.egress_port = cpu_to_le16(egress_port);
8005         hclge_prepare_mac_addr(&req, mac_addr, false);
8006
8007         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8008                 return true;
8009
8010         vf_idx += HCLGE_VF_VPORT_START_NUM;
8011         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8012                 if (i != vf_idx &&
8013                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8014                         return true;
8015
8016         return false;
8017 }
8018
8019 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8020                             u8 *mac_addr)
8021 {
8022         struct hclge_vport *vport = hclge_get_vport(handle);
8023         struct hclge_dev *hdev = vport->back;
8024
8025         vport = hclge_get_vf_vport(hdev, vf);
8026         if (!vport)
8027                 return -EINVAL;
8028
8029         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8030                 dev_info(&hdev->pdev->dev,
8031                          "Specified MAC(=%pM) is same as before, no change committed!\n",
8032                          mac_addr);
8033                 return 0;
8034         }
8035
8036         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8037                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8038                         mac_addr);
8039                 return -EEXIST;
8040         }
8041
8042         ether_addr_copy(vport->vf_info.mac, mac_addr);
8043
8044         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8045                 dev_info(&hdev->pdev->dev,
8046                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8047                          vf, mac_addr);
8048                 return hclge_inform_reset_assert_to_vf(vport);
8049         }
8050
8051         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8052                  vf, mac_addr);
8053         return 0;
8054 }
8055
8056 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8057                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
8058 {
8059         struct hclge_desc desc;
8060         u8 resp_code;
8061         u16 retval;
8062         int ret;
8063
8064         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8065         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8066
8067         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8068         if (ret) {
8069                 dev_err(&hdev->pdev->dev,
8070                         "add mac ethertype failed for cmd_send, ret =%d.\n",
8071                         ret);
8072                 return ret;
8073         }
8074
8075         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8076         retval = le16_to_cpu(desc.retval);
8077
8078         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8079 }
8080
8081 static int init_mgr_tbl(struct hclge_dev *hdev)
8082 {
8083         int ret;
8084         int i;
8085
8086         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8087                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8088                 if (ret) {
8089                         dev_err(&hdev->pdev->dev,
8090                                 "add mac ethertype failed, ret =%d.\n",
8091                                 ret);
8092                         return ret;
8093                 }
8094         }
8095
8096         return 0;
8097 }
8098
8099 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8100 {
8101         struct hclge_vport *vport = hclge_get_vport(handle);
8102         struct hclge_dev *hdev = vport->back;
8103
8104         ether_addr_copy(p, hdev->hw.mac.mac_addr);
8105 }
8106
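/* Switch the device (primary) MAC address in the vport's unicast list: queue
 * @new_addr for adding at the list head and, if @old_addr is given and
 * differs, queue the old address for deletion.
 */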
8107 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8108                                        const u8 *old_addr, const u8 *new_addr)
8109 {
8110         struct list_head *list = &vport->uc_mac_list;
8111         struct hclge_mac_node *old_node, *new_node;
8112
8113         new_node = hclge_find_mac_node(list, new_addr);
8114         if (!new_node) {
8115                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8116                 if (!new_node)
8117                         return -ENOMEM;
8118
8119                 new_node->state = HCLGE_MAC_TO_ADD;
8120                 ether_addr_copy(new_node->mac_addr, new_addr);
8121                 list_add(&new_node->node, list);
8122         } else {
8123                 if (new_node->state == HCLGE_MAC_TO_DEL)
8124                         new_node->state = HCLGE_MAC_ACTIVE;
8125
8126                 /* make sure the new addr is at the list head, so the dev
8127                  * addr is not left out of the mac table due to the umv
8128                  * space limitation after a global/imp reset, which clears
8129                  * the mac table in hardware.
8130                  */
8131                 list_move(&new_node->node, list);
8132         }
8133
8134         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8135                 old_node = hclge_find_mac_node(list, old_addr);
8136                 if (old_node) {
8137                         if (old_node->state == HCLGE_MAC_TO_ADD) {
8138                                 list_del(&old_node->node);
8139                                 kfree(old_node);
8140                         } else {
8141                                 old_node->state = HCLGE_MAC_TO_DEL;
8142                         }
8143                 }
8144         }
8145
8146         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8147
8148         return 0;
8149 }
8150
8151 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8152                               bool is_first)
8153 {
8154         const unsigned char *new_addr = (const unsigned char *)p;
8155         struct hclge_vport *vport = hclge_get_vport(handle);
8156         struct hclge_dev *hdev = vport->back;
8157         unsigned char *old_addr = NULL;
8158         int ret;
8159
8160         /* mac addr check */
8161         if (is_zero_ether_addr(new_addr) ||
8162             is_broadcast_ether_addr(new_addr) ||
8163             is_multicast_ether_addr(new_addr)) {
8164                 dev_err(&hdev->pdev->dev,
8165                         "change uc mac err! invalid mac: %pM.\n",
8166                          new_addr);
8167                 return -EINVAL;
8168         }
8169
8170         ret = hclge_pause_addr_cfg(hdev, new_addr);
8171         if (ret) {
8172                 dev_err(&hdev->pdev->dev,
8173                         "failed to configure mac pause address, ret = %d\n",
8174                         ret);
8175                 return ret;
8176         }
8177
8178         if (!is_first)
8179                 old_addr = hdev->hw.mac.mac_addr;
8180
8181         spin_lock_bh(&vport->mac_list_lock);
8182         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8183         if (ret) {
8184                 dev_err(&hdev->pdev->dev,
8185                         "failed to change the mac addr:%pM, ret = %d\n",
8186                         new_addr, ret);
8187                 spin_unlock_bh(&vport->mac_list_lock);
8188
8189                 if (!is_first)
8190                         hclge_pause_addr_cfg(hdev, old_addr);
8191
8192                 return ret;
8193         }
8194         /* we must update the dev addr under the spin lock, to prevent it
8195          * from being removed by the set_rx_mode path.
8196          */
8197         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8198         spin_unlock_bh(&vport->mac_list_lock);
8199
8200         hclge_task_schedule(hdev, 0);
8201
8202         return 0;
8203 }
8204
8205 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8206                           int cmd)
8207 {
8208         struct hclge_vport *vport = hclge_get_vport(handle);
8209         struct hclge_dev *hdev = vport->back;
8210
8211         if (!hdev->hw.mac.phydev)
8212                 return -EOPNOTSUPP;
8213
8214         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8215 }
8216
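/* Read-modify-write the VLAN filter control for @vlan_type/@vf_id: the current
 * configuration is read back, the @fe_type bits are set or cleared according
 * to @filter_en, and the result is written out again.
 */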
8217 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8218                                       u8 fe_type, bool filter_en, u8 vf_id)
8219 {
8220         struct hclge_vlan_filter_ctrl_cmd *req;
8221         struct hclge_desc desc;
8222         int ret;
8223
8224         /* read current vlan filter parameter */
8225         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8226         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8227         req->vlan_type = vlan_type;
8228         req->vf_id = vf_id;
8229
8230         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8231         if (ret) {
8232                 dev_err(&hdev->pdev->dev,
8233                         "failed to get vlan filter config, ret = %d.\n", ret);
8234                 return ret;
8235         }
8236
8237         /* modify and write new config parameter */
8238         hclge_cmd_reuse_desc(&desc, false);
8239         req->vlan_fe = filter_en ?
8240                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8241
8242         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8243         if (ret)
8244                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8245                         ret);
8246
8247         return ret;
8248 }
8249
8250 #define HCLGE_FILTER_TYPE_VF            0
8251 #define HCLGE_FILTER_TYPE_PORT          1
8252 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
8253 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
8254 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
8255 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
8256 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
8257 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
8258                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8259 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
8260                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8261
8262 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8263 {
8264         struct hclge_vport *vport = hclge_get_vport(handle);
8265         struct hclge_dev *hdev = vport->back;
8266
8267         if (hdev->pdev->revision >= 0x21) {
8268                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8269                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
8270                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8271                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
8272         } else {
8273                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8274                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8275                                            0);
8276         }
8277         if (enable)
8278                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8279         else
8280                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8281 }
8282
8283 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8284                                     bool is_kill, u16 vlan,
8285                                     __be16 proto)
8286 {
8287         struct hclge_vport *vport = &hdev->vport[vfid];
8288         struct hclge_vlan_filter_vf_cfg_cmd *req0;
8289         struct hclge_vlan_filter_vf_cfg_cmd *req1;
8290         struct hclge_desc desc[2];
8291         u8 vf_byte_val;
8292         u8 vf_byte_off;
8293         int ret;
8294
8295         /* if the vf vlan table is full, firmware disables the vf vlan
8296          * filter, so it is impossible and unnecessary to add new vlan ids.
8297          * If spoof check is enabled and the vf vlan table is full, a new
8298          * vlan must not be added, since tx packets with it would be dropped.
8299          */
8300         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8301                 if (vport->vf_info.spoofchk && vlan) {
8302                         dev_err(&hdev->pdev->dev,
8303                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8304                         return -EPERM;
8305                 }
8306                 return 0;
8307         }
8308
8309         hclge_cmd_setup_basic_desc(&desc[0],
8310                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8311         hclge_cmd_setup_basic_desc(&desc[1],
8312                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8313
8314         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8315
8316         vf_byte_off = vfid / 8;
8317         vf_byte_val = 1 << (vfid % 8);
8318
8319         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8320         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8321
8322         req0->vlan_id  = cpu_to_le16(vlan);
8323         req0->vlan_cfg = is_kill;
8324
8325         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8326                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8327         else
8328                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8329
8330         ret = hclge_cmd_send(&hdev->hw, desc, 2);
8331         if (ret) {
8332                 dev_err(&hdev->pdev->dev,
8333                         "Send vf vlan command fail, ret =%d.\n",
8334                         ret);
8335                 return ret;
8336         }
8337
8338         if (!is_kill) {
8339 #define HCLGE_VF_VLAN_NO_ENTRY  2
8340                 if (!req0->resp_code || req0->resp_code == 1)
8341                         return 0;
8342
8343                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8344                         set_bit(vfid, hdev->vf_vlan_full);
8345                         dev_warn(&hdev->pdev->dev,
8346                                  "vf vlan table is full, vf vlan filter is disabled\n");
8347                         return 0;
8348                 }
8349
8350                 dev_err(&hdev->pdev->dev,
8351                         "Add vf vlan filter fail, ret =%u.\n",
8352                         req0->resp_code);
8353         } else {
8354 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
8355                 if (!req0->resp_code)
8356                         return 0;
8357
8358                 /* vf vlan filter is disabled when the vf vlan table is
8359                  * full, so a new vlan id will not be added into the vf
8360                  * vlan table. Just return 0 without warning, to avoid
8361                  * verbose log spam during unload.
8362                  */
8363                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8364                         return 0;
8365
8366                 dev_err(&hdev->pdev->dev,
8367                         "Kill vf vlan filter fail, ret =%u.\n",
8368                         req0->resp_code);
8369         }
8370
8371         return -EIO;
8372 }
8373
8374 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8375                                       u16 vlan_id, bool is_kill)
8376 {
8377         struct hclge_vlan_filter_pf_cfg_cmd *req;
8378         struct hclge_desc desc;
8379         u8 vlan_offset_byte_val;
8380         u8 vlan_offset_byte;
8381         u8 vlan_offset_160;
8382         int ret;
8383
8384         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8385
8386         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8387         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8388                            HCLGE_VLAN_BYTE_SIZE;
8389         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8390
8391         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8392         req->vlan_offset = vlan_offset_160;
8393         req->vlan_cfg = is_kill;
8394         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8395
8396         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8397         if (ret)
8398                 dev_err(&hdev->pdev->dev,
8399                         "port vlan command, send fail, ret =%d.\n", ret);
8400         return ret;
8401 }
8402
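/* Add or remove @vlan_id for @vport_id in the hardware VLAN filters: the
 * per-VF filter is updated first, and the port-level filter is only touched
 * when the first vport joins or the last vport leaves that VLAN.
 */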
8403 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8404                                     u16 vport_id, u16 vlan_id,
8405                                     bool is_kill)
8406 {
8407         u16 vport_idx, vport_num = 0;
8408         int ret;
8409
8410         if (is_kill && !vlan_id)
8411                 return 0;
8412
8413         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8414                                        proto);
8415         if (ret) {
8416                 dev_err(&hdev->pdev->dev,
8417                         "Set %u vport vlan filter config fail, ret =%d.\n",
8418                         vport_id, ret);
8419                 return ret;
8420         }
8421
8422         /* vlan 0 may be added twice when 8021q module is enabled */
8423         if (!is_kill && !vlan_id &&
8424             test_bit(vport_id, hdev->vlan_table[vlan_id]))
8425                 return 0;
8426
8427         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8428                 dev_err(&hdev->pdev->dev,
8429                         "Add port vlan failed, vport %u is already in vlan %u\n",
8430                         vport_id, vlan_id);
8431                 return -EINVAL;
8432         }
8433
8434         if (is_kill &&
8435             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8436                 dev_err(&hdev->pdev->dev,
8437                         "Delete port vlan failed, vport %u is not in vlan %u\n",
8438                         vport_id, vlan_id);
8439                 return -EINVAL;
8440         }
8441
8442         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8443                 vport_num++;
8444
8445         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8446                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8447                                                  is_kill);
8448
8449         return ret;
8450 }
8451
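/* Push the vport's tx vlan tag configuration to hardware. The target function
 * is selected through vf_offset/vf_bitmap. Worked example, assuming
 * HCLGE_VF_NUM_PER_CMD is 64 and HCLGE_VF_NUM_PER_BYTE is 8 (values defined
 * in hclge_main.h, not shown here): for vport_id 9, vf_offset = 9 / 64 = 0,
 * bmap_index = (9 % 64) / 8 = 1, and the bitmap byte is 1 << (9 % 8) = 0x02.
 */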
8452 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8453 {
8454         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8455         struct hclge_vport_vtag_tx_cfg_cmd *req;
8456         struct hclge_dev *hdev = vport->back;
8457         struct hclge_desc desc;
8458         u16 bmap_index;
8459         int status;
8460
8461         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8462
8463         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8464         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8465         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8466         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8467                       vcfg->accept_tag1 ? 1 : 0);
8468         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8469                       vcfg->accept_untag1 ? 1 : 0);
8470         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8471                       vcfg->accept_tag2 ? 1 : 0);
8472         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8473                       vcfg->accept_untag2 ? 1 : 0);
8474         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8475                       vcfg->insert_tag1_en ? 1 : 0);
8476         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8477                       vcfg->insert_tag2_en ? 1 : 0);
8478         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8479
8480         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8481         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8482                         HCLGE_VF_NUM_PER_BYTE;
8483         req->vf_bitmap[bmap_index] =
8484                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8485
8486         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8487         if (status)
8488                 dev_err(&hdev->pdev->dev,
8489                         "Send port txvlan cfg command fail, ret =%d\n",
8490                         status);
8491
8492         return status;
8493 }
8494
8495 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8496 {
8497         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8498         struct hclge_vport_vtag_rx_cfg_cmd *req;
8499         struct hclge_dev *hdev = vport->back;
8500         struct hclge_desc desc;
8501         u16 bmap_index;
8502         int status;
8503
8504         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8505
8506         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8507         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8508                       vcfg->strip_tag1_en ? 1 : 0);
8509         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8510                       vcfg->strip_tag2_en ? 1 : 0);
8511         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8512                       vcfg->vlan1_vlan_prionly ? 1 : 0);
8513         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8514                       vcfg->vlan2_vlan_prionly ? 1 : 0);
8515
8516         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8517         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8518                         HCLGE_VF_NUM_PER_BYTE;
8519         req->vf_bitmap[bmap_index] =
8520                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8521
8522         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8523         if (status)
8524                 dev_err(&hdev->pdev->dev,
8525                         "Send port rxvlan cfg command fail, ret =%d\n",
8526                         status);
8527
8528         return status;
8529 }
8530
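/* Derive the vport's tx/rx vlan offload configuration from the port based
 * vlan state: when it is disabled, tag1 from the stack is accepted and
 * hardware inserts nothing; when it is enabled, hardware inserts @vlan_tag as
 * tag1 on tx and strips tag1 on rx. tag2 handling is fixed and not user
 * configurable.
 */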
8531 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8532                                   u16 port_base_vlan_state,
8533                                   u16 vlan_tag)
8534 {
8535         int ret;
8536
8537         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8538                 vport->txvlan_cfg.accept_tag1 = true;
8539                 vport->txvlan_cfg.insert_tag1_en = false;
8540                 vport->txvlan_cfg.default_tag1 = 0;
8541         } else {
8542                 vport->txvlan_cfg.accept_tag1 = false;
8543                 vport->txvlan_cfg.insert_tag1_en = true;
8544                 vport->txvlan_cfg.default_tag1 = vlan_tag;
8545         }
8546
8547         vport->txvlan_cfg.accept_untag1 = true;
8548
8549         /* accept_tag2 and accept_untag2 are not supported on
8550          * pdev revision(0x20). Newer revisions support them, but
8551          * these two fields cannot be configured by the user.
8552          */
8553         vport->txvlan_cfg.accept_tag2 = true;
8554         vport->txvlan_cfg.accept_untag2 = true;
8555         vport->txvlan_cfg.insert_tag2_en = false;
8556         vport->txvlan_cfg.default_tag2 = 0;
8557
8558         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8559                 vport->rxvlan_cfg.strip_tag1_en = false;
8560                 vport->rxvlan_cfg.strip_tag2_en =
8561                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8562         } else {
8563                 vport->rxvlan_cfg.strip_tag1_en =
8564                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8565                 vport->rxvlan_cfg.strip_tag2_en = true;
8566         }
8567         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8568         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8569
8570         ret = hclge_set_vlan_tx_offload_cfg(vport);
8571         if (ret)
8572                 return ret;
8573
8574         return hclge_set_vlan_rx_offload_cfg(vport);
8575 }
8576
8577 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8578 {
8579         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8580         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8581         struct hclge_desc desc;
8582         int status;
8583
8584         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8585         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8586         rx_req->ot_fst_vlan_type =
8587                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8588         rx_req->ot_sec_vlan_type =
8589                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8590         rx_req->in_fst_vlan_type =
8591                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8592         rx_req->in_sec_vlan_type =
8593                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8594
8595         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8596         if (status) {
8597                 dev_err(&hdev->pdev->dev,
8598                         "Send rxvlan protocol type command fail, ret =%d\n",
8599                         status);
8600                 return status;
8601         }
8602
8603         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8604
8605         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8606         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8607         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8608
8609         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8610         if (status)
8611                 dev_err(&hdev->pdev->dev,
8612                         "Send txvlan protocol type command fail, ret =%d\n",
8613                         status);
8614
8615         return status;
8616 }
8617
8618 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8619 {
8620 #define HCLGE_DEF_VLAN_TYPE             0x8100
8621
8622         struct hnae3_handle *handle = &hdev->vport[0].nic;
8623         struct hclge_vport *vport;
8624         int ret;
8625         int i;
8626
8627         if (hdev->pdev->revision >= 0x21) {
8628                 /* for revision 0x21, vf vlan filter is per function */
8629                 for (i = 0; i < hdev->num_alloc_vport; i++) {
8630                         vport = &hdev->vport[i];
8631                         ret = hclge_set_vlan_filter_ctrl(hdev,
8632                                                          HCLGE_FILTER_TYPE_VF,
8633                                                          HCLGE_FILTER_FE_EGRESS,
8634                                                          true,
8635                                                          vport->vport_id);
8636                         if (ret)
8637                                 return ret;
8638                 }
8639
8640                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8641                                                  HCLGE_FILTER_FE_INGRESS, true,
8642                                                  0);
8643                 if (ret)
8644                         return ret;
8645         } else {
8646                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8647                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
8648                                                  true, 0);
8649                 if (ret)
8650                         return ret;
8651         }
8652
8653         handle->netdev_flags |= HNAE3_VLAN_FLTR;
8654
8655         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8656         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8657         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8658         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8659         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8660         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8661
8662         ret = hclge_set_vlan_protocol_type(hdev);
8663         if (ret)
8664                 return ret;
8665
8666         for (i = 0; i < hdev->num_alloc_vport; i++) {
8667                 u16 vlan_tag;
8668
8669                 vport = &hdev->vport[i];
8670                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8671
8672                 ret = hclge_vlan_offload_cfg(vport,
8673                                              vport->port_base_vlan_cfg.state,
8674                                              vlan_tag);
8675                 if (ret)
8676                         return ret;
8677         }
8678
8679         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8680 }
8681
8682 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8683                                        bool writen_to_tbl)
8684 {
8685         struct hclge_vport_vlan_cfg *vlan;
8686
8687         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8688         if (!vlan)
8689                 return;
8690
8691         vlan->hd_tbl_status = writen_to_tbl;
8692         vlan->vlan_id = vlan_id;
8693
8694         list_add_tail(&vlan->node, &vport->vlan_list);
8695 }
8696
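/* Replay the vport's software vlan list into the hardware vlan filter, e.g.
 * when port based vlan is disabled again. Entries already marked as written
 * (hd_tbl_status) are skipped; the rest are written and then marked.
 */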
8697 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8698 {
8699         struct hclge_vport_vlan_cfg *vlan, *tmp;
8700         struct hclge_dev *hdev = vport->back;
8701         int ret;
8702
8703         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8704                 if (!vlan->hd_tbl_status) {
8705                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8706                                                        vport->vport_id,
8707                                                        vlan->vlan_id, false);
8708                         if (ret) {
8709                                 dev_err(&hdev->pdev->dev,
8710                                         "restore vport vlan list failed, ret=%d\n",
8711                                         ret);
8712                                 return ret;
8713                         }
8714                 }
8715                 vlan->hd_tbl_status = true;
8716         }
8717
8718         return 0;
8719 }
8720
8721 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8722                                       bool is_write_tbl)
8723 {
8724         struct hclge_vport_vlan_cfg *vlan, *tmp;
8725         struct hclge_dev *hdev = vport->back;
8726
8727         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8728                 if (vlan->vlan_id == vlan_id) {
8729                         if (is_write_tbl && vlan->hd_tbl_status)
8730                                 hclge_set_vlan_filter_hw(hdev,
8731                                                          htons(ETH_P_8021Q),
8732                                                          vport->vport_id,
8733                                                          vlan_id,
8734                                                          true);
8735
8736                         list_del(&vlan->node);
8737                         kfree(vlan);
8738                         break;
8739                 }
8740         }
8741 }
8742
8743 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8744 {
8745         struct hclge_vport_vlan_cfg *vlan, *tmp;
8746         struct hclge_dev *hdev = vport->back;
8747
8748         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8749                 if (vlan->hd_tbl_status)
8750                         hclge_set_vlan_filter_hw(hdev,
8751                                                  htons(ETH_P_8021Q),
8752                                                  vport->vport_id,
8753                                                  vlan->vlan_id,
8754                                                  true);
8755
8756                 vlan->hd_tbl_status = false;
8757                 if (is_del_list) {
8758                         list_del(&vlan->node);
8759                         kfree(vlan);
8760                 }
8761         }
8762         clear_bit(vport->vport_id, hdev->vf_vlan_full);
8763 }
8764
8765 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8766 {
8767         struct hclge_vport_vlan_cfg *vlan, *tmp;
8768         struct hclge_vport *vport;
8769         int i;
8770
8771         for (i = 0; i < hdev->num_alloc_vport; i++) {
8772                 vport = &hdev->vport[i];
8773                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8774                         list_del(&vlan->node);
8775                         kfree(vlan);
8776                 }
8777         }
8778 }
8779
8780 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8781 {
8782         struct hclge_vport_vlan_cfg *vlan, *tmp;
8783         struct hclge_dev *hdev = vport->back;
8784         u16 vlan_proto;
8785         u16 vlan_id;
8786         u16 state;
8787         int ret;
8788
8789         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8790         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8791         state = vport->port_base_vlan_cfg.state;
8792
8793         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8794                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8795                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8796                                          vport->vport_id, vlan_id,
8797                                          false);
8798                 return;
8799         }
8800
8801         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8802                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8803                                                vport->vport_id,
8804                                                vlan->vlan_id, false);
8805                 if (ret)
8806                         break;
8807                 vlan->hd_tbl_status = true;
8808         }
8809 }
8810
8811 /* For global reset and imp reset, the hardware will clear the mac table,
8812  * so we change the mac address state from ACTIVE to TO_ADD, then they can
8813  * be restored by the service task after the reset completes. Furthermore,
8814  * mac addresses with state TO_DEL or DEL_FAIL do not need to be restored
8815  * after reset, so just remove these mac nodes from mac_list.
8816  */
8817 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8818 {
8819         struct hclge_mac_node *mac_node, *tmp;
8820
8821         list_for_each_entry_safe(mac_node, tmp, list, node) {
8822                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
8823                         mac_node->state = HCLGE_MAC_TO_ADD;
8824                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8825                         list_del(&mac_node->node);
8826                         kfree(mac_node);
8827                 }
8828         }
8829 }
8830
8831 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8832 {
8833         spin_lock_bh(&vport->mac_list_lock);
8834
8835         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8836         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8837         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8838
8839         spin_unlock_bh(&vport->mac_list_lock);
8840 }
8841
8842 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8843 {
8844         struct hclge_vport *vport = &hdev->vport[0];
8845         struct hnae3_handle *handle = &vport->nic;
8846
8847         hclge_restore_mac_table_common(vport);
8848         hclge_restore_vport_vlan_table(vport);
8849         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8850
8851         hclge_restore_fd_entries(handle);
8852 }
8853
8854 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8855 {
8856         struct hclge_vport *vport = hclge_get_vport(handle);
8857
8858         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8859                 vport->rxvlan_cfg.strip_tag1_en = false;
8860                 vport->rxvlan_cfg.strip_tag2_en = enable;
8861         } else {
8862                 vport->rxvlan_cfg.strip_tag1_en = enable;
8863                 vport->rxvlan_cfg.strip_tag2_en = true;
8864         }
8865         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8866         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8867         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8868
8869         return hclge_set_vlan_rx_offload_cfg(vport);
8870 }
8871
8872 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8873                                             u16 port_base_vlan_state,
8874                                             struct hclge_vlan_info *new_info,
8875                                             struct hclge_vlan_info *old_info)
8876 {
8877         struct hclge_dev *hdev = vport->back;
8878         int ret;
8879
8880         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8881                 hclge_rm_vport_all_vlan_table(vport, false);
8882                 return hclge_set_vlan_filter_hw(hdev,
8883                                                  htons(new_info->vlan_proto),
8884                                                  vport->vport_id,
8885                                                  new_info->vlan_tag,
8886                                                  false);
8887         }
8888
8889         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8890                                        vport->vport_id, old_info->vlan_tag,
8891                                        true);
8892         if (ret)
8893                 return ret;
8894
8895         return hclge_add_vport_all_vlan_table(vport);
8896 }
8897
8898 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8899                                     struct hclge_vlan_info *vlan_info)
8900 {
8901         struct hnae3_handle *nic = &vport->nic;
8902         struct hclge_vlan_info *old_vlan_info;
8903         struct hclge_dev *hdev = vport->back;
8904         int ret;
8905
8906         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8907
8908         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8909         if (ret)
8910                 return ret;
8911
8912         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8913                 /* add new VLAN tag */
8914                 ret = hclge_set_vlan_filter_hw(hdev,
8915                                                htons(vlan_info->vlan_proto),
8916                                                vport->vport_id,
8917                                                vlan_info->vlan_tag,
8918                                                false);
8919                 if (ret)
8920                         return ret;
8921
8922                 /* remove old VLAN tag */
8923                 ret = hclge_set_vlan_filter_hw(hdev,
8924                                                htons(old_vlan_info->vlan_proto),
8925                                                vport->vport_id,
8926                                                old_vlan_info->vlan_tag,
8927                                                true);
8928                 if (ret)
8929                         return ret;
8930
8931                 goto update;
8932         }
8933
8934         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8935                                                old_vlan_info);
8936         if (ret)
8937                 return ret;
8938
8939         /* update state only when disable/enable port based VLAN */
8940         vport->port_base_vlan_cfg.state = state;
8941         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8942                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8943         else
8944                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8945
8946 update:
8947         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8948         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8949         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8950
8951         return 0;
8952 }
8953
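/* Map the requested vlan against the current port based vlan state:
 *   DISABLE + vlan == 0            -> NOCHANGE
 *   DISABLE + vlan != 0            -> ENABLE
 *   ENABLE  + vlan == 0            -> DISABLE
 *   ENABLE  + vlan == current tag  -> NOCHANGE
 *   ENABLE  + vlan != current tag  -> MODIFY
 */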
8954 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8955                                           enum hnae3_port_base_vlan_state state,
8956                                           u16 vlan)
8957 {
8958         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8959                 if (!vlan)
8960                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8961                 else
8962                         return HNAE3_PORT_BASE_VLAN_ENABLE;
8963         } else {
8964                 if (!vlan)
8965                         return HNAE3_PORT_BASE_VLAN_DISABLE;
8966                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8967                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8968                 else
8969                         return HNAE3_PORT_BASE_VLAN_MODIFY;
8970         }
8971 }
8972
8973 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8974                                     u16 vlan, u8 qos, __be16 proto)
8975 {
8976         struct hclge_vport *vport = hclge_get_vport(handle);
8977         struct hclge_dev *hdev = vport->back;
8978         struct hclge_vlan_info vlan_info;
8979         u16 state;
8980         int ret;
8981
8982         if (hdev->pdev->revision == 0x20)
8983                 return -EOPNOTSUPP;
8984
8985         vport = hclge_get_vf_vport(hdev, vfid);
8986         if (!vport)
8987                 return -EINVAL;
8988
8989         /* qos is a 3-bit value, so it cannot be bigger than 7 */
8990         if (vlan > VLAN_N_VID - 1 || qos > 7)
8991                 return -EINVAL;
8992         if (proto != htons(ETH_P_8021Q))
8993                 return -EPROTONOSUPPORT;
8994
8995         state = hclge_get_port_base_vlan_state(vport,
8996                                                vport->port_base_vlan_cfg.state,
8997                                                vlan);
8998         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8999                 return 0;
9000
9001         vlan_info.vlan_tag = vlan;
9002         vlan_info.qos = qos;
9003         vlan_info.vlan_proto = ntohs(proto);
9004
9005         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9006                 return hclge_update_port_base_vlan_cfg(vport, state,
9007                                                        &vlan_info);
9008         } else {
9009                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9010                                                         vport->vport_id, state,
9011                                                         vlan, qos,
9012                                                         ntohs(proto));
9013                 return ret;
9014         }
9015 }
9016
9017 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9018 {
9019         struct hclge_vlan_info *vlan_info;
9020         struct hclge_vport *vport;
9021         int ret;
9022         int vf;
9023
9024         /* clear port based vlan for all vfs */
9025         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9026                 vport = &hdev->vport[vf];
9027                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9028
9029                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9030                                                vport->vport_id,
9031                                                vlan_info->vlan_tag, true);
9032                 if (ret)
9033                         dev_err(&hdev->pdev->dev,
9034                                 "failed to clear vf vlan for vf%d, ret = %d\n",
9035                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9036         }
9037 }
9038
9039 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9040                           u16 vlan_id, bool is_kill)
9041 {
9042         struct hclge_vport *vport = hclge_get_vport(handle);
9043         struct hclge_dev *hdev = vport->back;
9044         bool writen_to_tbl = false;
9045         int ret = 0;
9046
9047         /* When the device is resetting, the firmware is unable to handle
9048          * the mailbox. Just record the vlan id, and remove it after the
9049          * reset finishes.
9050          */
9051         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
9052                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9053                 return -EBUSY;
9054         }
9055
9056         /* when port based vlan is enabled, we use it as the vlan filter
9057          * entry. In this case, we don't update the vlan filter table when
9058          * the user adds or removes a vlan, we just update the vport vlan
9059          * list. The vlan ids in the vlan list are not written to the vlan
9060          * filter table until port based vlan is disabled.
9061          */
9062         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9063                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9064                                                vlan_id, is_kill);
9065                 writen_to_tbl = true;
9066         }
9067
9068         if (!ret) {
9069                 if (is_kill)
9070                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9071                 else
9072                         hclge_add_vport_vlan_table(vport, vlan_id,
9073                                                    writen_to_tbl);
9074         } else if (is_kill) {
9075                 /* when removing the hw vlan filter failed, record the vlan
9076                  * id and try to remove it from hw later, to be consistent
9077                  * with the stack
9078                  */
9079                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9080         }
9081         return ret;
9082 }
9083
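/* Periodic retry of vlan deletions that previously failed (recorded in each
 * vport's vlan_del_fail_bmap, e.g. while a reset was in progress). At most
 * HCLGE_MAX_SYNC_COUNT entries are handled per invocation.
 */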
9084 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9085 {
9086 #define HCLGE_MAX_SYNC_COUNT    60
9087
9088         int i, ret, sync_cnt = 0;
9089         u16 vlan_id;
9090
9091         /* start from vport 1 for PF is always alive */
9092         for (i = 0; i < hdev->num_alloc_vport; i++) {
9093                 struct hclge_vport *vport = &hdev->vport[i];
9094
9095                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9096                                          VLAN_N_VID);
9097                 while (vlan_id != VLAN_N_VID) {
9098                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9099                                                        vport->vport_id, vlan_id,
9100                                                        true);
9101                         if (ret && ret != -EINVAL)
9102                                 return;
9103
9104                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9105                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9106
9107                         sync_cnt++;
9108                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9109                                 return;
9110
9111                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9112                                                  VLAN_N_VID);
9113                 }
9114         }
9115 }
9116
9117 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9118 {
9119         struct hclge_config_max_frm_size_cmd *req;
9120         struct hclge_desc desc;
9121
9122         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9123
9124         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9125         req->max_frm_size = cpu_to_le16(new_mps);
9126         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9127
9128         return hclge_cmd_send(&hdev->hw, &desc, 1);
9129 }
9130
9131 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9132 {
9133         struct hclge_vport *vport = hclge_get_vport(handle);
9134
9135         return hclge_set_vport_mtu(vport, new_mtu);
9136 }
9137
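/* Convert the requested MTU to a max frame size and program it. The frame
 * size adds the Ethernet header, FCS and two vlan headers, so for example an
 * MTU of 1500 gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2 * 4
 * (VLAN_HLEN) = 1526 bytes. For a VF vport only the software mps is updated
 * (and it must not exceed the PF's mps); for the PF the MAC and buffers are
 * reconfigured with the client paused.
 */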
9138 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9139 {
9140         struct hclge_dev *hdev = vport->back;
9141         int i, max_frm_size, ret;
9142
9143         /* HW supports 2 layers of vlan */
9144         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9145         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9146             max_frm_size > HCLGE_MAC_MAX_FRAME)
9147                 return -EINVAL;
9148
9149         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9150         mutex_lock(&hdev->vport_lock);
9151         /* VF's mps must fit within hdev->mps */
9152         if (vport->vport_id && max_frm_size > hdev->mps) {
9153                 mutex_unlock(&hdev->vport_lock);
9154                 return -EINVAL;
9155         } else if (vport->vport_id) {
9156                 vport->mps = max_frm_size;
9157                 mutex_unlock(&hdev->vport_lock);
9158                 return 0;
9159         }
9160
9161         /* PF's mps must be greater than the VF's mps */
9162         for (i = 1; i < hdev->num_alloc_vport; i++)
9163                 if (max_frm_size < hdev->vport[i].mps) {
9164                         mutex_unlock(&hdev->vport_lock);
9165                         return -EINVAL;
9166                 }
9167
9168         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9169
9170         ret = hclge_set_mac_mtu(hdev, max_frm_size);
9171         if (ret) {
9172                 dev_err(&hdev->pdev->dev,
9173                         "Change mtu fail, ret =%d\n", ret);
9174                 goto out;
9175         }
9176
9177         hdev->mps = max_frm_size;
9178         vport->mps = max_frm_size;
9179
9180         ret = hclge_buffer_alloc(hdev);
9181         if (ret)
9182                 dev_err(&hdev->pdev->dev,
9183                         "Allocate buffer fail, ret =%d\n", ret);
9184
9185 out:
9186         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9187         mutex_unlock(&hdev->vport_lock);
9188         return ret;
9189 }
9190
9191 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9192                                     bool enable)
9193 {
9194         struct hclge_reset_tqp_queue_cmd *req;
9195         struct hclge_desc desc;
9196         int ret;
9197
9198         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9199
9200         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9201         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9202         if (enable)
9203                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9204
9205         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9206         if (ret) {
9207                 dev_err(&hdev->pdev->dev,
9208                         "Send tqp reset cmd error, status =%d\n", ret);
9209                 return ret;
9210         }
9211
9212         return 0;
9213 }
9214
9215 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9216 {
9217         struct hclge_reset_tqp_queue_cmd *req;
9218         struct hclge_desc desc;
9219         int ret;
9220
9221         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9222
9223         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9224         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9225
9226         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9227         if (ret) {
9228                 dev_err(&hdev->pdev->dev,
9229                         "Get reset status error, status =%d\n", ret);
9230                 return ret;
9231         }
9232
9233         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9234 }
9235
9236 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9237 {
9238         struct hnae3_queue *queue;
9239         struct hclge_tqp *tqp;
9240
9241         queue = handle->kinfo.tqp[queue_id];
9242         tqp = container_of(queue, struct hclge_tqp, q);
9243
9244         return tqp->index;
9245 }
9246
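/* Reset a single tqp: translate the handle-local queue id to the global tqp
 * index, disable the queue, assert the reset through the command queue, poll
 * the ready bit up to HCLGE_TQP_RESET_TRY_TIMES times (sleeping roughly 1 ms
 * between polls), then deassert the reset.
 */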
9247 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9248 {
9249         struct hclge_vport *vport = hclge_get_vport(handle);
9250         struct hclge_dev *hdev = vport->back;
9251         int reset_try_times = 0;
9252         int reset_status;
9253         u16 queue_gid;
9254         int ret;
9255
9256         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9257
9258         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9259         if (ret) {
9260                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9261                 return ret;
9262         }
9263
9264         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9265         if (ret) {
9266                 dev_err(&hdev->pdev->dev,
9267                         "Send reset tqp cmd fail, ret = %d\n", ret);
9268                 return ret;
9269         }
9270
9271         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9272                 reset_status = hclge_get_reset_status(hdev, queue_gid);
9273                 if (reset_status)
9274                         break;
9275
9276                 /* Wait for tqp hw reset */
9277                 usleep_range(1000, 1200);
9278         }
9279
9280         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9281                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9282                 return ret;
9283         }
9284
9285         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9286         if (ret)
9287                 dev_err(&hdev->pdev->dev,
9288                         "Deassert the soft reset fail, ret = %d\n", ret);
9289
9290         return ret;
9291 }
9292
9293 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9294 {
9295         struct hclge_dev *hdev = vport->back;
9296         int reset_try_times = 0;
9297         int reset_status;
9298         u16 queue_gid;
9299         int ret;
9300
9301         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9302
9303         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9304         if (ret) {
9305                 dev_warn(&hdev->pdev->dev,
9306                          "Send reset tqp cmd fail, ret = %d\n", ret);
9307                 return;
9308         }
9309
9310         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9311                 reset_status = hclge_get_reset_status(hdev, queue_gid);
9312                 if (reset_status)
9313                         break;
9314
9315                 /* Wait for tqp hw reset */
9316                 usleep_range(1000, 1200);
9317         }
9318
9319         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9320                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9321                 return;
9322         }
9323
9324         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9325         if (ret)
9326                 dev_warn(&hdev->pdev->dev,
9327                          "Deassert the soft reset fail, ret = %d\n", ret);
9328 }
9329
9330 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9331 {
9332         struct hclge_vport *vport = hclge_get_vport(handle);
9333         struct hclge_dev *hdev = vport->back;
9334
9335         return hdev->fw_version;
9336 }
9337
9338 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9339 {
9340         struct phy_device *phydev = hdev->hw.mac.phydev;
9341
9342         if (!phydev)
9343                 return;
9344
9345         phy_set_asym_pause(phydev, rx_en, tx_en);
9346 }
9347
9348 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9349 {
9350         int ret;
9351
9352         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9353                 return 0;
9354
9355         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9356         if (ret)
9357                 dev_err(&hdev->pdev->dev,
9358                         "configure pauseparam error, ret = %d.\n", ret);
9359
9360         return ret;
9361 }
9362
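/* Re-resolve MAC pause settings after PHY autonegotiation: build the local
 * and remote pause advertisements, let mii_resolve_flowctrl_fdx() pick the
 * tx/rx pause combination, and force both off for half duplex links.
 */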
9363 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9364 {
9365         struct phy_device *phydev = hdev->hw.mac.phydev;
9366         u16 remote_advertising = 0;
9367         u16 local_advertising;
9368         u32 rx_pause, tx_pause;
9369         u8 flowctl;
9370
9371         if (!phydev->link || !phydev->autoneg)
9372                 return 0;
9373
9374         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9375
9376         if (phydev->pause)
9377                 remote_advertising = LPA_PAUSE_CAP;
9378
9379         if (phydev->asym_pause)
9380                 remote_advertising |= LPA_PAUSE_ASYM;
9381
9382         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9383                                            remote_advertising);
9384         tx_pause = flowctl & FLOW_CTRL_TX;
9385         rx_pause = flowctl & FLOW_CTRL_RX;
9386
9387         if (phydev->duplex == HCLGE_MAC_HALF) {
9388                 tx_pause = 0;
9389                 rx_pause = 0;
9390         }
9391
9392         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9393 }
9394
9395 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9396                                  u32 *rx_en, u32 *tx_en)
9397 {
9398         struct hclge_vport *vport = hclge_get_vport(handle);
9399         struct hclge_dev *hdev = vport->back;
9400         struct phy_device *phydev = hdev->hw.mac.phydev;
9401
9402         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9403
9404         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9405                 *rx_en = 0;
9406                 *tx_en = 0;
9407                 return;
9408         }
9409
9410         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9411                 *rx_en = 1;
9412                 *tx_en = 0;
9413         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9414                 *tx_en = 1;
9415                 *rx_en = 0;
9416         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9417                 *rx_en = 1;
9418                 *tx_en = 1;
9419         } else {
9420                 *rx_en = 0;
9421                 *tx_en = 0;
9422         }
9423 }
9424
9425 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9426                                          u32 rx_en, u32 tx_en)
9427 {
9428         if (rx_en && tx_en)
9429                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9430         else if (rx_en && !tx_en)
9431                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9432         else if (!rx_en && tx_en)
9433                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9434         else
9435                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9436
9437         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9438 }
9439
9440 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9441                                 u32 rx_en, u32 tx_en)
9442 {
9443         struct hclge_vport *vport = hclge_get_vport(handle);
9444         struct hclge_dev *hdev = vport->back;
9445         struct phy_device *phydev = hdev->hw.mac.phydev;
9446         u32 fc_autoneg;
9447
9448         if (phydev) {
9449                 fc_autoneg = hclge_get_autoneg(handle);
9450                 if (auto_neg != fc_autoneg) {
9451                         dev_info(&hdev->pdev->dev,
9452                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9453                         return -EOPNOTSUPP;
9454                 }
9455         }
9456
9457         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9458                 dev_info(&hdev->pdev->dev,
9459                          "Priority flow control enabled. Cannot set link flow control.\n");
9460                 return -EOPNOTSUPP;
9461         }
9462
9463         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9464
9465         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9466
9467         if (!auto_neg)
9468                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9469
9470         if (phydev)
9471                 return phy_start_aneg(phydev);
9472
9473         return -EOPNOTSUPP;
9474 }
9475
9476 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9477                                           u8 *auto_neg, u32 *speed, u8 *duplex)
9478 {
9479         struct hclge_vport *vport = hclge_get_vport(handle);
9480         struct hclge_dev *hdev = vport->back;
9481
9482         if (speed)
9483                 *speed = hdev->hw.mac.speed;
9484         if (duplex)
9485                 *duplex = hdev->hw.mac.duplex;
9486         if (auto_neg)
9487                 *auto_neg = hdev->hw.mac.autoneg;
9488 }
9489
9490 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9491                                  u8 *module_type)
9492 {
9493         struct hclge_vport *vport = hclge_get_vport(handle);
9494         struct hclge_dev *hdev = vport->back;
9495
9496         /* When the nic is down, the service task is not running and does
9497          * not update the port information every second. Query the port
9498          * information before returning the media type to get correct info.
9499          */
9500         hclge_update_port_info(hdev);
9501
9502         if (media_type)
9503                 *media_type = hdev->hw.mac.media_type;
9504
9505         if (module_type)
9506                 *module_type = hdev->hw.mac.module_type;
9507 }
9508
9509 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9510                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
9511 {
9512         struct hclge_vport *vport = hclge_get_vport(handle);
9513         struct hclge_dev *hdev = vport->back;
9514         struct phy_device *phydev = hdev->hw.mac.phydev;
9515         int mdix_ctrl, mdix, is_resolved;
9516         unsigned int retval;
9517
9518         if (!phydev) {
9519                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9520                 *tp_mdix = ETH_TP_MDI_INVALID;
9521                 return;
9522         }
9523
9524         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9525
9526         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9527         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9528                                     HCLGE_PHY_MDIX_CTRL_S);
9529
9530         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9531         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9532         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9533
9534         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9535
9536         switch (mdix_ctrl) {
9537         case 0x0:
9538                 *tp_mdix_ctrl = ETH_TP_MDI;
9539                 break;
9540         case 0x1:
9541                 *tp_mdix_ctrl = ETH_TP_MDI_X;
9542                 break;
9543         case 0x3:
9544                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9545                 break;
9546         default:
9547                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9548                 break;
9549         }
9550
9551         if (!is_resolved)
9552                 *tp_mdix = ETH_TP_MDI_INVALID;
9553         else if (mdix)
9554                 *tp_mdix = ETH_TP_MDI_X;
9555         else
9556                 *tp_mdix = ETH_TP_MDI;
9557 }
9558
9559 static void hclge_info_show(struct hclge_dev *hdev)
9560 {
9561         struct device *dev = &hdev->pdev->dev;
9562
9563         dev_info(dev, "PF info begin:\n");
9564
9565         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9566         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9567         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9568         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9569         dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9570         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9571         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9572         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9573         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9574         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9575         dev_info(dev, "This is %s PF\n",
9576                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9577         dev_info(dev, "DCB %s\n",
9578                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9579         dev_info(dev, "MQPRIO %s\n",
9580                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9581
9582         dev_info(dev, "PF info end.\n");
9583 }
9584
9585 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9586                                           struct hclge_vport *vport)
9587 {
9588         struct hnae3_client *client = vport->nic.client;
9589         struct hclge_dev *hdev = ae_dev->priv;
9590         int rst_cnt = hdev->rst_stats.reset_cnt;
9591         int ret;
9592
9593         ret = client->ops->init_instance(&vport->nic);
9594         if (ret)
9595                 return ret;
9596
9597         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9598         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9599             rst_cnt != hdev->rst_stats.reset_cnt) {
9600                 ret = -EBUSY;
9601                 goto init_nic_err;
9602         }
9603
9604         /* Enable nic hw error interrupts */
9605         ret = hclge_config_nic_hw_error(hdev, true);
9606         if (ret) {
9607                 dev_err(&ae_dev->pdev->dev,
9608                         "fail(%d) to enable hw error interrupts\n", ret);
9609                 goto init_nic_err;
9610         }
9611
9612         hnae3_set_client_init_flag(client, ae_dev, 1);
9613
9614         if (netif_msg_drv(&hdev->vport->nic))
9615                 hclge_info_show(hdev);
9616
9617         return ret;
9618
9619 init_nic_err:
9620         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9621         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9622                 msleep(HCLGE_WAIT_RESET_DONE);
9623
9624         client->ops->uninit_instance(&vport->nic, 0);
9625
9626         return ret;
9627 }
9628
9629 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9630                                            struct hclge_vport *vport)
9631 {
9632         struct hclge_dev *hdev = ae_dev->priv;
9633         struct hnae3_client *client;
9634         int rst_cnt;
9635         int ret;
9636
9637         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9638             !hdev->nic_client)
9639                 return 0;
9640
9641         client = hdev->roce_client;
9642         ret = hclge_init_roce_base_info(vport);
9643         if (ret)
9644                 return ret;
9645
9646         rst_cnt = hdev->rst_stats.reset_cnt;
9647         ret = client->ops->init_instance(&vport->roce);
9648         if (ret)
9649                 return ret;
9650
9651         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9652         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9653             rst_cnt != hdev->rst_stats.reset_cnt) {
9654                 ret = -EBUSY;
9655                 goto init_roce_err;
9656         }
9657
9658         /* Enable roce ras interrupts */
9659         ret = hclge_config_rocee_ras_interrupt(hdev, true);
9660         if (ret) {
9661                 dev_err(&ae_dev->pdev->dev,
9662                         "fail(%d) to enable roce ras interrupts\n", ret);
9663                 goto init_roce_err;
9664         }
9665
9666         hnae3_set_client_init_flag(client, ae_dev, 1);
9667
9668         return 0;
9669
9670 init_roce_err:
9671         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9672         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9673                 msleep(HCLGE_WAIT_RESET_DONE);
9674
9675         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9676
9677         return ret;
9678 }
9679
9680 static int hclge_init_client_instance(struct hnae3_client *client,
9681                                       struct hnae3_ae_dev *ae_dev)
9682 {
9683         struct hclge_dev *hdev = ae_dev->priv;
9684         struct hclge_vport *vport;
9685         int i, ret;
9686
9687         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
9688                 vport = &hdev->vport[i];
9689
9690                 switch (client->type) {
9691                 case HNAE3_CLIENT_KNIC:
9692                         hdev->nic_client = client;
9693                         vport->nic.client = client;
9694                         ret = hclge_init_nic_client_instance(ae_dev, vport);
9695                         if (ret)
9696                                 goto clear_nic;
9697
9698                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9699                         if (ret)
9700                                 goto clear_roce;
9701
9702                         break;
9703                 case HNAE3_CLIENT_ROCE:
9704                         if (hnae3_dev_roce_supported(hdev)) {
9705                                 hdev->roce_client = client;
9706                                 vport->roce.client = client;
9707                         }
9708
9709                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9710                         if (ret)
9711                                 goto clear_roce;
9712
9713                         break;
9714                 default:
9715                         return -EINVAL;
9716                 }
9717         }
9718
9719         return 0;
9720
9721 clear_nic:
9722         hdev->nic_client = NULL;
9723         vport->nic.client = NULL;
9724         return ret;
9725 clear_roce:
9726         hdev->roce_client = NULL;
9727         vport->roce.client = NULL;
9728         return ret;
9729 }
9730
9731 static void hclge_uninit_client_instance(struct hnae3_client *client,
9732                                          struct hnae3_ae_dev *ae_dev)
9733 {
9734         struct hclge_dev *hdev = ae_dev->priv;
9735         struct hclge_vport *vport;
9736         int i;
9737
9738         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9739                 vport = &hdev->vport[i];
9740                 if (hdev->roce_client) {
9741                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9742                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9743                                 msleep(HCLGE_WAIT_RESET_DONE);
9744
9745                         hdev->roce_client->ops->uninit_instance(&vport->roce,
9746                                                                 0);
9747                         hdev->roce_client = NULL;
9748                         vport->roce.client = NULL;
9749                 }
9750                 if (client->type == HNAE3_CLIENT_ROCE)
9751                         return;
9752                 if (hdev->nic_client && client->ops->uninit_instance) {
9753                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9754                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9755                                 msleep(HCLGE_WAIT_RESET_DONE);
9756
9757                         client->ops->uninit_instance(&vport->nic, 0);
9758                         hdev->nic_client = NULL;
9759                         vport->nic.client = NULL;
9760                 }
9761         }
9762 }
9763
9764 static int hclge_pci_init(struct hclge_dev *hdev)
9765 {
9766         struct pci_dev *pdev = hdev->pdev;
9767         struct hclge_hw *hw;
9768         int ret;
9769
9770         ret = pci_enable_device(pdev);
9771         if (ret) {
9772                 dev_err(&pdev->dev, "failed to enable PCI device\n");
9773                 return ret;
9774         }
9775
9776         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9777         if (ret) {
9778                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9779                 if (ret) {
9780                         dev_err(&pdev->dev,
9781                                 "can't set consistent PCI DMA");
9782                         goto err_disable_device;
9783                 }
9784                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9785         }
9786
9787         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9788         if (ret) {
9789                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9790                 goto err_disable_device;
9791         }
9792
9793         pci_set_master(pdev);
9794         hw = &hdev->hw;
9795         hw->io_base = pcim_iomap(pdev, 2, 0);
9796         if (!hw->io_base) {
9797                 dev_err(&pdev->dev, "Can't map configuration register space\n");
9798                 ret = -ENOMEM;
9799                 goto err_clr_master;
9800         }
9801
9802         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9803
9804         return 0;
9805 err_clr_master:
9806         pci_clear_master(pdev);
9807         pci_release_regions(pdev);
9808 err_disable_device:
9809         pci_disable_device(pdev);
9810
9811         return ret;
9812 }
9813
9814 static void hclge_pci_uninit(struct hclge_dev *hdev)
9815 {
9816         struct pci_dev *pdev = hdev->pdev;
9817
9818         pcim_iounmap(pdev, hdev->hw.io_base);
9819         pci_free_irq_vectors(pdev);
9820         pci_clear_master(pdev);
9821         pci_release_mem_regions(pdev);
9822         pci_disable_device(pdev);
9823 }
9824
9825 static void hclge_state_init(struct hclge_dev *hdev)
9826 {
9827         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9828         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9829         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9830         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9831         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9832         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9833         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9834 }
9835
9836 static void hclge_state_uninit(struct hclge_dev *hdev)
9837 {
9838         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9839         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9840
9841         if (hdev->reset_timer.function)
9842                 del_timer_sync(&hdev->reset_timer);
9843         if (hdev->service_task.work.func)
9844                 cancel_delayed_work_sync(&hdev->service_task);
9845 }
9846
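/* Prepare the device for a Function Level Reset: take the reset semaphore,
 * run the reset prepare step (retrying a limited number of times on failure),
 * then disable the misc vector until hclge_flr_done() re-enables it.
 */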
9847 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9848 {
9849 #define HCLGE_FLR_RETRY_WAIT_MS 500
9850 #define HCLGE_FLR_RETRY_CNT     5
9851
9852         struct hclge_dev *hdev = ae_dev->priv;
9853         int retry_cnt = 0;
9854         int ret;
9855
9856 retry:
9857         down(&hdev->reset_sem);
9858         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9859         hdev->reset_type = HNAE3_FLR_RESET;
9860         ret = hclge_reset_prepare(hdev);
9861         if (ret) {
9862                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9863                         ret);
9864                 if (hdev->reset_pending ||
9865                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9866                         dev_err(&hdev->pdev->dev,
9867                                 "reset_pending:0x%lx, retry_cnt:%d\n",
9868                                 hdev->reset_pending, retry_cnt);
9869                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9870                         up(&hdev->reset_sem);
9871                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
9872                         goto retry;
9873                 }
9874         }
9875
9876         /* disable the misc vector before the FLR is done */
9877         hclge_enable_vector(&hdev->misc_vector, false);
9878         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9879         hdev->rst_stats.flr_rst_cnt++;
9880 }
9881
9882 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9883 {
9884         struct hclge_dev *hdev = ae_dev->priv;
9885         int ret;
9886
9887         hclge_enable_vector(&hdev->misc_vector, true);
9888
9889         ret = hclge_reset_rebuild(hdev);
9890         if (ret)
9891                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9892
9893         hdev->reset_type = HNAE3_NONE_RESET;
9894         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9895         up(&hdev->reset_sem);
9896 }
9897
9898 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9899 {
9900         u16 i;
9901
9902         for (i = 0; i < hdev->num_alloc_vport; i++) {
9903                 struct hclge_vport *vport = &hdev->vport[i];
9904                 int ret;
9905
9906                 /* Send cmd to clear VF's FUNC_RST_ING */
9907                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9908                 if (ret)
9909                         dev_warn(&hdev->pdev->dev,
9910                                  "clear vf(%u) rst failed %d!\n",
9911                                  vport->vport_id, ret);
9912         }
9913 }
9914
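/* Main initialization path for the PF: allocate the device structure, bring
 * up the PCI, command queue and MSI resources, configure MAC, TSO, GRO, VLAN,
 * TM, RSS and flow director, then enable the misc vector and schedule the
 * service task.
 */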
9915 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9916 {
9917         struct pci_dev *pdev = ae_dev->pdev;
9918         struct hclge_dev *hdev;
9919         int ret;
9920
9921         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9922         if (!hdev)
9923                 return -ENOMEM;
9924
9925         hdev->pdev = pdev;
9926         hdev->ae_dev = ae_dev;
9927         hdev->reset_type = HNAE3_NONE_RESET;
9928         hdev->reset_level = HNAE3_FUNC_RESET;
9929         ae_dev->priv = hdev;
9930
9931         /* HW supports 2-layer VLAN */
9932         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9933
9934         mutex_init(&hdev->vport_lock);
9935         spin_lock_init(&hdev->fd_rule_lock);
9936         sema_init(&hdev->reset_sem, 1);
9937
9938         ret = hclge_pci_init(hdev);
9939         if (ret)
9940                 goto out;
9941
9942         /* Initialize the firmware command queue */
9943         ret = hclge_cmd_queue_init(hdev);
9944         if (ret)
9945                 goto err_pci_uninit;
9946
9947         /* Initialize firmware commands */
9948         ret = hclge_cmd_init(hdev);
9949         if (ret)
9950                 goto err_cmd_uninit;
9951
9952         ret = hclge_get_cap(hdev);
9953         if (ret)
9954                 goto err_cmd_uninit;
9955
9956         ret = hclge_configure(hdev);
9957         if (ret) {
9958                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9959                 goto err_cmd_uninit;
9960         }
9961
9962         ret = hclge_init_msi(hdev);
9963         if (ret) {
9964                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9965                 goto err_cmd_uninit;
9966         }
9967
9968         ret = hclge_misc_irq_init(hdev);
9969         if (ret)
9970                 goto err_msi_uninit;
9971
9972         ret = hclge_alloc_tqps(hdev);
9973         if (ret) {
9974                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9975                 goto err_msi_irq_uninit;
9976         }
9977
9978         ret = hclge_alloc_vport(hdev);
9979         if (ret)
9980                 goto err_msi_irq_uninit;
9981
9982         ret = hclge_map_tqp(hdev);
9983         if (ret)
9984                 goto err_msi_irq_uninit;
9985
9986         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9987                 ret = hclge_mac_mdio_config(hdev);
9988                 if (ret)
9989                         goto err_msi_irq_uninit;
9990         }
9991
9992         ret = hclge_init_umv_space(hdev);
9993         if (ret)
9994                 goto err_mdiobus_unreg;
9995
9996         ret = hclge_mac_init(hdev);
9997         if (ret) {
9998                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9999                 goto err_mdiobus_unreg;
10000         }
10001
10002         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10003         if (ret) {
10004                 dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
10005                 goto err_mdiobus_unreg;
10006         }
10007
10008         ret = hclge_config_gro(hdev, true);
10009         if (ret)
10010                 goto err_mdiobus_unreg;
10011
10012         ret = hclge_init_vlan_config(hdev);
10013         if (ret) {
10014                 dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
10015                 goto err_mdiobus_unreg;
10016         }
10017
10018         ret = hclge_tm_schd_init(hdev);
10019         if (ret) {
10020                 dev_err(&pdev->dev, "tm schd init fail, ret = %d\n", ret);
10021                 goto err_mdiobus_unreg;
10022         }
10023
10024         hclge_rss_init_cfg(hdev);
10025         ret = hclge_rss_init_hw(hdev);
10026         if (ret) {
10027                 dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
10028                 goto err_mdiobus_unreg;
10029         }
10030
10031         ret = init_mgr_tbl(hdev);
10032         if (ret) {
10033                 dev_err(&pdev->dev, "manager table init fail, ret = %d\n", ret);
10034                 goto err_mdiobus_unreg;
10035         }
10036
10037         ret = hclge_init_fd_config(hdev);
10038         if (ret) {
10039                 dev_err(&pdev->dev,
10040                         "fd table init fail, ret=%d\n", ret);
10041                 goto err_mdiobus_unreg;
10042         }
10043
10044         INIT_KFIFO(hdev->mac_tnl_log);
10045
10046         hclge_dcb_ops_set(hdev);
10047
10048         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10049         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10050
10051         /* Set up affinity after the service timer setup because add_timer_on
10052          * is called in the affinity notify.
10053          */
10054         hclge_misc_affinity_setup(hdev);
10055
10056         hclge_clear_all_event_cause(hdev);
10057         hclge_clear_resetting_state(hdev);
10058
10059         /* Log and clear the hw errors that have already occurred */
10060         hclge_handle_all_hns_hw_errors(ae_dev);
10061
10062         /* request a delayed reset for error recovery because an immediate
10063          * global reset on a PF may affect the pending initialization of other PFs
10064          */
10065         if (ae_dev->hw_err_reset_req) {
10066                 enum hnae3_reset_type reset_level;
10067
10068                 reset_level = hclge_get_reset_level(ae_dev,
10069                                                     &ae_dev->hw_err_reset_req);
10070                 hclge_set_def_reset_request(ae_dev, reset_level);
10071                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10072         }
10073
10074         /* Enable MISC vector(vector0) */
10075         hclge_enable_vector(&hdev->misc_vector, true);
10076
10077         hclge_state_init(hdev);
10078         hdev->last_reset_time = jiffies;
10079
10080         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10081                  HCLGE_DRIVER_NAME);
10082
10083         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10084
10085         return 0;
10086
10087 err_mdiobus_unreg:
10088         if (hdev->hw.mac.phydev)
10089                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10090 err_msi_irq_uninit:
10091         hclge_misc_irq_uninit(hdev);
10092 err_msi_uninit:
10093         pci_free_irq_vectors(pdev);
10094 err_cmd_uninit:
10095         hclge_cmd_uninit(hdev);
10096 err_pci_uninit:
10097         pcim_iounmap(pdev, hdev->hw.io_base);
10098         pci_clear_master(pdev);
10099         pci_release_regions(pdev);
10100         pci_disable_device(pdev);
10101 out:
10102         mutex_destroy(&hdev->vport_lock);
10103         return ret;
10104 }
10105
10106 static void hclge_stats_clear(struct hclge_dev *hdev)
10107 {
10108         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10109 }
10110
10111 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10112 {
10113         return hclge_config_switch_param(hdev, vf, enable,
10114                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
10115 }
10116
10117 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10118 {
10119         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10120                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
10121                                           enable, vf);
10122 }
10123
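/* Apply both MAC and VLAN anti-spoofing settings to the given VF in hardware. */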
10124 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10125 {
10126         int ret;
10127
10128         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10129         if (ret) {
10130                 dev_err(&hdev->pdev->dev,
10131                         "Set vf %d mac spoof check %s failed, ret=%d\n",
10132                         vf, enable ? "on" : "off", ret);
10133                 return ret;
10134         }
10135
10136         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10137         if (ret)
10138                 dev_err(&hdev->pdev->dev,
10139                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
10140                         vf, enable ? "on" : "off", ret);
10141
10142         return ret;
10143 }
10144
10145 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10146                                  bool enable)
10147 {
10148         struct hclge_vport *vport = hclge_get_vport(handle);
10149         struct hclge_dev *hdev = vport->back;
10150         u32 new_spoofchk = enable ? 1 : 0;
10151         int ret;
10152
10153         if (hdev->pdev->revision == 0x20)
10154                 return -EOPNOTSUPP;
10155
10156         vport = hclge_get_vf_vport(hdev, vf);
10157         if (!vport)
10158                 return -EINVAL;
10159
10160         if (vport->vf_info.spoofchk == new_spoofchk)
10161                 return 0;
10162
10163         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10164                 dev_warn(&hdev->pdev->dev,
10165                          "vf %d vlan table is full, enabling spoof check may cause its packet sending to fail\n",
10166                          vf);
10167         else if (enable && hclge_is_umv_space_full(vport, true))
10168                 dev_warn(&hdev->pdev->dev,
10169                          "vf %d mac table is full, enabling spoof check may cause its packet sending to fail\n",
10170                          vf);
10171
10172         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10173         if (ret)
10174                 return ret;
10175
10176         vport->vf_info.spoofchk = new_spoofchk;
10177         return 0;
10178 }
10179
10180 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10181 {
10182         struct hclge_vport *vport = hdev->vport;
10183         int ret;
10184         int i;
10185
10186         if (hdev->pdev->revision == 0x20)
10187                 return 0;
10188
10189         /* resume the vf spoof check state after reset */
10190         for (i = 0; i < hdev->num_alloc_vport; i++) {
10191                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10192                                                vport->vf_info.spoofchk);
10193                 if (ret)
10194                         return ret;
10195
10196                 vport++;
10197         }
10198
10199         return 0;
10200 }
10201
10202 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10203 {
10204         struct hclge_vport *vport = hclge_get_vport(handle);
10205         struct hclge_dev *hdev = vport->back;
10206         u32 new_trusted = enable ? 1 : 0;
10207         bool en_bc_pmc;
10208         int ret;
10209
10210         vport = hclge_get_vf_vport(hdev, vf);
10211         if (!vport)
10212                 return -EINVAL;
10213
10214         if (vport->vf_info.trusted == new_trusted)
10215                 return 0;
10216
10217         /* Disable promisc mode for VF if it is not trusted any more. */
10218         if (!enable && vport->vf_info.promisc_enable) {
10219                 en_bc_pmc = hdev->pdev->revision != 0x20;
10220                 ret = hclge_set_vport_promisc_mode(vport, false, false,
10221                                                    en_bc_pmc);
10222                 if (ret)
10223                         return ret;
10224                 vport->vf_info.promisc_enable = 0;
10225                 hclge_inform_vf_promisc_info(vport);
10226         }
10227
10228         vport->vf_info.trusted = new_trusted;
10229
10230         return 0;
10231 }
10232
10233 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10234 {
10235         int ret;
10236         int vf;
10237
10238         /* reset vf rate to default value */
10239         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10240                 struct hclge_vport *vport = &hdev->vport[vf];
10241
10242                 vport->vf_info.max_tx_rate = 0;
10243                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10244                 if (ret)
10245                         dev_err(&hdev->pdev->dev,
10246                                 "vf%d failed to reset to default, ret=%d\n",
10247                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10248         }
10249 }
10250
10251 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10252                                      int min_tx_rate, int max_tx_rate)
10253 {
10254         if (min_tx_rate != 0 ||
10255             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10256                 dev_err(&hdev->pdev->dev,
10257                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10258                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10259                 return -EINVAL;
10260         }
10261
10262         return 0;
10263 }
10264
10265 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10266                              int min_tx_rate, int max_tx_rate, bool force)
10267 {
10268         struct hclge_vport *vport = hclge_get_vport(handle);
10269         struct hclge_dev *hdev = vport->back;
10270         int ret;
10271
10272         ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10273         if (ret)
10274                 return ret;
10275
10276         vport = hclge_get_vf_vport(hdev, vf);
10277         if (!vport)
10278                 return -EINVAL;
10279
10280         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10281                 return 0;
10282
10283         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10284         if (ret)
10285                 return ret;
10286
10287         vport->vf_info.max_tx_rate = max_tx_rate;
10288
10289         return 0;
10290 }
10291
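/* Restore each VF's configured max_tx_rate after a reset; a stored value of
 * zero means no limit and is left at the firmware default.
 */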
10292 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10293 {
10294         struct hnae3_handle *handle = &hdev->vport->nic;
10295         struct hclge_vport *vport;
10296         int ret;
10297         int vf;
10298
10299         /* resume the vf max_tx_rate after reset */
10300         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10301                 vport = hclge_get_vf_vport(hdev, vf);
10302                 if (!vport)
10303                         return -EINVAL;
10304
10305                 /* zero means max rate; after reset, the firmware has already
10306                  * set it to the max rate, so just continue.
10307                  */
10308                 if (!vport->vf_info.max_tx_rate)
10309                         continue;
10310
10311                 ret = hclge_set_vf_rate(handle, vf, 0,
10312                                         vport->vf_info.max_tx_rate, true);
10313                 if (ret) {
10314                         dev_err(&hdev->pdev->dev,
10315                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10316                                 vf, vport->vf_info.max_tx_rate, ret);
10317                         return ret;
10318                 }
10319         }
10320
10321         return 0;
10322 }
10323
10324 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10325 {
10326         struct hclge_vport *vport = hdev->vport;
10327         int i;
10328
10329         for (i = 0; i < hdev->num_alloc_vport; i++) {
10330                 hclge_vport_stop(vport);
10331                 vport++;
10332         }
10333 }
10334
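/* Re-initialize the device after a reset: clear the stats, reset the VLAN and
 * UMV tables for IMP/global resets, then rebuild the command queue, MAC, TM,
 * RSS and flow director configuration and restore the per-VF state.
 */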
10335 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10336 {
10337         struct hclge_dev *hdev = ae_dev->priv;
10338         struct pci_dev *pdev = ae_dev->pdev;
10339         int ret;
10340
10341         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10342
10343         hclge_stats_clear(hdev);
10344         /* NOTE: a PF reset does not need to clear or restore the PF and VF
10345          * table entries, so the tables in memory should not be cleaned here.
10346          */
10347         if (hdev->reset_type == HNAE3_IMP_RESET ||
10348             hdev->reset_type == HNAE3_GLOBAL_RESET) {
10349                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10350                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10351                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10352                 hclge_reset_umv_space(hdev);
10353         }
10354
10355         ret = hclge_cmd_init(hdev);
10356         if (ret) {
10357                 dev_err(&pdev->dev, "Cmd queue init failed\n");
10358                 return ret;
10359         }
10360
10361         ret = hclge_map_tqp(hdev);
10362         if (ret) {
10363                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10364                 return ret;
10365         }
10366
10367         ret = hclge_mac_init(hdev);
10368         if (ret) {
10369                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10370                 return ret;
10371         }
10372
10373         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10374         if (ret) {
10375                 dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
10376                 return ret;
10377         }
10378
10379         ret = hclge_config_gro(hdev, true);
10380         if (ret)
10381                 return ret;
10382
10383         ret = hclge_init_vlan_config(hdev);
10384         if (ret) {
10385                 dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
10386                 return ret;
10387         }
10388
10389         ret = hclge_tm_init_hw(hdev, true);
10390         if (ret) {
10391                 dev_err(&pdev->dev, "tm init hw fail, ret = %d\n", ret);
10392                 return ret;
10393         }
10394
10395         ret = hclge_rss_init_hw(hdev);
10396         if (ret) {
10397                 dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
10398                 return ret;
10399         }
10400
10401         ret = init_mgr_tbl(hdev);
10402         if (ret) {
10403                 dev_err(&pdev->dev,
10404                         "failed to reinit manager table, ret = %d\n", ret);
10405                 return ret;
10406         }
10407
10408         ret = hclge_init_fd_config(hdev);
10409         if (ret) {
10410                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10411                 return ret;
10412         }
10413
10414         /* Log and clear the hw errors that have already occurred */
10415         hclge_handle_all_hns_hw_errors(ae_dev);
10416
10417         /* Re-enable the hw error interrupts because
10418          * the interrupts get disabled on global reset.
10419          */
10420         ret = hclge_config_nic_hw_error(hdev, true);
10421         if (ret) {
10422                 dev_err(&pdev->dev,
10423                         "fail(%d) to re-enable NIC hw error interrupts\n",
10424                         ret);
10425                 return ret;
10426         }
10427
10428         if (hdev->roce_client) {
10429                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10430                 if (ret) {
10431                         dev_err(&pdev->dev,
10432                                 "fail(%d) to re-enable roce ras interrupts\n",
10433                                 ret);
10434                         return ret;
10435                 }
10436         }
10437
10438         hclge_reset_vport_state(hdev);
10439         ret = hclge_reset_vport_spoofchk(hdev);
10440         if (ret)
10441                 return ret;
10442
10443         ret = hclge_resume_vf_rate(hdev);
10444         if (ret)
10445                 return ret;
10446
10447         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10448                  HCLGE_DRIVER_NAME);
10449
10450         return 0;
10451 }
10452
10453 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10454 {
10455         struct hclge_dev *hdev = ae_dev->priv;
10456         struct hclge_mac *mac = &hdev->hw.mac;
10457
10458         hclge_reset_vf_rate(hdev);
10459         hclge_clear_vf_vlan(hdev);
10460         hclge_misc_affinity_teardown(hdev);
10461         hclge_state_uninit(hdev);
10462         hclge_uninit_mac_table(hdev);
10463
10464         if (mac->phydev)
10465                 mdiobus_unregister(mac->mdio_bus);
10466
10467         /* Disable MISC vector(vector0) */
10468         hclge_enable_vector(&hdev->misc_vector, false);
10469         synchronize_irq(hdev->misc_vector.vector_irq);
10470
10471         /* Disable all hw interrupts */
10472         hclge_config_mac_tnl_int(hdev, false);
10473         hclge_config_nic_hw_error(hdev, false);
10474         hclge_config_rocee_ras_interrupt(hdev, false);
10475
10476         hclge_cmd_uninit(hdev);
10477         hclge_misc_irq_uninit(hdev);
10478         hclge_pci_uninit(hdev);
10479         mutex_destroy(&hdev->vport_lock);
10480         hclge_uninit_vport_vlan_table(hdev);
10481         ae_dev->priv = NULL;
10482 }
10483
10484 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10485 {
10486         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10487         struct hclge_vport *vport = hclge_get_vport(handle);
10488         struct hclge_dev *hdev = vport->back;
10489
10490         return min_t(u32, hdev->rss_size_max,
10491                      vport->alloc_tqps / kinfo->num_tc);
10492 }
10493
10494 static void hclge_get_channels(struct hnae3_handle *handle,
10495                                struct ethtool_channels *ch)
10496 {
10497         ch->max_combined = hclge_get_max_channels(handle);
10498         ch->other_count = 1;
10499         ch->max_other = 1;
10500         ch->combined_count = handle->kinfo.rss_size;
10501 }
10502
10503 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10504                                         u16 *alloc_tqps, u16 *max_rss_size)
10505 {
10506         struct hclge_vport *vport = hclge_get_vport(handle);
10507         struct hclge_dev *hdev = vport->back;
10508
10509         *alloc_tqps = vport->alloc_tqps;
10510         *max_rss_size = hdev->rss_size_max;
10511 }
10512
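/* Change the number of queues (TQPs) used by the PF: update the TM vport
 * mapping, reprogram the RSS TC mode and, unless the user has configured the
 * RSS indirection table, rebuild the indirection table for the new rss_size.
 */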
10513 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10514                               bool rxfh_configured)
10515 {
10516         struct hclge_vport *vport = hclge_get_vport(handle);
10517         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10518         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10519         struct hclge_dev *hdev = vport->back;
10520         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10521         u16 cur_rss_size = kinfo->rss_size;
10522         u16 cur_tqps = kinfo->num_tqps;
10523         u16 tc_valid[HCLGE_MAX_TC_NUM];
10524         u16 roundup_size;
10525         u32 *rss_indir;
10526         unsigned int i;
10527         int ret;
10528
10529         kinfo->req_rss_size = new_tqps_num;
10530
10531         ret = hclge_tm_vport_map_update(hdev);
10532         if (ret) {
10533                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
10534                 return ret;
10535         }
10536
10537         roundup_size = roundup_pow_of_two(kinfo->rss_size);
10538         roundup_size = ilog2(roundup_size);
10539         /* Set the RSS TC mode according to the new RSS size */
10540         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10541                 tc_valid[i] = 0;
10542
10543                 if (!(hdev->hw_tc_map & BIT(i)))
10544                         continue;
10545
10546                 tc_valid[i] = 1;
10547                 tc_size[i] = roundup_size;
10548                 tc_offset[i] = kinfo->rss_size * i;
10549         }
10550         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10551         if (ret)
10552                 return ret;
10553
10554         /* RSS indirection table has been configured by the user */
10555         if (rxfh_configured)
10556                 goto out;
10557
10558         /* Reinitialize the RSS indirection table according to the new RSS size */
10559         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10560         if (!rss_indir)
10561                 return -ENOMEM;
10562
10563         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10564                 rss_indir[i] = i % kinfo->rss_size;
10565
10566         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10567         if (ret)
10568                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10569                         ret);
10570
10571         kfree(rss_indir);
10572
10573 out:
10574         if (!ret)
10575                 dev_info(&hdev->pdev->dev,
10576                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
10577                          cur_rss_size, kinfo->rss_size,
10578                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
10579
10580         return ret;
10581 }
10582
10583 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10584                               u32 *regs_num_64_bit)
10585 {
10586         struct hclge_desc desc;
10587         u32 total_num;
10588         int ret;
10589
10590         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10591         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10592         if (ret) {
10593                 dev_err(&hdev->pdev->dev,
10594                         "Query register number cmd failed, ret = %d.\n", ret);
10595                 return ret;
10596         }
10597
10598         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10599         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10600
10601         total_num = *regs_num_32_bit + *regs_num_64_bit;
10602         if (!total_num)
10603                 return -EINVAL;
10604
10605         return 0;
10606 }
10607
10608 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10609                                  void *data)
10610 {
10611 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10612 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10613
10614         struct hclge_desc *desc;
10615         u32 *reg_val = data;
10616         __le32 *desc_data;
10617         int nodata_num;
10618         int cmd_num;
10619         int i, k, n;
10620         int ret;
10621
10622         if (regs_num == 0)
10623                 return 0;
10624
10625         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10626         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10627                                HCLGE_32_BIT_REG_RTN_DATANUM);
10628         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10629         if (!desc)
10630                 return -ENOMEM;
10631
10632         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10633         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10634         if (ret) {
10635                 dev_err(&hdev->pdev->dev,
10636                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
10637                 kfree(desc);
10638                 return ret;
10639         }
10640
10641         for (i = 0; i < cmd_num; i++) {
10642                 if (i == 0) {
10643                         desc_data = (__le32 *)(&desc[i].data[0]);
10644                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10645                 } else {
10646                         desc_data = (__le32 *)(&desc[i]);
10647                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
10648                 }
10649                 for (k = 0; k < n; k++) {
10650                         *reg_val++ = le32_to_cpu(*desc_data++);
10651
10652                         regs_num--;
10653                         if (!regs_num)
10654                                 break;
10655                 }
10656         }
10657
10658         kfree(desc);
10659         return 0;
10660 }
10661
10662 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10663                                  void *data)
10664 {
10665 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10666 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10667
10668         struct hclge_desc *desc;
10669         u64 *reg_val = data;
10670         __le64 *desc_data;
10671         int nodata_len;
10672         int cmd_num;
10673         int i, k, n;
10674         int ret;
10675
10676         if (regs_num == 0)
10677                 return 0;
10678
10679         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10680         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10681                                HCLGE_64_BIT_REG_RTN_DATANUM);
10682         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10683         if (!desc)
10684                 return -ENOMEM;
10685
10686         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10687         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10688         if (ret) {
10689                 dev_err(&hdev->pdev->dev,
10690                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
10691                 kfree(desc);
10692                 return ret;
10693         }
10694
10695         for (i = 0; i < cmd_num; i++) {
10696                 if (i == 0) {
10697                         desc_data = (__le64 *)(&desc[i].data[0]);
10698                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10699                 } else {
10700                         desc_data = (__le64 *)(&desc[i]);
10701                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
10702                 }
10703                 for (k = 0; k < n; k++) {
10704                         *reg_val++ = le64_to_cpu(*desc_data++);
10705
10706                         regs_num--;
10707                         if (!regs_num)
10708                                 break;
10709                 }
10710         }
10711
10712         kfree(desc);
10713         return 0;
10714 }
10715
10716 #define MAX_SEPARATE_NUM        4
10717 #define SEPARATOR_VALUE         0xFDFCFBFA
10718 #define REG_NUM_PER_LINE        4
10719 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
10720 #define REG_SEPARATOR_LINE      1
10721 #define REG_NUM_REMAIN_MASK     3
10722 #define BD_LIST_MAX_NUM         30
10723
10724 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10725 {
10726         /* prepare 4 commands to query DFX BD number */
10727         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10728         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10729         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10730         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10731         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10732         desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10733         hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10734
10735         return hclge_cmd_send(&hdev->hw, desc, 4);
10736 }
10737
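/* Query how many buffer descriptors each DFX register type needs and fill
 * bd_num_list with one entry per type, in hclge_dfx_bd_offset_list order.
 */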
10738 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10739                                     int *bd_num_list,
10740                                     u32 type_num)
10741 {
10742         u32 entries_per_desc, desc_index, index, offset, i;
10743         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10744         int ret;
10745
10746         ret = hclge_query_bd_num_cmd_send(hdev, desc);
10747         if (ret) {
10748                 dev_err(&hdev->pdev->dev,
10749                         "Get dfx bd num fail, status is %d.\n", ret);
10750                 return ret;
10751         }
10752
10753         entries_per_desc = ARRAY_SIZE(desc[0].data);
10754         for (i = 0; i < type_num; i++) {
10755                 offset = hclge_dfx_bd_offset_list[i];
10756                 index = offset % entries_per_desc;
10757                 desc_index = offset / entries_per_desc;
10758                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10759         }
10760
10761         return ret;
10762 }
10763
10764 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10765                                   struct hclge_desc *desc_src, int bd_num,
10766                                   enum hclge_opcode_type cmd)
10767 {
10768         struct hclge_desc *desc = desc_src;
10769         int i, ret;
10770
10771         hclge_cmd_setup_basic_desc(desc, cmd, true);
10772         for (i = 0; i < bd_num - 1; i++) {
10773                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10774                 desc++;
10775                 hclge_cmd_setup_basic_desc(desc, cmd, true);
10776         }
10777
10778         desc = desc_src;
10779         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10780         if (ret)
10781                 dev_err(&hdev->pdev->dev,
10782                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10783                         cmd, ret);
10784
10785         return ret;
10786 }
10787
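/* Copy the register values from the command descriptors into the output
 * buffer and pad to a full line with separator values; returns the number of
 * u32 words written.
 */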
10788 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10789                                     void *data)
10790 {
10791         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10792         struct hclge_desc *desc = desc_src;
10793         u32 *reg = data;
10794
10795         entries_per_desc = ARRAY_SIZE(desc->data);
10796         reg_num = entries_per_desc * bd_num;
10797         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10798         for (i = 0; i < reg_num; i++) {
10799                 index = i % entries_per_desc;
10800                 desc_index = i / entries_per_desc;
10801                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10802         }
10803         for (i = 0; i < separator_num; i++)
10804                 *reg++ = SEPARATOR_VALUE;
10805
10806         return reg_num + separator_num;
10807 }
10808
10809 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10810 {
10811         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10812         int data_len_per_desc, bd_num, i;
10813         int bd_num_list[BD_LIST_MAX_NUM];
10814         u32 data_len;
10815         int ret;
10816
10817         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10818         if (ret) {
10819                 dev_err(&hdev->pdev->dev,
10820                         "Get dfx reg bd num fail, status is %d.\n", ret);
10821                 return ret;
10822         }
10823
10824         data_len_per_desc = sizeof_field(struct hclge_desc, data);
10825         *len = 0;
10826         for (i = 0; i < dfx_reg_type_num; i++) {
10827                 bd_num = bd_num_list[i];
10828                 data_len = data_len_per_desc * bd_num;
10829                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10830         }
10831
10832         return ret;
10833 }
10834
10835 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10836 {
10837         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10838         int bd_num, bd_num_max, buf_len, i;
10839         int bd_num_list[BD_LIST_MAX_NUM];
10840         struct hclge_desc *desc_src;
10841         u32 *reg = data;
10842         int ret;
10843
10844         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10845         if (ret) {
10846                 dev_err(&hdev->pdev->dev,
10847                         "Get dfx reg bd num fail, status is %d.\n", ret);
10848                 return ret;
10849         }
10850
10851         bd_num_max = bd_num_list[0];
10852         for (i = 1; i < dfx_reg_type_num; i++)
10853                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10854
10855         buf_len = sizeof(*desc_src) * bd_num_max;
10856         desc_src = kzalloc(buf_len, GFP_KERNEL);
10857         if (!desc_src)
10858                 return -ENOMEM;
10859
10860         for (i = 0; i < dfx_reg_type_num; i++) {
10861                 bd_num = bd_num_list[i];
10862                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10863                                              hclge_dfx_reg_opcode_list[i]);
10864                 if (ret) {
10865                         dev_err(&hdev->pdev->dev,
10866                                 "Get dfx reg fail, status is %d.\n", ret);
10867                         break;
10868                 }
10869
10870                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10871         }
10872
10873         kfree(desc_src);
10874         return ret;
10875 }
10876
10877 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10878                               struct hnae3_knic_private_info *kinfo)
10879 {
10880 #define HCLGE_RING_REG_OFFSET           0x200
10881 #define HCLGE_RING_INT_REG_OFFSET       0x4
10882
10883         int i, j, reg_num, separator_num;
10884         int data_num_sum;
10885         u32 *reg = data;
10886
10887         /* fetch per-PF register values from the PF PCIe register space */
10888         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10889         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10890         for (i = 0; i < reg_num; i++)
10891                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10892         for (i = 0; i < separator_num; i++)
10893                 *reg++ = SEPARATOR_VALUE;
10894         data_num_sum = reg_num + separator_num;
10895
10896         reg_num = ARRAY_SIZE(common_reg_addr_list);
10897         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10898         for (i = 0; i < reg_num; i++)
10899                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10900         for (i = 0; i < separator_num; i++)
10901                 *reg++ = SEPARATOR_VALUE;
10902         data_num_sum += reg_num + separator_num;
10903
10904         reg_num = ARRAY_SIZE(ring_reg_addr_list);
10905         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10906         for (j = 0; j < kinfo->num_tqps; j++) {
10907                 for (i = 0; i < reg_num; i++)
10908                         *reg++ = hclge_read_dev(&hdev->hw,
10909                                                 ring_reg_addr_list[i] +
10910                                                 HCLGE_RING_REG_OFFSET * j);
10911                 for (i = 0; i < separator_num; i++)
10912                         *reg++ = SEPARATOR_VALUE;
10913         }
10914         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10915
10916         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10917         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10918         for (j = 0; j < hdev->num_msi_used - 1; j++) {
10919                 for (i = 0; i < reg_num; i++)
10920                         *reg++ = hclge_read_dev(&hdev->hw,
10921                                                 tqp_intr_reg_addr_list[i] +
10922                                                 HCLGE_RING_INT_REG_OFFSET * j);
10923                 for (i = 0; i < separator_num; i++)
10924                         *reg++ = SEPARATOR_VALUE;
10925         }
10926         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10927
10928         return data_num_sum;
10929 }
10930
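/* Work out the total buffer size (in bytes) needed by hclge_get_regs(),
 * including the separator lines appended after each register block.
 */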
10931 static int hclge_get_regs_len(struct hnae3_handle *handle)
10932 {
10933         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10934         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10935         struct hclge_vport *vport = hclge_get_vport(handle);
10936         struct hclge_dev *hdev = vport->back;
10937         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10938         int regs_lines_32_bit, regs_lines_64_bit;
10939         int ret;
10940
10941         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10942         if (ret) {
10943                 dev_err(&hdev->pdev->dev,
10944                         "Get register number failed, ret = %d.\n", ret);
10945                 return ret;
10946         }
10947
10948         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10949         if (ret) {
10950                 dev_err(&hdev->pdev->dev,
10951                         "Get dfx reg len failed, ret = %d.\n", ret);
10952                 return ret;
10953         }
10954
10955         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10956                 REG_SEPARATOR_LINE;
10957         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10958                 REG_SEPARATOR_LINE;
10959         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10960                 REG_SEPARATOR_LINE;
10961         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10962                 REG_SEPARATOR_LINE;
10963         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10964                 REG_SEPARATOR_LINE;
10965         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10966                 REG_SEPARATOR_LINE;
10967
10968         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10969                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10970                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10971 }
10972
10973 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10974                            void *data)
10975 {
10976         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10977         struct hclge_vport *vport = hclge_get_vport(handle);
10978         struct hclge_dev *hdev = vport->back;
10979         u32 regs_num_32_bit, regs_num_64_bit;
10980         int i, reg_num, separator_num, ret;
10981         u32 *reg = data;
10982
10983         *version = hdev->fw_version;
10984
10985         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10986         if (ret) {
10987                 dev_err(&hdev->pdev->dev,
10988                         "Get register number failed, ret = %d.\n", ret);
10989                 return;
10990         }
10991
10992         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10993
10994         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10995         if (ret) {
10996                 dev_err(&hdev->pdev->dev,
10997                         "Get 32 bit register failed, ret = %d.\n", ret);
10998                 return;
10999         }
11000         reg_num = regs_num_32_bit;
11001         reg += reg_num;
11002         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11003         for (i = 0; i < separator_num; i++)
11004                 *reg++ = SEPARATOR_VALUE;
11005
11006         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11007         if (ret) {
11008                 dev_err(&hdev->pdev->dev,
11009                         "Get 64 bit register failed, ret = %d.\n", ret);
11010                 return;
11011         }
11012         reg_num = regs_num_64_bit * 2;
11013         reg += reg_num;
11014         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11015         for (i = 0; i < separator_num; i++)
11016                 *reg++ = SEPARATOR_VALUE;
11017
11018         ret = hclge_get_dfx_reg(hdev, reg);
11019         if (ret)
11020                 dev_err(&hdev->pdev->dev,
11021                         "Get dfx register failed, ret = %d.\n", ret);
11022 }
11023
11024 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11025 {
11026         struct hclge_set_led_state_cmd *req;
11027         struct hclge_desc desc;
11028         int ret;
11029
11030         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11031
11032         req = (struct hclge_set_led_state_cmd *)desc.data;
11033         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11034                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11035
11036         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11037         if (ret)
11038                 dev_err(&hdev->pdev->dev,
11039                         "Send set led state cmd error, ret = %d\n", ret);
11040
11041         return ret;
11042 }
11043
11044 enum hclge_led_status {
11045         HCLGE_LED_OFF,
11046         HCLGE_LED_ON,
11047         HCLGE_LED_NO_CHANGE = 0xFF,
11048 };
11049
11050 static int hclge_set_led_id(struct hnae3_handle *handle,
11051                             enum ethtool_phys_id_state status)
11052 {
11053         struct hclge_vport *vport = hclge_get_vport(handle);
11054         struct hclge_dev *hdev = vport->back;
11055
11056         switch (status) {
11057         case ETHTOOL_ID_ACTIVE:
11058                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11059         case ETHTOOL_ID_INACTIVE:
11060                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11061         default:
11062                 return -EINVAL;
11063         }
11064 }
11065
11066 static void hclge_get_link_mode(struct hnae3_handle *handle,
11067                                 unsigned long *supported,
11068                                 unsigned long *advertising)
11069 {
11070         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11071         struct hclge_vport *vport = hclge_get_vport(handle);
11072         struct hclge_dev *hdev = vport->back;
11073         unsigned int idx = 0;
11074
11075         for (; idx < size; idx++) {
11076                 supported[idx] = hdev->hw.mac.supported[idx];
11077                 advertising[idx] = hdev->hw.mac.advertising[idx];
11078         }
11079 }
11080
11081 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11082 {
11083         struct hclge_vport *vport = hclge_get_vport(handle);
11084         struct hclge_dev *hdev = vport->back;
11085
11086         return hclge_config_gro(hdev, enable);
11087 }
11088
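/* Re-apply the promiscuous mode for vport 0 when the overflow promiscuous
 * flags have changed, and update the VLAN filter state accordingly.
 */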
11089 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11090 {
11091         struct hclge_vport *vport = &hdev->vport[0];
11092         struct hnae3_handle *handle = &vport->nic;
11093         u8 tmp_flags = 0;
11094         int ret;
11095
11096         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11097                 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11098                 vport->last_promisc_flags = vport->overflow_promisc_flags;
11099         }
11100
11101         if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11102                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11103                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11104                                              tmp_flags & HNAE3_MPE);
11105                 if (!ret) {
11106                         clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11107                         hclge_enable_vlan_filter(handle,
11108                                                  tmp_flags & HNAE3_VLAN_FLTR);
11109                 }
11110         }
11111 }
11112
11113 static bool hclge_module_existed(struct hclge_dev *hdev)
11114 {
11115         struct hclge_desc desc;
11116         u32 existed;
11117         int ret;
11118
11119         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11120         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11121         if (ret) {
11122                 dev_err(&hdev->pdev->dev,
11123                         "failed to get SFP exist state, ret = %d\n", ret);
11124                 return false;
11125         }
11126
11127         existed = le32_to_cpu(desc.data[0]);
11128
11129         return existed != 0;
11130 }
11131
11132 /* need 6 BDs (140 bytes in total) in one reading;
11133  * return the number of bytes actually read, 0 means the read failed.
11134  */
11135 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11136                                      u32 len, u8 *data)
11137 {
11138         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11139         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11140         u16 read_len;
11141         u16 copy_len;
11142         int ret;
11143         int i;
11144
11145         /* setup all 6 bds to read module eeprom info. */
11146         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11147                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11148                                            true);
11149
11150                 /* bd0~bd4 need next flag */
11151                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11152                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11153         }
11154
11155         /* setup bd0, this bd contains offset and read length. */
11156         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11157         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11158         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11159         sfp_info_bd0->read_len = cpu_to_le16(read_len);
11160
11161         ret = hclge_cmd_send(&hdev->hw, desc, i);
11162         if (ret) {
11163                 dev_err(&hdev->pdev->dev,
11164                         "failed to get SFP eeprom info, ret = %d\n", ret);
11165                 return 0;
11166         }
11167
11168         /* copy sfp info from bd0 to out buffer. */
11169         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11170         memcpy(data, sfp_info_bd0->data, copy_len);
11171         read_len = copy_len;
11172
11173         /* copy sfp info from bd1~bd5 to out buffer if needed. */
11174         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11175                 if (read_len >= len)
11176                         return read_len;
11177
11178                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11179                 memcpy(data + read_len, desc[i].data, copy_len);
11180                 read_len += copy_len;
11181         }
11182
11183         return read_len;
11184 }
11185
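/* Read the module EEPROM contents for a fiber port, looping over
 * hclge_get_sfp_eeprom_info() until the requested length has been read.
 */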
11186 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11187                                    u32 len, u8 *data)
11188 {
11189         struct hclge_vport *vport = hclge_get_vport(handle);
11190         struct hclge_dev *hdev = vport->back;
11191         u32 read_len = 0;
11192         u16 data_len;
11193
11194         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11195                 return -EOPNOTSUPP;
11196
11197         if (!hclge_module_existed(hdev))
11198                 return -ENXIO;
11199
11200         while (read_len < len) {
11201                 data_len = hclge_get_sfp_eeprom_info(hdev,
11202                                                      offset + read_len,
11203                                                      len - read_len,
11204                                                      data + read_len);
11205                 if (!data_len)
11206                         return -EIO;
11207
11208                 read_len += data_len;
11209         }
11210
11211         return 0;
11212 }
11213
11214 static const struct hnae3_ae_ops hclge_ops = {
11215         .init_ae_dev = hclge_init_ae_dev,
11216         .uninit_ae_dev = hclge_uninit_ae_dev,
11217         .flr_prepare = hclge_flr_prepare,
11218         .flr_done = hclge_flr_done,
11219         .init_client_instance = hclge_init_client_instance,
11220         .uninit_client_instance = hclge_uninit_client_instance,
11221         .map_ring_to_vector = hclge_map_ring_to_vector,
11222         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11223         .get_vector = hclge_get_vector,
11224         .put_vector = hclge_put_vector,
11225         .set_promisc_mode = hclge_set_promisc_mode,
11226         .request_update_promisc_mode = hclge_request_update_promisc_mode,
11227         .set_loopback = hclge_set_loopback,
11228         .start = hclge_ae_start,
11229         .stop = hclge_ae_stop,
11230         .client_start = hclge_client_start,
11231         .client_stop = hclge_client_stop,
11232         .get_status = hclge_get_status,
11233         .get_ksettings_an_result = hclge_get_ksettings_an_result,
11234         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11235         .get_media_type = hclge_get_media_type,
11236         .check_port_speed = hclge_check_port_speed,
11237         .get_fec = hclge_get_fec,
11238         .set_fec = hclge_set_fec,
11239         .get_rss_key_size = hclge_get_rss_key_size,
11240         .get_rss_indir_size = hclge_get_rss_indir_size,
11241         .get_rss = hclge_get_rss,
11242         .set_rss = hclge_set_rss,
11243         .set_rss_tuple = hclge_set_rss_tuple,
11244         .get_rss_tuple = hclge_get_rss_tuple,
11245         .get_tc_size = hclge_get_tc_size,
11246         .get_mac_addr = hclge_get_mac_addr,
11247         .set_mac_addr = hclge_set_mac_addr,
11248         .do_ioctl = hclge_do_ioctl,
11249         .add_uc_addr = hclge_add_uc_addr,
11250         .rm_uc_addr = hclge_rm_uc_addr,
11251         .add_mc_addr = hclge_add_mc_addr,
11252         .rm_mc_addr = hclge_rm_mc_addr,
11253         .set_autoneg = hclge_set_autoneg,
11254         .get_autoneg = hclge_get_autoneg,
11255         .restart_autoneg = hclge_restart_autoneg,
11256         .halt_autoneg = hclge_halt_autoneg,
11257         .get_pauseparam = hclge_get_pauseparam,
11258         .set_pauseparam = hclge_set_pauseparam,
11259         .set_mtu = hclge_set_mtu,
11260         .reset_queue = hclge_reset_tqp,
11261         .get_stats = hclge_get_stats,
11262         .get_mac_stats = hclge_get_mac_stat,
11263         .update_stats = hclge_update_stats,
11264         .get_strings = hclge_get_strings,
11265         .get_sset_count = hclge_get_sset_count,
11266         .get_fw_version = hclge_get_fw_version,
11267         .get_mdix_mode = hclge_get_mdix_mode,
11268         .enable_vlan_filter = hclge_enable_vlan_filter,
11269         .set_vlan_filter = hclge_set_vlan_filter,
11270         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11271         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11272         .reset_event = hclge_reset_event,
11273         .get_reset_level = hclge_get_reset_level,
11274         .set_default_reset_request = hclge_set_def_reset_request,
11275         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11276         .set_channels = hclge_set_channels,
11277         .get_channels = hclge_get_channels,
11278         .get_regs_len = hclge_get_regs_len,
11279         .get_regs = hclge_get_regs,
11280         .set_led_id = hclge_set_led_id,
11281         .get_link_mode = hclge_get_link_mode,
11282         .add_fd_entry = hclge_add_fd_entry,
11283         .del_fd_entry = hclge_del_fd_entry,
11284         .del_all_fd_entries = hclge_del_all_fd_entries,
11285         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11286         .get_fd_rule_info = hclge_get_fd_rule_info,
11287         .get_fd_all_rules = hclge_get_all_rules,
11288         .enable_fd = hclge_enable_fd,
11289         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11290         .dbg_run_cmd = hclge_dbg_run_cmd,
11291         .handle_hw_ras_error = hclge_handle_hw_ras_error,
11292         .get_hw_reset_stat = hclge_get_hw_reset_stat,
11293         .ae_dev_resetting = hclge_ae_dev_resetting,
11294         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11295         .set_gro_en = hclge_gro_en,
11296         .get_global_queue_id = hclge_covert_handle_qid_global,
11297         .set_timer_task = hclge_set_timer_task,
11298         .mac_connect_phy = hclge_mac_connect_phy,
11299         .mac_disconnect_phy = hclge_mac_disconnect_phy,
11300         .get_vf_config = hclge_get_vf_config,
11301         .set_vf_link_state = hclge_set_vf_link_state,
11302         .set_vf_spoofchk = hclge_set_vf_spoofchk,
11303         .set_vf_trust = hclge_set_vf_trust,
11304         .set_vf_rate = hclge_set_vf_rate,
11305         .set_vf_mac = hclge_set_vf_mac,
11306         .get_module_eeprom = hclge_get_module_eeprom,
11307         .get_cmdq_stat = hclge_get_cmdq_stat,
11308 };
11309
11310 static struct hnae3_ae_algo ae_algo = {
11311         .ops = &hclge_ops,
11312         .pdev_id_table = ae_algo_pci_tbl,
11313 };
11314
11315 static int hclge_init(void)
11316 {
11317         pr_info("%s is initializing\n", HCLGE_NAME);
11318
11319         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11320         if (!hclge_wq) {
11321                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11322                 return -ENOMEM;
11323         }
11324
11325         hnae3_register_ae_algo(&ae_algo);
11326
11327         return 0;
11328 }
11329
11330 static void hclge_exit(void)
11331 {
11332         hnae3_unregister_ae_algo(&ae_algo);
11333         destroy_workqueue(hclge_wq);
11334 }
11335 module_init(hclge_init);
11336 module_exit(hclge_exit);
11337
11338 MODULE_LICENSE("GPL");
11339 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11340 MODULE_DESCRIPTION("HCLGE Driver");
11341 MODULE_VERSION(HCLGE_MOD_VERSION);