1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
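/* Example usage of the two helpers above (illustrative only): a single MAC
 * counter can be read generically from hdev->mac_stats, e.g.
 *   u64 v = HCLGE_STATS_READ(&hdev->mac_stats,
 *                            HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num));
 * HCLGE_MAC_STATS_FIELD_OFF() yields the byte offset of the field and
 * HCLGE_STATS_READ() dereferences it as a u64 at that offset.
 */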
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38 #define HCLGE_RESET_SYNC_TIME           100
39 #define HCLGE_PF_RESET_SYNC_TIME        20
40 #define HCLGE_PF_RESET_SYNC_CNT         1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55
56 #define HCLGE_LINK_STATUS_MS    10
57
58 #define HCLGE_VF_VPORT_START_NUM        1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66                                u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
70                                                    unsigned long *addr);
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
72
73 static struct hnae3_ae_algo ae_algo;
74
75 static struct workqueue_struct *hclge_wq;
76
77 static const struct pci_device_id ae_algo_pci_tbl[] = {
78         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85         /* required last entry */
86         {0, }
87 };
88
89 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
90
91 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
92                                          HCLGE_CMDQ_TX_ADDR_H_REG,
93                                          HCLGE_CMDQ_TX_DEPTH_REG,
94                                          HCLGE_CMDQ_TX_TAIL_REG,
95                                          HCLGE_CMDQ_TX_HEAD_REG,
96                                          HCLGE_CMDQ_RX_ADDR_L_REG,
97                                          HCLGE_CMDQ_RX_ADDR_H_REG,
98                                          HCLGE_CMDQ_RX_DEPTH_REG,
99                                          HCLGE_CMDQ_RX_TAIL_REG,
100                                          HCLGE_CMDQ_RX_HEAD_REG,
101                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
102                                          HCLGE_CMDQ_INTR_STS_REG,
103                                          HCLGE_CMDQ_INTR_EN_REG,
104                                          HCLGE_CMDQ_INTR_GEN_REG};
105
106 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
107                                            HCLGE_VECTOR0_OTER_EN_REG,
108                                            HCLGE_MISC_RESET_STS_REG,
109                                            HCLGE_MISC_VECTOR_INT_STS,
110                                            HCLGE_GLOBAL_RESET_REG,
111                                            HCLGE_FUN_RST_ING,
112                                            HCLGE_GRO_EN_REG};
113
114 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
115                                          HCLGE_RING_RX_ADDR_H_REG,
116                                          HCLGE_RING_RX_BD_NUM_REG,
117                                          HCLGE_RING_RX_BD_LENGTH_REG,
118                                          HCLGE_RING_RX_MERGE_EN_REG,
119                                          HCLGE_RING_RX_TAIL_REG,
120                                          HCLGE_RING_RX_HEAD_REG,
121                                          HCLGE_RING_RX_FBD_NUM_REG,
122                                          HCLGE_RING_RX_OFFSET_REG,
123                                          HCLGE_RING_RX_FBD_OFFSET_REG,
124                                          HCLGE_RING_RX_STASH_REG,
125                                          HCLGE_RING_RX_BD_ERR_REG,
126                                          HCLGE_RING_TX_ADDR_L_REG,
127                                          HCLGE_RING_TX_ADDR_H_REG,
128                                          HCLGE_RING_TX_BD_NUM_REG,
129                                          HCLGE_RING_TX_PRIORITY_REG,
130                                          HCLGE_RING_TX_TC_REG,
131                                          HCLGE_RING_TX_MERGE_EN_REG,
132                                          HCLGE_RING_TX_TAIL_REG,
133                                          HCLGE_RING_TX_HEAD_REG,
134                                          HCLGE_RING_TX_FBD_NUM_REG,
135                                          HCLGE_RING_TX_OFFSET_REG,
136                                          HCLGE_RING_TX_EBD_NUM_REG,
137                                          HCLGE_RING_TX_EBD_OFFSET_REG,
138                                          HCLGE_RING_TX_BD_ERR_REG,
139                                          HCLGE_RING_EN_REG};
140
141 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
142                                              HCLGE_TQP_INTR_GL0_REG,
143                                              HCLGE_TQP_INTR_GL1_REG,
144                                              HCLGE_TQP_INTR_GL2_REG,
145                                              HCLGE_TQP_INTR_RL_REG};
146
147 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
148         "App    Loopback test",
149         "Serdes serial Loopback test",
150         "Serdes parallel Loopback test",
151         "Phy    Loopback test"
152 };
153
154 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
155         {"mac_tx_mac_pause_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
157         {"mac_rx_mac_pause_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
159         {"mac_tx_control_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
161         {"mac_rx_control_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
163         {"mac_tx_pfc_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
165         {"mac_tx_pfc_pri0_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
167         {"mac_tx_pfc_pri1_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
169         {"mac_tx_pfc_pri2_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
171         {"mac_tx_pfc_pri3_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
173         {"mac_tx_pfc_pri4_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
175         {"mac_tx_pfc_pri5_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
177         {"mac_tx_pfc_pri6_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
179         {"mac_tx_pfc_pri7_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
181         {"mac_rx_pfc_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
183         {"mac_rx_pfc_pri0_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
185         {"mac_rx_pfc_pri1_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
187         {"mac_rx_pfc_pri2_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
189         {"mac_rx_pfc_pri3_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
191         {"mac_rx_pfc_pri4_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
193         {"mac_rx_pfc_pri5_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
195         {"mac_rx_pfc_pri6_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
197         {"mac_rx_pfc_pri7_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
199         {"mac_tx_total_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
201         {"mac_tx_total_oct_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
203         {"mac_tx_good_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
205         {"mac_tx_bad_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
207         {"mac_tx_good_oct_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
209         {"mac_tx_bad_oct_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
211         {"mac_tx_uni_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
213         {"mac_tx_multi_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
215         {"mac_tx_broad_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
217         {"mac_tx_undersize_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
219         {"mac_tx_oversize_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
221         {"mac_tx_64_oct_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
223         {"mac_tx_65_127_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
225         {"mac_tx_128_255_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
227         {"mac_tx_256_511_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
229         {"mac_tx_512_1023_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
231         {"mac_tx_1024_1518_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
233         {"mac_tx_1519_2047_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
235         {"mac_tx_2048_4095_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
237         {"mac_tx_4096_8191_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
239         {"mac_tx_8192_9216_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
241         {"mac_tx_9217_12287_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
243         {"mac_tx_12288_16383_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
245         {"mac_tx_1519_max_good_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
247         {"mac_tx_1519_max_bad_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
249         {"mac_rx_total_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
251         {"mac_rx_total_oct_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
253         {"mac_rx_good_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
255         {"mac_rx_bad_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
257         {"mac_rx_good_oct_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
259         {"mac_rx_bad_oct_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
261         {"mac_rx_uni_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
263         {"mac_rx_multi_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
265         {"mac_rx_broad_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
267         {"mac_rx_undersize_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
269         {"mac_rx_oversize_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
271         {"mac_rx_64_oct_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
273         {"mac_rx_65_127_oct_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
275         {"mac_rx_128_255_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
277         {"mac_rx_256_511_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
279         {"mac_rx_512_1023_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
281         {"mac_rx_1024_1518_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
283         {"mac_rx_1519_2047_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
285         {"mac_rx_2048_4095_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
287         {"mac_rx_4096_8191_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
289         {"mac_rx_8192_9216_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
291         {"mac_rx_9217_12287_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
293         {"mac_rx_12288_16383_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
295         {"mac_rx_1519_max_good_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
297         {"mac_rx_1519_max_bad_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
299
300         {"mac_tx_fragment_pkt_num",
301                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
302         {"mac_tx_undermin_pkt_num",
303                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
304         {"mac_tx_jabber_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
306         {"mac_tx_err_all_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
308         {"mac_tx_from_app_good_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
310         {"mac_tx_from_app_bad_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
312         {"mac_rx_fragment_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
314         {"mac_rx_undermin_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
316         {"mac_rx_jabber_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
318         {"mac_rx_fcs_err_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
320         {"mac_rx_send_app_good_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
322         {"mac_rx_send_app_bad_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
324 };
325
326 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
327         {
328                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
329                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
330                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
331                 .i_port_bitmap = 0x1,
332         },
333 };
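/* The single entry above appears to trap LLDP frames: it matches the LLDP
 * ethertype (ETH_P_LLDP) and the IEEE LLDP multicast address
 * 01:80:c2:00:00:0e on port bitmap 0x1.
 */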
334
335 static const u8 hclge_hash_key[] = {
336         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
337         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
338         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
339         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
340         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
341 };
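/* hclge_hash_key looks like the commonly used default Toeplitz key (the same
 * 40-byte pattern appears in several other NIC drivers); it is presumably
 * used as the driver's default RSS hash key.
 */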
342
343 static const u32 hclge_dfx_bd_offset_list[] = {
344         HCLGE_DFX_BIOS_BD_OFFSET,
345         HCLGE_DFX_SSU_0_BD_OFFSET,
346         HCLGE_DFX_SSU_1_BD_OFFSET,
347         HCLGE_DFX_IGU_BD_OFFSET,
348         HCLGE_DFX_RPU_0_BD_OFFSET,
349         HCLGE_DFX_RPU_1_BD_OFFSET,
350         HCLGE_DFX_NCSI_BD_OFFSET,
351         HCLGE_DFX_RTC_BD_OFFSET,
352         HCLGE_DFX_PPP_BD_OFFSET,
353         HCLGE_DFX_RCB_BD_OFFSET,
354         HCLGE_DFX_TQP_BD_OFFSET,
355         HCLGE_DFX_SSU_2_BD_OFFSET
356 };
357
358 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
359         HCLGE_OPC_DFX_BIOS_COMMON_REG,
360         HCLGE_OPC_DFX_SSU_REG_0,
361         HCLGE_OPC_DFX_SSU_REG_1,
362         HCLGE_OPC_DFX_IGU_EGU_REG,
363         HCLGE_OPC_DFX_RPU_REG_0,
364         HCLGE_OPC_DFX_RPU_REG_1,
365         HCLGE_OPC_DFX_NCSI_REG,
366         HCLGE_OPC_DFX_RTC_REG,
367         HCLGE_OPC_DFX_PPP_REG,
368         HCLGE_OPC_DFX_RCB_REG,
369         HCLGE_OPC_DFX_TQP_REG,
370         HCLGE_OPC_DFX_SSU_REG_2
371 };
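/* hclge_dfx_bd_offset_list[] and hclge_dfx_reg_opcode_list[] are index
 * aligned: entry i of the offset list gives the BD-number offset for the
 * hardware block whose DFX registers are read with entry i of the opcode
 * list (BIOS, SSU_0, SSU_1, IGU, ...).
 */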
372
373 static const struct key_info meta_data_key_info[] = {
374         { PACKET_TYPE_ID, 6},
375         { IP_FRAGEMENT, 1},
376         { ROCE_TYPE, 1},
377         { NEXT_KEY, 5},
378         { VLAN_NUMBER, 2},
379         { SRC_VPORT, 12},
380         { DST_VPORT, 12},
381         { TUNNEL_PACKET, 1},
382 };
383
384 static const struct key_info tuple_key_info[] = {
385         { OUTER_DST_MAC, 48},
386         { OUTER_SRC_MAC, 48},
387         { OUTER_VLAN_TAG_FST, 16},
388         { OUTER_VLAN_TAG_SEC, 16},
389         { OUTER_ETH_TYPE, 16},
390         { OUTER_L2_RSV, 16},
391         { OUTER_IP_TOS, 8},
392         { OUTER_IP_PROTO, 8},
393         { OUTER_SRC_IP, 32},
394         { OUTER_DST_IP, 32},
395         { OUTER_L3_RSV, 16},
396         { OUTER_SRC_PORT, 16},
397         { OUTER_DST_PORT, 16},
398         { OUTER_L4_RSV, 32},
399         { OUTER_TUN_VNI, 24},
400         { OUTER_TUN_FLOW_ID, 8},
401         { INNER_DST_MAC, 48},
402         { INNER_SRC_MAC, 48},
403         { INNER_VLAN_TAG_FST, 16},
404         { INNER_VLAN_TAG_SEC, 16},
405         { INNER_ETH_TYPE, 16},
406         { INNER_L2_RSV, 16},
407         { INNER_IP_TOS, 8},
408         { INNER_IP_PROTO, 8},
409         { INNER_SRC_IP, 32},
410         { INNER_DST_IP, 32},
411         { INNER_L3_RSV, 16},
412         { INNER_SRC_PORT, 16},
413         { INNER_DST_PORT, 16},
414         { INNER_L4_RSV, 32},
415 };
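/* meta_data_key_info[] and tuple_key_info[] give the width in bits of each
 * metadata field and of each inner/outer packet-tuple field; these widths
 * presumably describe how the flow director key is laid out in hardware.
 */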
416
417 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
418 {
419 #define HCLGE_MAC_CMD_NUM 21
420
421         u64 *data = (u64 *)(&hdev->mac_stats);
422         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
423         __le64 *desc_data;
424         int i, k, n;
425         int ret;
426
427         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
428         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
429         if (ret) {
430                 dev_err(&hdev->pdev->dev,
431                         "Get MAC pkt stats fail, status = %d.\n", ret);
432
433                 return ret;
434         }
435
436         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
437                 /* for special opcode 0032, only the first desc has the head */
438                 if (unlikely(i == 0)) {
439                         desc_data = (__le64 *)(&desc[i].data[0]);
440                         n = HCLGE_RD_FIRST_STATS_NUM;
441                 } else {
442                         desc_data = (__le64 *)(&desc[i]);
443                         n = HCLGE_RD_OTHER_STATS_NUM;
444                 }
445
446                 for (k = 0; k < n; k++) {
447                         *data += le64_to_cpu(*desc_data);
448                         data++;
449                         desc_data++;
450                 }
451         }
452
453         return 0;
454 }

455
456 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
457 {
458         u64 *data = (u64 *)(&hdev->mac_stats);
459         struct hclge_desc *desc;
460         __le64 *desc_data;
461         u16 i, k, n;
462         int ret;
463
464         /* This may be called inside atomic sections,
465          * so GFP_ATOMIC is more suitable here
466          */
467         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
468         if (!desc)
469                 return -ENOMEM;
470
471         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
472         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
473         if (ret) {
474                 kfree(desc);
475                 return ret;
476         }
477
478         for (i = 0; i < desc_num; i++) {
479                 /* for special opcode 0034, only the first desc has the head */
480                 if (i == 0) {
481                         desc_data = (__le64 *)(&desc[i].data[0]);
482                         n = HCLGE_RD_FIRST_STATS_NUM;
483                 } else {
484                         desc_data = (__le64 *)(&desc[i]);
485                         n = HCLGE_RD_OTHER_STATS_NUM;
486                 }
487
488                 for (k = 0; k < n; k++) {
489                         *data += le64_to_cpu(*desc_data);
490                         data++;
491                         desc_data++;
492                 }
493         }
494
495         kfree(desc);
496
497         return 0;
498 }
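/* Note on the two stats-update functions above: counters are accumulated
 * into hdev->mac_stats with "+=" rather than overwritten, which suggests the
 * hardware counters are cleared on read (an inference from the code, not
 * something stated in it).
 */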
499
500 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
501 {
502         struct hclge_desc desc;
503         __le32 *desc_data;
504         u32 reg_num;
505         int ret;
506
507         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
508         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
509         if (ret)
510                 return ret;
511
512         desc_data = (__le32 *)(&desc.data[0]);
513         reg_num = le32_to_cpu(*desc_data);
514
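        /* i.e. desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4), which matches a
         * layout where the first descriptor carries three 64-bit stats in its
         * data area and each following descriptor, reinterpreted wholesale,
         * carries four (see hclge_mac_update_stats_complete() above).
         */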
515         *desc_num = 1 + ((reg_num - 3) >> 2) +
516                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
517
518         return 0;
519 }
520
521 static int hclge_mac_update_stats(struct hclge_dev *hdev)
522 {
523         u32 desc_num;
524         int ret;
525
526         ret = hclge_mac_query_reg_num(hdev, &desc_num);
527
528         /* The firmware supports the new statistics acquisition method */
529         if (!ret)
530                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
531         else if (ret == -EOPNOTSUPP)
532                 ret = hclge_mac_update_stats_defective(hdev);
533         else
534                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
535
536         return ret;
537 }
538
539 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
540 {
541         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
542         struct hclge_vport *vport = hclge_get_vport(handle);
543         struct hclge_dev *hdev = vport->back;
544         struct hnae3_queue *queue;
545         struct hclge_desc desc[1];
546         struct hclge_tqp *tqp;
547         int ret, i;
548
549         for (i = 0; i < kinfo->num_tqps; i++) {
550                 queue = handle->kinfo.tqp[i];
551                 tqp = container_of(queue, struct hclge_tqp, q);
552                 /* command : HCLGE_OPC_QUERY_RX_STATUS (IGU stats) */
553                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
554                                            true);
555
556                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
557                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
558                 if (ret) {
559                         dev_err(&hdev->pdev->dev,
560                                 "Query tqp stat fail, status = %d,queue = %d\n",
561                                 ret, i);
562                         return ret;
563                 }
564                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
565                         le32_to_cpu(desc[0].data[1]);
566         }
567
568         for (i = 0; i < kinfo->num_tqps; i++) {
569                 queue = handle->kinfo.tqp[i];
570                 tqp = container_of(queue, struct hclge_tqp, q);
571                 /* command : HCLGE_OPC_QUERY_TX_STATUS (IGU stats) */
572                 hclge_cmd_setup_basic_desc(&desc[0],
573                                            HCLGE_OPC_QUERY_TX_STATUS,
574                                            true);
575
576                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
577                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
578                 if (ret) {
579                         dev_err(&hdev->pdev->dev,
580                                 "Query tqp stat fail, status = %d,queue = %d\n",
581                                 ret, i);
582                         return ret;
583                 }
584                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
585                         le32_to_cpu(desc[0].data[1]);
586         }
587
588         return 0;
589 }
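/* Both loops above issue one command per queue and accumulate the returned
 * per-queue RX/TX packet counts into tqp->tqp_stats; the "& 0x1ff" mask on
 * tqp->index suggests the command encodes the queue id in a 9-bit field.
 */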
590
591 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
592 {
593         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
594         struct hclge_tqp *tqp;
595         u64 *buff = data;
596         int i;
597
598         for (i = 0; i < kinfo->num_tqps; i++) {
599                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
600                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
601         }
602
603         for (i = 0; i < kinfo->num_tqps; i++) {
604                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
606         }
607
608         return buff;
609 }
610
611 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
614
615         /* each tqp has two queues: one TX and one RX */
616         return kinfo->num_tqps * (2);
617 }
618
619 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
620 {
621         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622         u8 *buff = data;
623         int i = 0;
624
625         for (i = 0; i < kinfo->num_tqps; i++) {
626                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
627                         struct hclge_tqp, q);
628                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
629                          tqp->index);
630                 buff = buff + ETH_GSTRING_LEN;
631         }
632
633         for (i = 0; i < kinfo->num_tqps; i++) {
634                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
635                         struct hclge_tqp, q);
636                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
637                          tqp->index);
638                 buff = buff + ETH_GSTRING_LEN;
639         }
640
641         return buff;
642 }
643
644 static u64 *hclge_comm_get_stats(const void *comm_stats,
645                                  const struct hclge_comm_stats_str strs[],
646                                  int size, u64 *data)
647 {
648         u64 *buf = data;
649         u32 i;
650
651         for (i = 0; i < size; i++)
652                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
653
654         return buf + size;
655 }
656
657 static u8 *hclge_comm_get_strings(u32 stringset,
658                                   const struct hclge_comm_stats_str strs[],
659                                   int size, u8 *data)
660 {
661         char *buff = (char *)data;
662         u32 i;
663
664         if (stringset != ETH_SS_STATS)
665                 return buff;
666
667         for (i = 0; i < size; i++) {
668                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
669                 buff = buff + ETH_GSTRING_LEN;
670         }
671
672         return (u8 *)buff;
673 }
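/* hclge_comm_get_stats()/hclge_comm_get_strings() are generic walkers over a
 * (name, offset) table such as g_mac_stats_string[]; e.g. the ethtool paths
 * below call them as
 *   hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
 *                        ARRAY_SIZE(g_mac_stats_string), data);
 * so adding a new MAC counter only needs a new struct field and table row.
 */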
674
675 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
676 {
677         struct hnae3_handle *handle;
678         int status;
679
680         handle = &hdev->vport[0].nic;
681         if (handle->client) {
682                 status = hclge_tqps_update_stats(handle);
683                 if (status) {
684                         dev_err(&hdev->pdev->dev,
685                                 "Update TQPS stats fail, status = %d.\n",
686                                 status);
687                 }
688         }
689
690         status = hclge_mac_update_stats(hdev);
691         if (status)
692                 dev_err(&hdev->pdev->dev,
693                         "Update MAC stats fail, status = %d.\n", status);
694 }
695
696 static void hclge_update_stats(struct hnae3_handle *handle,
697                                struct net_device_stats *net_stats)
698 {
699         struct hclge_vport *vport = hclge_get_vport(handle);
700         struct hclge_dev *hdev = vport->back;
701         int status;
702
703         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
704                 return;
705
706         status = hclge_mac_update_stats(hdev);
707         if (status)
708                 dev_err(&hdev->pdev->dev,
709                         "Update MAC stats fail, status = %d.\n",
710                         status);
711
712         status = hclge_tqps_update_stats(handle);
713         if (status)
714                 dev_err(&hdev->pdev->dev,
715                         "Update TQPS stats fail, status = %d.\n",
716                         status);
717
718         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
719 }
720
721 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
722 {
723 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
724                 HNAE3_SUPPORT_PHY_LOOPBACK |\
725                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
726                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
727
728         struct hclge_vport *vport = hclge_get_vport(handle);
729         struct hclge_dev *hdev = vport->back;
730         int count = 0;
731
732         /* Loopback test support rules:
733          * mac: supported only in GE mode
734          * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
735          * phy: supported only when a phy device exists on the board
736          */
737         if (stringset == ETH_SS_TEST) {
738                 /* clear loopback bit flags at first */
739                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
740                 if (hdev->pdev->revision >= 0x21 ||
741                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
742                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
743                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
744                         count += 1;
745                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
746                 }
747
748                 count += 2;
749                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
750                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
751
752                 if (hdev->hw.mac.phydev) {
753                         count += 1;
754                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
755                 }
756
757         } else if (stringset == ETH_SS_STATS) {
758                 count = ARRAY_SIZE(g_mac_stats_string) +
759                         hclge_tqps_get_sset_count(handle, stringset);
760         }
761
762         return count;
763 }
764
765 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
766                               u8 *data)
767 {
768         u8 *p = (char *)data;
769         int size;
770
771         if (stringset == ETH_SS_STATS) {
772                 size = ARRAY_SIZE(g_mac_stats_string);
773                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
774                                            size, p);
775                 p = hclge_tqps_get_strings(handle, p);
776         } else if (stringset == ETH_SS_TEST) {
777                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
778                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
779                                ETH_GSTRING_LEN);
780                         p += ETH_GSTRING_LEN;
781                 }
782                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
783                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
784                                ETH_GSTRING_LEN);
785                         p += ETH_GSTRING_LEN;
786                 }
787                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
788                         memcpy(p,
789                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
790                                ETH_GSTRING_LEN);
791                         p += ETH_GSTRING_LEN;
792                 }
793                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
794                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
795                                ETH_GSTRING_LEN);
796                         p += ETH_GSTRING_LEN;
797                 }
798         }
799 }
800
801 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
802 {
803         struct hclge_vport *vport = hclge_get_vport(handle);
804         struct hclge_dev *hdev = vport->back;
805         u64 *p;
806
807         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
808                                  ARRAY_SIZE(g_mac_stats_string), data);
809         p = hclge_tqps_get_stats(handle, p);
810 }
811
812 static void hclge_get_mac_stat(struct hnae3_handle *handle,
813                                struct hns3_mac_stats *mac_stats)
814 {
815         struct hclge_vport *vport = hclge_get_vport(handle);
816         struct hclge_dev *hdev = vport->back;
817
818         hclge_update_stats(handle, NULL);
819
820         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
821         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
822 }
823
824 static int hclge_parse_func_status(struct hclge_dev *hdev,
825                                    struct hclge_func_status_cmd *status)
826 {
827         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
828                 return -EINVAL;
829
830         /* Set the pf to main pf */
831         if (status->pf_state & HCLGE_PF_STATE_MAIN)
832                 hdev->flag |= HCLGE_FLAG_MAIN;
833         else
834                 hdev->flag &= ~HCLGE_FLAG_MAIN;
835
836         return 0;
837 }
838
839 static int hclge_query_function_status(struct hclge_dev *hdev)
840 {
841 #define HCLGE_QUERY_MAX_CNT     5
842
843         struct hclge_func_status_cmd *req;
844         struct hclge_desc desc;
845         int timeout = 0;
846         int ret;
847
848         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
849         req = (struct hclge_func_status_cmd *)desc.data;
850
851         do {
852                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
853                 if (ret) {
854                         dev_err(&hdev->pdev->dev,
855                                 "query function status failed %d.\n", ret);
856                         return ret;
857                 }
858
859                 /* Check whether pf reset is done */
860                 if (req->pf_state)
861                         break;
862                 usleep_range(1000, 2000);
863         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
864
865         return hclge_parse_func_status(hdev, req);
866 }
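/* The loop above polls the function status up to HCLGE_QUERY_MAX_CNT times,
 * sleeping 1-2 ms between tries, until req->pf_state reports that the PF
 * reset has completed; the final state is then parsed by
 * hclge_parse_func_status().
 */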
867
868 static int hclge_query_pf_resource(struct hclge_dev *hdev)
869 {
870         struct hclge_pf_res_cmd *req;
871         struct hclge_desc desc;
872         int ret;
873
874         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
875         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
876         if (ret) {
877                 dev_err(&hdev->pdev->dev,
878                         "query pf resource failed %d.\n", ret);
879                 return ret;
880         }
881
882         req = (struct hclge_pf_res_cmd *)desc.data;
883         hdev->num_tqps = le16_to_cpu(req->tqp_num);
884         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
885
886         if (req->tx_buf_size)
887                 hdev->tx_buf_size =
888                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
889         else
890                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
891
892         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
893
894         if (req->dv_buf_size)
895                 hdev->dv_buf_size =
896                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
897         else
898                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
899
900         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
901
902         if (hnae3_dev_roce_supported(hdev)) {
903                 hdev->roce_base_msix_offset =
904                 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
905                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
906                 hdev->num_roce_msi =
907                 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
908                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
909
910                 /* the nic's msix vector count always equals the roce's. */
911                 hdev->num_nic_msi = hdev->num_roce_msi;
912
913                 /* PF should have both NIC vectors and RoCE vectors;
914                  * NIC vectors come before RoCE vectors.
915                  */
916                 hdev->num_msi = hdev->num_roce_msi +
917                                 hdev->roce_base_msix_offset;
918         } else {
919                 hdev->num_msi =
920                 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
921                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
922
923                 hdev->num_nic_msi = hdev->num_msi;
924         }
925
926         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
927                 dev_err(&hdev->pdev->dev,
928                         "Just %u msi resources, not enough for pf(min:2).\n",
929                         hdev->num_nic_msi);
930                 return -EINVAL;
931         }
932
933         return 0;
934 }
935
936 static int hclge_parse_speed(int speed_cmd, int *speed)
937 {
938         switch (speed_cmd) {
939         case 6:
940                 *speed = HCLGE_MAC_SPEED_10M;
941                 break;
942         case 7:
943                 *speed = HCLGE_MAC_SPEED_100M;
944                 break;
945         case 0:
946                 *speed = HCLGE_MAC_SPEED_1G;
947                 break;
948         case 1:
949                 *speed = HCLGE_MAC_SPEED_10G;
950                 break;
951         case 2:
952                 *speed = HCLGE_MAC_SPEED_25G;
953                 break;
954         case 3:
955                 *speed = HCLGE_MAC_SPEED_40G;
956                 break;
957         case 4:
958                 *speed = HCLGE_MAC_SPEED_50G;
959                 break;
960         case 5:
961                 *speed = HCLGE_MAC_SPEED_100G;
962                 break;
963         default:
964                 return -EINVAL;
965         }
966
967         return 0;
968 }
969
970 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
971 {
972         struct hclge_vport *vport = hclge_get_vport(handle);
973         struct hclge_dev *hdev = vport->back;
974         u32 speed_ability = hdev->hw.mac.speed_ability;
975         u32 speed_bit = 0;
976
977         switch (speed) {
978         case HCLGE_MAC_SPEED_10M:
979                 speed_bit = HCLGE_SUPPORT_10M_BIT;
980                 break;
981         case HCLGE_MAC_SPEED_100M:
982                 speed_bit = HCLGE_SUPPORT_100M_BIT;
983                 break;
984         case HCLGE_MAC_SPEED_1G:
985                 speed_bit = HCLGE_SUPPORT_1G_BIT;
986                 break;
987         case HCLGE_MAC_SPEED_10G:
988                 speed_bit = HCLGE_SUPPORT_10G_BIT;
989                 break;
990         case HCLGE_MAC_SPEED_25G:
991                 speed_bit = HCLGE_SUPPORT_25G_BIT;
992                 break;
993         case HCLGE_MAC_SPEED_40G:
994                 speed_bit = HCLGE_SUPPORT_40G_BIT;
995                 break;
996         case HCLGE_MAC_SPEED_50G:
997                 speed_bit = HCLGE_SUPPORT_50G_BIT;
998                 break;
999         case HCLGE_MAC_SPEED_100G:
1000                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1001                 break;
1002         default:
1003                 return -EINVAL;
1004         }
1005
1006         if (speed_bit & speed_ability)
1007                 return 0;
1008
1009         return -EINVAL;
1010 }
1011
1012 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1013 {
1014         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1015                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1016                                  mac->supported);
1017         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1018                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1019                                  mac->supported);
1020         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1021                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1022                                  mac->supported);
1023         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1024                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1025                                  mac->supported);
1026         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1027                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1028                                  mac->supported);
1029 }
1030
1031 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1032 {
1033         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1034                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1035                                  mac->supported);
1036         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1037                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1038                                  mac->supported);
1039         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1040                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1041                                  mac->supported);
1042         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1043                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1044                                  mac->supported);
1045         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1046                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1047                                  mac->supported);
1048 }
1049
1050 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1051 {
1052         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1053                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1054                                  mac->supported);
1055         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1056                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1057                                  mac->supported);
1058         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1059                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1060                                  mac->supported);
1061         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1062                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1063                                  mac->supported);
1064         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1065                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1066                                  mac->supported);
1067 }
1068
1069 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1070 {
1071         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1072                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1073                                  mac->supported);
1074         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1075                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1076                                  mac->supported);
1077         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1078                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1079                                  mac->supported);
1080         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1081                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1082                                  mac->supported);
1083         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1084                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1085                                  mac->supported);
1086         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1087                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1088                                  mac->supported);
1089 }
1090
1091 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1092 {
1093         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1094         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1095
1096         switch (mac->speed) {
1097         case HCLGE_MAC_SPEED_10G:
1098         case HCLGE_MAC_SPEED_40G:
1099                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1100                                  mac->supported);
1101                 mac->fec_ability =
1102                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1103                 break;
1104         case HCLGE_MAC_SPEED_25G:
1105         case HCLGE_MAC_SPEED_50G:
1106                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1107                                  mac->supported);
1108                 mac->fec_ability =
1109                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1110                         BIT(HNAE3_FEC_AUTO);
1111                 break;
1112         case HCLGE_MAC_SPEED_100G:
1113                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1114                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1115                 break;
1116         default:
1117                 mac->fec_ability = 0;
1118                 break;
1119         }
1120 }
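/* Summary of the FEC mapping above:
 *   10G/40G : supported mode FEC_BASER, ability BASER | AUTO
 *   25G/50G : supported mode FEC_RS,    ability BASER | RS | AUTO
 *   100G    : supported mode FEC_RS,    ability RS | AUTO
 *   others  : no FEC ability
 */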
1121
1122 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1123                                         u8 speed_ability)
1124 {
1125         struct hclge_mac *mac = &hdev->hw.mac;
1126
1127         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1128                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1129                                  mac->supported);
1130
1131         hclge_convert_setting_sr(mac, speed_ability);
1132         hclge_convert_setting_lr(mac, speed_ability);
1133         hclge_convert_setting_cr(mac, speed_ability);
1134         if (hdev->pdev->revision >= 0x21)
1135                 hclge_convert_setting_fec(mac);
1136
1137         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1138         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1139         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1140 }
1141
1142 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1143                                             u8 speed_ability)
1144 {
1145         struct hclge_mac *mac = &hdev->hw.mac;
1146
1147         hclge_convert_setting_kr(mac, speed_ability);
1148         if (hdev->pdev->revision >= 0x21)
1149                 hclge_convert_setting_fec(mac);
1150         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1151         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1152         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1153 }
1154
1155 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1156                                          u8 speed_ability)
1157 {
1158         unsigned long *supported = hdev->hw.mac.supported;
1159
1160         /* default to supporting all speeds for a GE port */
1161         if (!speed_ability)
1162                 speed_ability = HCLGE_SUPPORT_GE;
1163
1164         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1165                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1166                                  supported);
1167
1168         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1169                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1170                                  supported);
1171                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1172                                  supported);
1173         }
1174
1175         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1176                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1177                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1178         }
1179
1180         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1181         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1182         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1183         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1184 }
1185
1186 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1187 {
1188         u8 media_type = hdev->hw.mac.media_type;
1189
1190         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1191                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1192         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1193                 hclge_parse_copper_link_mode(hdev, speed_ability);
1194         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1195                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1196 }
1197
1198 static u32 hclge_get_max_speed(u8 speed_ability)
1199 {
1200         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1201                 return HCLGE_MAC_SPEED_100G;
1202
1203         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1204                 return HCLGE_MAC_SPEED_50G;
1205
1206         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1207                 return HCLGE_MAC_SPEED_40G;
1208
1209         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1210                 return HCLGE_MAC_SPEED_25G;
1211
1212         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1213                 return HCLGE_MAC_SPEED_10G;
1214
1215         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1216                 return HCLGE_MAC_SPEED_1G;
1217
1218         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1219                 return HCLGE_MAC_SPEED_100M;
1220
1221         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1222                 return HCLGE_MAC_SPEED_10M;
1223
1224         return HCLGE_MAC_SPEED_1G;
1225 }
1226
1227 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1228 {
1229         struct hclge_cfg_param_cmd *req;
1230         u64 mac_addr_tmp_high;
1231         u64 mac_addr_tmp;
1232         unsigned int i;
1233
1234         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1235
1236         /* get the configuration */
1237         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1238                                               HCLGE_CFG_VMDQ_M,
1239                                               HCLGE_CFG_VMDQ_S);
1240         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1241                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1242         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243                                             HCLGE_CFG_TQP_DESC_N_M,
1244                                             HCLGE_CFG_TQP_DESC_N_S);
1245
1246         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1247                                         HCLGE_CFG_PHY_ADDR_M,
1248                                         HCLGE_CFG_PHY_ADDR_S);
1249         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1250                                           HCLGE_CFG_MEDIA_TP_M,
1251                                           HCLGE_CFG_MEDIA_TP_S);
1252         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1253                                           HCLGE_CFG_RX_BUF_LEN_M,
1254                                           HCLGE_CFG_RX_BUF_LEN_S);
1255         /* get mac_address */
1256         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1257         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1258                                             HCLGE_CFG_MAC_ADDR_H_M,
1259                                             HCLGE_CFG_MAC_ADDR_H_S);
1260
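        /* combine the two halves of the MAC address: the statement below
         * shifts the high part left by 32 bits in two steps ((x << 31) << 1),
         * presumably to sidestep shift-width warnings, even though
         * mac_addr_tmp_high is a u64.
         */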
1261         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1262
1263         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1264                                              HCLGE_CFG_DEFAULT_SPEED_M,
1265                                              HCLGE_CFG_DEFAULT_SPEED_S);
1266         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1267                                             HCLGE_CFG_RSS_SIZE_M,
1268                                             HCLGE_CFG_RSS_SIZE_S);
1269
1270         for (i = 0; i < ETH_ALEN; i++)
1271                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1272
1273         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1274         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1275
1276         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1277                                              HCLGE_CFG_SPEED_ABILITY_M,
1278                                              HCLGE_CFG_SPEED_ABILITY_S);
1279         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1281                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1282         if (!cfg->umv_space)
1283                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1284 }
1285
1286 /* hclge_get_cfg: query the static parameters from flash
1287  * @hdev: pointer to struct hclge_dev
1288  * @hcfg: the config structure to be filled in
1289  */
1290 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1291 {
1292         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1293         struct hclge_cfg_param_cmd *req;
1294         unsigned int i;
1295         int ret;
1296
1297         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1298                 u32 offset = 0;
1299
1300                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1301                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1302                                            true);
1303                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1304                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1305                 /* Len should be in units of 4 bytes when sent to hardware */
1306                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1307                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1308                 req->offset = cpu_to_le32(offset);
1309         }
1310
1311         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1312         if (ret) {
1313                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1314                 return ret;
1315         }
1316
1317         hclge_parse_cfg(hcfg, desc);
1318
1319         return 0;
1320 }
1321
1322 static int hclge_get_cap(struct hclge_dev *hdev)
1323 {
1324         int ret;
1325
1326         ret = hclge_query_function_status(hdev);
1327         if (ret) {
1328                 dev_err(&hdev->pdev->dev,
1329                         "query function status error %d.\n", ret);
1330                 return ret;
1331         }
1332
1333         /* get pf resource */
1334         return hclge_query_pf_resource(hdev);
1335 }
1336
1337 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1338 {
1339 #define HCLGE_MIN_TX_DESC       64
1340 #define HCLGE_MIN_RX_DESC       64
1341
1342         if (!is_kdump_kernel())
1343                 return;
1344
1345         dev_info(&hdev->pdev->dev,
1346                  "Running kdump kernel. Using minimal resources\n");
1347
1348         /* minimal queue pairs equal the number of vports */
1349         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1350         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1351         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1352 }
1353
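     /* hclge_configure: derive the driver's working parameters from the static
      * configuration read from flash: TQP and descriptor counts, MAC address,
      * media type, TC limits and UMV space. It also clamps tc_max, applies the
      * kdump minimal-resource limits and picks an initial service-task affinity
      * from the PCI function number.
      */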
1354 static int hclge_configure(struct hclge_dev *hdev)
1355 {
1356         struct hclge_cfg cfg;
1357         unsigned int i;
1358         int ret;
1359
1360         ret = hclge_get_cfg(hdev, &cfg);
1361         if (ret) {
1362                 dev_err(&hdev->pdev->dev, "failed to get config, ret = %d.\n", ret);
1363                 return ret;
1364         }
1365
1366         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1367         hdev->base_tqp_pid = 0;
1368         hdev->rss_size_max = cfg.rss_size_max;
1369         hdev->rx_buf_len = cfg.rx_buf_len;
1370         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1371         hdev->hw.mac.media_type = cfg.media_type;
1372         hdev->hw.mac.phy_addr = cfg.phy_addr;
1373         hdev->num_tx_desc = cfg.tqp_desc_num;
1374         hdev->num_rx_desc = cfg.tqp_desc_num;
1375         hdev->tm_info.num_pg = 1;
1376         hdev->tc_max = cfg.tc_num;
1377         hdev->tm_info.hw_pfc_map = 0;
1378         hdev->wanted_umv_size = cfg.umv_space;
1379
1380         if (hnae3_dev_fd_supported(hdev)) {
1381                 hdev->fd_en = true;
1382                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1383         }
1384
1385         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1386         if (ret) {
1387                 dev_err(&hdev->pdev->dev, "failed to parse speed, ret = %d.\n", ret);
1388                 return ret;
1389         }
1390
1391         hclge_parse_link_mode(hdev, cfg.speed_ability);
1392
1393         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1394
1395         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1396             (hdev->tc_max < 1)) {
1397                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1398                          hdev->tc_max);
1399                 hdev->tc_max = 1;
1400         }
1401
1402         /* Dev does not support DCB */
1403         if (!hnae3_dev_dcb_supported(hdev)) {
1404                 hdev->tc_max = 1;
1405                 hdev->pfc_max = 0;
1406         } else {
1407                 hdev->pfc_max = hdev->tc_max;
1408         }
1409
1410         hdev->tm_info.num_tc = 1;
1411
1412         /* Non-contiguous TCs are currently not supported */
1413         for (i = 0; i < hdev->tm_info.num_tc; i++)
1414                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1415
1416         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1417
1418         hclge_init_kdump_kernel_config(hdev);
1419
1420         /* Set the init affinity based on pci func number */
1421         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1422         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1423         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1424                         &hdev->affinity_mask);
1425
1426         return ret;
1427 }
1428
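     /* Program the firmware with the minimum and maximum TSO MSS. Both values
      * live in the low-order bits of their own 16-bit field, which is presumably
      * why the MIN mask/shift pair is reused when filling tso_mss_max.
      */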
1429 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1430                             unsigned int tso_mss_max)
1431 {
1432         struct hclge_cfg_tso_status_cmd *req;
1433         struct hclge_desc desc;
1434         u16 tso_mss;
1435
1436         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1437
1438         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1439
1440         tso_mss = 0;
1441         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1442                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1443         req->tso_mss_min = cpu_to_le16(tso_mss);
1444
1445         tso_mss = 0;
1446         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1447                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1448         req->tso_mss_max = cpu_to_le16(tso_mss);
1449
1450         return hclge_cmd_send(&hdev->hw, &desc, 1);
1451 }
1452
1453 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1454 {
1455         struct hclge_cfg_gro_status_cmd *req;
1456         struct hclge_desc desc;
1457         int ret;
1458
1459         if (!hnae3_dev_gro_supported(hdev))
1460                 return 0;
1461
1462         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1463         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1464
1465         req->gro_en = cpu_to_le16(en ? 1 : 0);
1466
1467         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1468         if (ret)
1469                 dev_err(&hdev->pdev->dev,
1470                         "GRO hardware config cmd failed, ret = %d\n", ret);
1471
1472         return ret;
1473 }
1474
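     /* Allocate the per-queue bookkeeping array and point every TQP at its MMIO
      * window (HCLGE_TQP_REG_OFFSET + i * HCLGE_TQP_REG_SIZE), using the buffer
      * length and descriptor counts taken from the parsed configuration.
      */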
1475 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1476 {
1477         struct hclge_tqp *tqp;
1478         int i;
1479
1480         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1481                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1482         if (!hdev->htqp)
1483                 return -ENOMEM;
1484
1485         tqp = hdev->htqp;
1486
1487         for (i = 0; i < hdev->num_tqps; i++) {
1488                 tqp->dev = &hdev->pdev->dev;
1489                 tqp->index = i;
1490
1491                 tqp->q.ae_algo = &ae_algo;
1492                 tqp->q.buf_size = hdev->rx_buf_len;
1493                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1494                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1495                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1496                         i * HCLGE_TQP_REG_SIZE;
1497
1498                 tqp++;
1499         }
1500
1501         return 0;
1502 }
1503
1504 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1505                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1506 {
1507         struct hclge_tqp_map_cmd *req;
1508         struct hclge_desc desc;
1509         int ret;
1510
1511         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1512
1513         req = (struct hclge_tqp_map_cmd *)desc.data;
1514         req->tqp_id = cpu_to_le16(tqp_pid);
1515         req->tqp_vf = func_id;
1516         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1517         if (!is_pf)
1518                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1519         req->tqp_vid = cpu_to_le16(tqp_vid);
1520
1521         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1522         if (ret)
1523                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1524
1525         return ret;
1526 }
1527
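     /* Hand out up to num_tqps unused hardware queues to the vport, then cap
      * kinfo->rss_size so the per-TC queue count respects rss_size_max and the
      * NIC MSI budget (one vector appears to be reserved, hence num_nic_msi - 1).
      */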
1528 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1529 {
1530         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1531         struct hclge_dev *hdev = vport->back;
1532         int i, alloced;
1533
1534         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1535              alloced < num_tqps; i++) {
1536                 if (!hdev->htqp[i].alloced) {
1537                         hdev->htqp[i].q.handle = &vport->nic;
1538                         hdev->htqp[i].q.tqp_index = alloced;
1539                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1540                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1541                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1542                         hdev->htqp[i].alloced = true;
1543                         alloced++;
1544                 }
1545         }
1546         vport->alloc_tqps = alloced;
1547         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1548                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1549
1550         /* ensure a one-to-one mapping between irq and queue by default */
1551         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1552                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1553
1554         return 0;
1555 }
1556
1557 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1558                             u16 num_tx_desc, u16 num_rx_desc)
1559
1560 {
1561         struct hnae3_handle *nic = &vport->nic;
1562         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1563         struct hclge_dev *hdev = vport->back;
1564         int ret;
1565
1566         kinfo->num_tx_desc = num_tx_desc;
1567         kinfo->num_rx_desc = num_rx_desc;
1568
1569         kinfo->rx_buf_len = hdev->rx_buf_len;
1570
1571         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1572                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1573         if (!kinfo->tqp)
1574                 return -ENOMEM;
1575
1576         ret = hclge_assign_tqp(vport, num_tqps);
1577         if (ret)
1578                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1579
1580         return ret;
1581 }
1582
1583 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1584                                   struct hclge_vport *vport)
1585 {
1586         struct hnae3_handle *nic = &vport->nic;
1587         struct hnae3_knic_private_info *kinfo;
1588         u16 i;
1589
1590         kinfo = &nic->kinfo;
1591         for (i = 0; i < vport->alloc_tqps; i++) {
1592                 struct hclge_tqp *q =
1593                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1594                 bool is_pf;
1595                 int ret;
1596
1597                 is_pf = !(vport->vport_id);
1598                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1599                                              i, is_pf);
1600                 if (ret)
1601                         return ret;
1602         }
1603
1604         return 0;
1605 }
1606
1607 static int hclge_map_tqp(struct hclge_dev *hdev)
1608 {
1609         struct hclge_vport *vport = hdev->vport;
1610         u16 i, num_vport;
1611
1612         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1613         for (i = 0; i < num_vport; i++) {
1614                 int ret;
1615
1616                 ret = hclge_map_tqp_to_vport(hdev, vport);
1617                 if (ret)
1618                         return ret;
1619
1620                 vport++;
1621         }
1622
1623         return 0;
1624 }
1625
1626 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1627 {
1628         struct hnae3_handle *nic = &vport->nic;
1629         struct hclge_dev *hdev = vport->back;
1630         int ret;
1631
1632         nic->pdev = hdev->pdev;
1633         nic->ae_algo = &ae_algo;
1634         nic->numa_node_mask = hdev->numa_node_mask;
1635
1636         ret = hclge_knic_setup(vport, num_tqps,
1637                                hdev->num_tx_desc, hdev->num_rx_desc);
1638         if (ret)
1639                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1640
1641         return ret;
1642 }
1643
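     /* Create one vport for the PF itself plus one per VMDq vport and per
      * requested VF. TQPs are split evenly across the vports; any remainder is
      * given to the main (PF) vport.
      */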
1644 static int hclge_alloc_vport(struct hclge_dev *hdev)
1645 {
1646         struct pci_dev *pdev = hdev->pdev;
1647         struct hclge_vport *vport;
1648         u32 tqp_main_vport;
1649         u32 tqp_per_vport;
1650         int num_vport, i;
1651         int ret;
1652
1653         /* We need to alloc a vport for the main NIC of the PF */
1654         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1655
1656         if (hdev->num_tqps < num_vport) {
1657                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1658                         hdev->num_tqps, num_vport);
1659                 return -EINVAL;
1660         }
1661
1662         /* Alloc the same number of TQPs for every vport */
1663         tqp_per_vport = hdev->num_tqps / num_vport;
1664         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1665
1666         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1667                              GFP_KERNEL);
1668         if (!vport)
1669                 return -ENOMEM;
1670
1671         hdev->vport = vport;
1672         hdev->num_alloc_vport = num_vport;
1673
1674         if (IS_ENABLED(CONFIG_PCI_IOV))
1675                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1676
1677         for (i = 0; i < num_vport; i++) {
1678                 vport->back = hdev;
1679                 vport->vport_id = i;
1680                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1681                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1682                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1683                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1684                 INIT_LIST_HEAD(&vport->vlan_list);
1685                 INIT_LIST_HEAD(&vport->uc_mac_list);
1686                 INIT_LIST_HEAD(&vport->mc_mac_list);
1687
1688                 if (i == 0)
1689                         ret = hclge_vport_setup(vport, tqp_main_vport);
1690                 else
1691                         ret = hclge_vport_setup(vport, tqp_per_vport);
1692                 if (ret) {
1693                         dev_err(&pdev->dev,
1694                                 "vport setup failed for vport %d, %d\n",
1695                                 i, ret);
1696                         return ret;
1697                 }
1698
1699                 vport++;
1700         }
1701
1702         return 0;
1703 }
1704
1705 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1706                                     struct hclge_pkt_buf_alloc *buf_alloc)
1707 {
1708 /* TX buffer size is in units of 128 bytes */
1709 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1710 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1711         struct hclge_tx_buff_alloc_cmd *req;
1712         struct hclge_desc desc;
1713         int ret;
1714         u8 i;
1715
1716         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1717
1718         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1719         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1720                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1721
1722                 req->tx_pkt_buff[i] =
1723                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1724                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1725         }
1726
1727         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1728         if (ret)
1729                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1730                         ret);
1731
1732         return ret;
1733 }
1734
1735 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1736                                  struct hclge_pkt_buf_alloc *buf_alloc)
1737 {
1738         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1739
1740         if (ret)
1741                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1742
1743         return ret;
1744 }
1745
1746 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1747 {
1748         unsigned int i;
1749         u32 cnt = 0;
1750
1751         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1752                 if (hdev->hw_tc_map & BIT(i))
1753                         cnt++;
1754         return cnt;
1755 }
1756
1757 /* Get the number of pfc enabled TCs that have a private buffer */
1758 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1759                                   struct hclge_pkt_buf_alloc *buf_alloc)
1760 {
1761         struct hclge_priv_buf *priv;
1762         unsigned int i;
1763         int cnt = 0;
1764
1765         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1766                 priv = &buf_alloc->priv_buf[i];
1767                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1768                     priv->enable)
1769                         cnt++;
1770         }
1771
1772         return cnt;
1773 }
1774
1775 /* Get the number of pfc disabled TCs that have a private buffer */
1776 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1777                                      struct hclge_pkt_buf_alloc *buf_alloc)
1778 {
1779         struct hclge_priv_buf *priv;
1780         unsigned int i;
1781         int cnt = 0;
1782
1783         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1784                 priv = &buf_alloc->priv_buf[i];
1785                 if (hdev->hw_tc_map & BIT(i) &&
1786                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1787                     priv->enable)
1788                         cnt++;
1789         }
1790
1791         return cnt;
1792 }
1793
1794 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1795 {
1796         struct hclge_priv_buf *priv;
1797         u32 rx_priv = 0;
1798         int i;
1799
1800         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1801                 priv = &buf_alloc->priv_buf[i];
1802                 if (priv->enable)
1803                         rx_priv += priv->buf_size;
1804         }
1805         return rx_priv;
1806 }
1807
1808 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1809 {
1810         u32 i, total_tx_size = 0;
1811
1812         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1813                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1814
1815         return total_tx_size;
1816 }
1817
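     /* Check whether rx_all can cover all enabled private buffers plus a
      * minimally sized shared buffer. On success the shared buffer size, its
      * high/low waterlines and the per-TC thresholds are filled in; with DCB the
      * high threshold is scaled back to BUF_RESERVE_PERCENT when no more than
      * NEED_RESERVE_TC_NUM TCs are in use.
      */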
1818 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1819                                 struct hclge_pkt_buf_alloc *buf_alloc,
1820                                 u32 rx_all)
1821 {
1822         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1823         u32 tc_num = hclge_get_tc_num(hdev);
1824         u32 shared_buf, aligned_mps;
1825         u32 rx_priv;
1826         int i;
1827
1828         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1829
1830         if (hnae3_dev_dcb_supported(hdev))
1831                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1832                                         hdev->dv_buf_size;
1833         else
1834                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1835                                         + hdev->dv_buf_size;
1836
1837         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1838         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1839                              HCLGE_BUF_SIZE_UNIT);
1840
1841         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1842         if (rx_all < rx_priv + shared_std)
1843                 return false;
1844
1845         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1846         buf_alloc->s_buf.buf_size = shared_buf;
1847         if (hnae3_dev_dcb_supported(hdev)) {
1848                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1849                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1850                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1851                                   HCLGE_BUF_SIZE_UNIT);
1852         } else {
1853                 buf_alloc->s_buf.self.high = aligned_mps +
1854                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1855                 buf_alloc->s_buf.self.low = aligned_mps;
1856         }
1857
1858         if (hnae3_dev_dcb_supported(hdev)) {
1859                 hi_thrd = shared_buf - hdev->dv_buf_size;
1860
1861                 if (tc_num <= NEED_RESERVE_TC_NUM)
1862                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1863                                         / BUF_MAX_PERCENT;
1864
1865                 if (tc_num)
1866                         hi_thrd = hi_thrd / tc_num;
1867
1868                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1869                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1870                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1871         } else {
1872                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1873                 lo_thrd = aligned_mps;
1874         }
1875
1876         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1877                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1878                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1879         }
1880
1881         return true;
1882 }
1883
1884 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1885                                 struct hclge_pkt_buf_alloc *buf_alloc)
1886 {
1887         u32 i, total_size;
1888
1889         total_size = hdev->pkt_buf_size;
1890
1891         /* alloc tx buffer for all enabled tc */
1892         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1893                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1894
1895                 if (hdev->hw_tc_map & BIT(i)) {
1896                         if (total_size < hdev->tx_buf_size)
1897                                 return -ENOMEM;
1898
1899                         priv->tx_buf_size = hdev->tx_buf_size;
1900                 } else {
1901                         priv->tx_buf_size = 0;
1902                 }
1903
1904                 total_size -= priv->tx_buf_size;
1905         }
1906
1907         return 0;
1908 }
1909
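     /* Give every enabled TC a private rx buffer derived from the MPS-aligned
      * frame size plus dv_buf_size; @max selects the larger or smaller waterline
      * variant. Returns whether the layout still fits per hclge_is_rx_buf_ok().
      */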
1910 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1911                                   struct hclge_pkt_buf_alloc *buf_alloc)
1912 {
1913         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1914         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1915         unsigned int i;
1916
1917         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1918                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1919
1920                 priv->enable = 0;
1921                 priv->wl.low = 0;
1922                 priv->wl.high = 0;
1923                 priv->buf_size = 0;
1924
1925                 if (!(hdev->hw_tc_map & BIT(i)))
1926                         continue;
1927
1928                 priv->enable = 1;
1929
1930                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1931                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1932                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1933                                                 HCLGE_BUF_SIZE_UNIT);
1934                 } else {
1935                         priv->wl.low = 0;
1936                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1937                                         aligned_mps;
1938                 }
1939
1940                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1941         }
1942
1943         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1944 }
1945
1946 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1947                                           struct hclge_pkt_buf_alloc *buf_alloc)
1948 {
1949         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1950         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1951         int i;
1952
1953         /* let the last one be cleared first */
1954         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1955                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1956                 unsigned int mask = BIT((unsigned int)i);
1957
1958                 if (hdev->hw_tc_map & mask &&
1959                     !(hdev->tm_info.hw_pfc_map & mask)) {
1960                         /* Clear the private buffer of TCs without pfc */
1961                         priv->wl.low = 0;
1962                         priv->wl.high = 0;
1963                         priv->buf_size = 0;
1964                         priv->enable = 0;
1965                         no_pfc_priv_num--;
1966                 }
1967
1968                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1969                     no_pfc_priv_num == 0)
1970                         break;
1971         }
1972
1973         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1974 }
1975
1976 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1977                                         struct hclge_pkt_buf_alloc *buf_alloc)
1978 {
1979         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1980         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1981         int i;
1982
1983         /* let the last one be cleared first */
1984         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1985                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1986                 unsigned int mask = BIT((unsigned int)i);
1987
1988                 if (hdev->hw_tc_map & mask &&
1989                     hdev->tm_info.hw_pfc_map & mask) {
1990                         /* Reduce the number of pfc TCs with a private buffer */
1991                         priv->wl.low = 0;
1992                         priv->enable = 0;
1993                         priv->wl.high = 0;
1994                         priv->buf_size = 0;
1995                         pfc_priv_num--;
1996                 }
1997
1998                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1999                     pfc_priv_num == 0)
2000                         break;
2001         }
2002
2003         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2004 }
2005
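     /* Try to spend the whole remaining rx buffer on private buffers only, i.e.
      * with a zero-sized shared buffer. The COMPENSATE_* and PRIV_WL_GAP values
      * look like hardware-required headroom below the per-TC waterlines; the
      * exact numbers should be treated as device specific.
      */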
2006 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2007                                        struct hclge_pkt_buf_alloc *buf_alloc)
2008 {
2009 #define COMPENSATE_BUFFER       0x3C00
2010 #define COMPENSATE_HALF_MPS_NUM 5
2011 #define PRIV_WL_GAP             0x1800
2012
2013         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2014         u32 tc_num = hclge_get_tc_num(hdev);
2015         u32 half_mps = hdev->mps >> 1;
2016         u32 min_rx_priv;
2017         unsigned int i;
2018
2019         if (tc_num)
2020                 rx_priv = rx_priv / tc_num;
2021
2022         if (tc_num <= NEED_RESERVE_TC_NUM)
2023                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2024
2025         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2026                         COMPENSATE_HALF_MPS_NUM * half_mps;
2027         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2028         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2029
2030         if (rx_priv < min_rx_priv)
2031                 return false;
2032
2033         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2034                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2035
2036                 priv->enable = 0;
2037                 priv->wl.low = 0;
2038                 priv->wl.high = 0;
2039                 priv->buf_size = 0;
2040
2041                 if (!(hdev->hw_tc_map & BIT(i)))
2042                         continue;
2043
2044                 priv->enable = 1;
2045                 priv->buf_size = rx_priv;
2046                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2047                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2048         }
2049
2050         buf_alloc->s_buf.buf_size = 0;
2051
2052         return true;
2053 }
2054
2055 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2056  * @hdev: pointer to struct hclge_dev
2057  * @buf_alloc: pointer to buffer calculation data
2058  * @return: 0: calculation successful, negative: fail
2059  */
2060 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2061                                 struct hclge_pkt_buf_alloc *buf_alloc)
2062 {
2063         /* When DCB is not supported, rx private buffer is not allocated. */
2064         if (!hnae3_dev_dcb_supported(hdev)) {
2065                 u32 rx_all = hdev->pkt_buf_size;
2066
2067                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2068                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2069                         return -ENOMEM;
2070
2071                 return 0;
2072         }
2073
2074         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2075                 return 0;
2076
2077         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2078                 return 0;
2079
2080         /* try to decrease the buffer size */
2081         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2082                 return 0;
2083
2084         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2085                 return 0;
2086
2087         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2088                 return 0;
2089
2090         return -ENOMEM;
2091 }
2092
2093 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2094                                    struct hclge_pkt_buf_alloc *buf_alloc)
2095 {
2096         struct hclge_rx_priv_buff_cmd *req;
2097         struct hclge_desc desc;
2098         int ret;
2099         int i;
2100
2101         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2102         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2103
2104         /* Alloc private buffer TCs */
2105         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2106                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2107
2108                 req->buf_num[i] =
2109                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2110                 req->buf_num[i] |=
2111                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2112         }
2113
2114         req->shared_buf =
2115                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2116                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2117
2118         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2119         if (ret)
2120                 dev_err(&hdev->pdev->dev,
2121                         "rx private buffer alloc cmd failed %d\n", ret);
2122
2123         return ret;
2124 }
2125
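     /* Push the per-TC private buffer waterlines to hardware. Each descriptor
      * carries HCLGE_TC_NUM_ONE_DESC TCs, so two chained descriptors (NEXT flag
      * set on the first) cover all TCs.
      */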
2126 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2127                                    struct hclge_pkt_buf_alloc *buf_alloc)
2128 {
2129         struct hclge_rx_priv_wl_buf *req;
2130         struct hclge_priv_buf *priv;
2131         struct hclge_desc desc[2];
2132         int i, j;
2133         int ret;
2134
2135         for (i = 0; i < 2; i++) {
2136                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2137                                            false);
2138                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2139
2140                 /* The first descriptor sets the NEXT bit to 1 */
2141                 if (i == 0)
2142                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2143                 else
2144                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2145
2146                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2147                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2148
2149                         priv = &buf_alloc->priv_buf[idx];
2150                         req->tc_wl[j].high =
2151                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2152                         req->tc_wl[j].high |=
2153                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2154                         req->tc_wl[j].low =
2155                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2156                         req->tc_wl[j].low |=
2157                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2158                 }
2159         }
2160
2161         /* Send 2 descriptors at one time */
2162         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2163         if (ret)
2164                 dev_err(&hdev->pdev->dev,
2165                         "rx private waterline config cmd failed %d\n",
2166                         ret);
2167         return ret;
2168 }
2169
2170 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2171                                     struct hclge_pkt_buf_alloc *buf_alloc)
2172 {
2173         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2174         struct hclge_rx_com_thrd *req;
2175         struct hclge_desc desc[2];
2176         struct hclge_tc_thrd *tc;
2177         int i, j;
2178         int ret;
2179
2180         for (i = 0; i < 2; i++) {
2181                 hclge_cmd_setup_basic_desc(&desc[i],
2182                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2183                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2184
2185                 /* The first descriptor sets the NEXT bit to 1 */
2186                 if (i == 0)
2187                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2188                 else
2189                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2190
2191                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2192                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2193
2194                         req->com_thrd[j].high =
2195                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2196                         req->com_thrd[j].high |=
2197                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2198                         req->com_thrd[j].low =
2199                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2200                         req->com_thrd[j].low |=
2201                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2202                 }
2203         }
2204
2205         /* Send 2 descriptors at one time */
2206         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2207         if (ret)
2208                 dev_err(&hdev->pdev->dev,
2209                         "common threshold config cmd failed %d\n", ret);
2210         return ret;
2211 }
2212
2213 static int hclge_common_wl_config(struct hclge_dev *hdev,
2214                                   struct hclge_pkt_buf_alloc *buf_alloc)
2215 {
2216         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2217         struct hclge_rx_com_wl *req;
2218         struct hclge_desc desc;
2219         int ret;
2220
2221         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2222
2223         req = (struct hclge_rx_com_wl *)desc.data;
2224         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2225         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2226
2227         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2228         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2229
2230         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2231         if (ret)
2232                 dev_err(&hdev->pdev->dev,
2233                         "common waterline config cmd failed %d\n", ret);
2234
2235         return ret;
2236 }
2237
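     /* Top-level packet buffer setup: calculate and program the tx buffers, size
      * and program the rx private buffers, then (on DCB-capable hardware) the
      * private waterlines and common thresholds, finishing with the common
      * waterline.
      */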
2238 int hclge_buffer_alloc(struct hclge_dev *hdev)
2239 {
2240         struct hclge_pkt_buf_alloc *pkt_buf;
2241         int ret;
2242
2243         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2244         if (!pkt_buf)
2245                 return -ENOMEM;
2246
2247         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2248         if (ret) {
2249                 dev_err(&hdev->pdev->dev,
2250                         "could not calc tx buffer size for all TCs %d\n", ret);
2251                 goto out;
2252         }
2253
2254         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2255         if (ret) {
2256                 dev_err(&hdev->pdev->dev,
2257                         "could not alloc tx buffers %d\n", ret);
2258                 goto out;
2259         }
2260
2261         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2262         if (ret) {
2263                 dev_err(&hdev->pdev->dev,
2264                         "could not calc rx priv buffer size for all TCs %d\n",
2265                         ret);
2266                 goto out;
2267         }
2268
2269         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2270         if (ret) {
2271                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2272                         ret);
2273                 goto out;
2274         }
2275
2276         if (hnae3_dev_dcb_supported(hdev)) {
2277                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2278                 if (ret) {
2279                         dev_err(&hdev->pdev->dev,
2280                                 "could not configure rx private waterline %d\n",
2281                                 ret);
2282                         goto out;
2283                 }
2284
2285                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2286                 if (ret) {
2287                         dev_err(&hdev->pdev->dev,
2288                                 "could not configure common threshold %d\n",
2289                                 ret);
2290                         goto out;
2291                 }
2292         }
2293
2294         ret = hclge_common_wl_config(hdev, pkt_buf);
2295         if (ret)
2296                 dev_err(&hdev->pdev->dev,
2297                         "could not configure common waterline %d\n", ret);
2298
2299 out:
2300         kfree(pkt_buf);
2301         return ret;
2302 }
2303
2304 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2305 {
2306         struct hnae3_handle *roce = &vport->roce;
2307         struct hnae3_handle *nic = &vport->nic;
2308
2309         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2310
2311         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2312             vport->back->num_msi_left == 0)
2313                 return -EINVAL;
2314
2315         roce->rinfo.base_vector = vport->back->roce_base_vector;
2316
2317         roce->rinfo.netdev = nic->kinfo.netdev;
2318         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2319
2320         roce->pdev = nic->pdev;
2321         roce->ae_algo = nic->ae_algo;
2322         roce->numa_node_mask = nic->numa_node_mask;
2323
2324         return 0;
2325 }
2326
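     /* Allocate between HNAE3_MIN_VECTOR_NUM and hdev->num_msi MSI/MSI-X vectors
      * and set up the vector_status/vector_irq tables. The RoCE base vector sits
      * roce_base_msix_offset vectors above the PF's first IRQ.
      */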
2327 static int hclge_init_msi(struct hclge_dev *hdev)
2328 {
2329         struct pci_dev *pdev = hdev->pdev;
2330         int vectors;
2331         int i;
2332
2333         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2334                                         hdev->num_msi,
2335                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2336         if (vectors < 0) {
2337                 dev_err(&pdev->dev,
2338                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2339                         vectors);
2340                 return vectors;
2341         }
2342         if (vectors < hdev->num_msi)
2343                 dev_warn(&hdev->pdev->dev,
2344                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2345                          hdev->num_msi, vectors);
2346
2347         hdev->num_msi = vectors;
2348         hdev->num_msi_left = vectors;
2349
2350         hdev->base_msi_vector = pdev->irq;
2351         hdev->roce_base_vector = hdev->base_msi_vector +
2352                                 hdev->roce_base_msix_offset;
2353
2354         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2355                                            sizeof(u16), GFP_KERNEL);
2356         if (!hdev->vector_status) {
2357                 pci_free_irq_vectors(pdev);
2358                 return -ENOMEM;
2359         }
2360
2361         for (i = 0; i < hdev->num_msi; i++)
2362                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2363
2364         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2365                                         sizeof(int), GFP_KERNEL);
2366         if (!hdev->vector_irq) {
2367                 pci_free_irq_vectors(pdev);
2368                 return -ENOMEM;
2369         }
2370
2371         return 0;
2372 }
2373
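     /* Only 10M and 100M links may run half duplex; every other speed is forced
      * to full duplex.
      */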
2374 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2375 {
2376         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2377                 duplex = HCLGE_MAC_FULL;
2378
2379         return duplex;
2380 }
2381
2382 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2383                                       u8 duplex)
2384 {
2385         struct hclge_config_mac_speed_dup_cmd *req;
2386         struct hclge_desc desc;
2387         int ret;
2388
2389         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2390
2391         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2392
2393         if (duplex)
2394                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2395
2396         switch (speed) {
2397         case HCLGE_MAC_SPEED_10M:
2398                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2399                                 HCLGE_CFG_SPEED_S, 6);
2400                 break;
2401         case HCLGE_MAC_SPEED_100M:
2402                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403                                 HCLGE_CFG_SPEED_S, 7);
2404                 break;
2405         case HCLGE_MAC_SPEED_1G:
2406                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407                                 HCLGE_CFG_SPEED_S, 0);
2408                 break;
2409         case HCLGE_MAC_SPEED_10G:
2410                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411                                 HCLGE_CFG_SPEED_S, 1);
2412                 break;
2413         case HCLGE_MAC_SPEED_25G:
2414                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415                                 HCLGE_CFG_SPEED_S, 2);
2416                 break;
2417         case HCLGE_MAC_SPEED_40G:
2418                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419                                 HCLGE_CFG_SPEED_S, 3);
2420                 break;
2421         case HCLGE_MAC_SPEED_50G:
2422                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423                                 HCLGE_CFG_SPEED_S, 4);
2424                 break;
2425         case HCLGE_MAC_SPEED_100G:
2426                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2427                                 HCLGE_CFG_SPEED_S, 5);
2428                 break;
2429         default:
2430                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2431                 return -EINVAL;
2432         }
2433
2434         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2435                       1);
2436
2437         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2438         if (ret) {
2439                 dev_err(&hdev->pdev->dev,
2440                         "mac speed/duplex config cmd failed %d.\n", ret);
2441                 return ret;
2442         }
2443
2444         return 0;
2445 }
2446
2447 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2448 {
2449         int ret;
2450
2451         duplex = hclge_check_speed_dup(duplex, speed);
2452         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2453                 return 0;
2454
2455         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2456         if (ret)
2457                 return ret;
2458
2459         hdev->hw.mac.speed = speed;
2460         hdev->hw.mac.duplex = duplex;
2461
2462         return 0;
2463 }
2464
2465 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2466                                      u8 duplex)
2467 {
2468         struct hclge_vport *vport = hclge_get_vport(handle);
2469         struct hclge_dev *hdev = vport->back;
2470
2471         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2472 }
2473
2474 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2475 {
2476         struct hclge_config_auto_neg_cmd *req;
2477         struct hclge_desc desc;
2478         u32 flag = 0;
2479         int ret;
2480
2481         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2482
2483         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2484         if (enable)
2485                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2486         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2487
2488         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2489         if (ret)
2490                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2491                         ret);
2492
2493         return ret;
2494 }
2495
2496 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2497 {
2498         struct hclge_vport *vport = hclge_get_vport(handle);
2499         struct hclge_dev *hdev = vport->back;
2500
2501         if (!hdev->hw.mac.support_autoneg) {
2502                 if (enable) {
2503                         dev_err(&hdev->pdev->dev,
2504                                 "autoneg is not supported by current port\n");
2505                         return -EOPNOTSUPP;
2506                 } else {
2507                         return 0;
2508                 }
2509         }
2510
2511         return hclge_set_autoneg_en(hdev, enable);
2512 }
2513
2514 static int hclge_get_autoneg(struct hnae3_handle *handle)
2515 {
2516         struct hclge_vport *vport = hclge_get_vport(handle);
2517         struct hclge_dev *hdev = vport->back;
2518         struct phy_device *phydev = hdev->hw.mac.phydev;
2519
2520         if (phydev)
2521                 return phydev->autoneg;
2522
2523         return hdev->hw.mac.autoneg;
2524 }
2525
2526 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2527 {
2528         struct hclge_vport *vport = hclge_get_vport(handle);
2529         struct hclge_dev *hdev = vport->back;
2530         int ret;
2531
2532         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2533
2534         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2535         if (ret)
2536                 return ret;
2537         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2538 }
2539
2540 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2541 {
2542         struct hclge_vport *vport = hclge_get_vport(handle);
2543         struct hclge_dev *hdev = vport->back;
2544
2545         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2546                 return hclge_set_autoneg_en(hdev, !halt);
2547
2548         return 0;
2549 }
2550
2551 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2552 {
2553         struct hclge_config_fec_cmd *req;
2554         struct hclge_desc desc;
2555         int ret;
2556
2557         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2558
2559         req = (struct hclge_config_fec_cmd *)desc.data;
2560         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2561                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2562         if (fec_mode & BIT(HNAE3_FEC_RS))
2563                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2564                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2565         if (fec_mode & BIT(HNAE3_FEC_BASER))
2566                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2567                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2568
2569         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2570         if (ret)
2571                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2572
2573         return ret;
2574 }
2575
2576 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2577 {
2578         struct hclge_vport *vport = hclge_get_vport(handle);
2579         struct hclge_dev *hdev = vport->back;
2580         struct hclge_mac *mac = &hdev->hw.mac;
2581         int ret;
2582
2583         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2584                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2585                 return -EINVAL;
2586         }
2587
2588         ret = hclge_set_fec_hw(hdev, fec_mode);
2589         if (ret)
2590                 return ret;
2591
2592         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2593         return 0;
2594 }
2595
2596 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2597                           u8 *fec_mode)
2598 {
2599         struct hclge_vport *vport = hclge_get_vport(handle);
2600         struct hclge_dev *hdev = vport->back;
2601         struct hclge_mac *mac = &hdev->hw.mac;
2602
2603         if (fec_ability)
2604                 *fec_ability = mac->fec_ability;
2605         if (fec_mode)
2606                 *fec_mode = mac->fec_mode;
2607 }
2608
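     /* Initial MAC bring-up: speed/duplex, autoneg (when supported), any
      * user-requested FEC mode, MTU, the default loopback setting and finally
      * the packet buffer allocation above.
      */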
2609 static int hclge_mac_init(struct hclge_dev *hdev)
2610 {
2611         struct hclge_mac *mac = &hdev->hw.mac;
2612         int ret;
2613
2614         hdev->support_sfp_query = true;
2615         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2616         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2617                                          hdev->hw.mac.duplex);
2618         if (ret)
2619                 return ret;
2620
2621         if (hdev->hw.mac.support_autoneg) {
2622                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2623                 if (ret)
2624                         return ret;
2625         }
2626
2627         mac->link = 0;
2628
2629         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2630                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2631                 if (ret)
2632                         return ret;
2633         }
2634
2635         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2636         if (ret) {
2637                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2638                 return ret;
2639         }
2640
2641         ret = hclge_set_default_loopback(hdev);
2642         if (ret)
2643                 return ret;
2644
2645         ret = hclge_buffer_alloc(hdev);
2646         if (ret)
2647                 dev_err(&hdev->pdev->dev,
2648                         "allocate buffer fail, ret=%d\n", ret);
2649
2650         return ret;
2651 }
2652
2653 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2654 {
2655         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2656             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2657                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2658                                     hclge_wq, &hdev->service_task, 0);
2659 }
2660
2661 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2662 {
2663         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2664             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2665                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2666                                     hclge_wq, &hdev->service_task, 0);
2667 }
2668
2669 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2670 {
2671         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2672             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2673                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2674                                     hclge_wq, &hdev->service_task,
2675                                     delay_time);
2676 }
2677
2678 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2679 {
2680         struct hclge_link_status_cmd *req;
2681         struct hclge_desc desc;
2682         int link_status;
2683         int ret;
2684
2685         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2686         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2687         if (ret) {
2688                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2689                         ret);
2690                 return ret;
2691         }
2692
2693         req = (struct hclge_link_status_cmd *)desc.data;
2694         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2695
2696         return !!link_status;
2697 }
2698
2699 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2700 {
2701         unsigned int mac_state;
2702         int link_stat;
2703
2704         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2705                 return 0;
2706
2707         mac_state = hclge_get_mac_link_status(hdev);
2708
2709         if (hdev->hw.mac.phydev) {
2710                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2711                         link_stat = mac_state &
2712                                 hdev->hw.mac.phydev->link;
2713                 else
2714                         link_stat = 0;
2715
2716         } else {
2717                 link_stat = mac_state;
2718         }
2719
2720         return !!link_stat;
2721 }
2722
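     /* Poll the combined MAC + PHY link state and, on a change, notify the NIC
      * (and, when registered, RoCE) client of every vport. The LINK_UPDATING bit
      * keeps concurrent updates from racing with each other.
      */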
2723 static void hclge_update_link_status(struct hclge_dev *hdev)
2724 {
2725         struct hnae3_client *rclient = hdev->roce_client;
2726         struct hnae3_client *client = hdev->nic_client;
2727         struct hnae3_handle *rhandle;
2728         struct hnae3_handle *handle;
2729         int state;
2730         int i;
2731
2732         if (!client)
2733                 return;
2734
2735         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2736                 return;
2737
2738         state = hclge_get_mac_phy_link(hdev);
2739         if (state != hdev->hw.mac.link) {
2740                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2741                         handle = &hdev->vport[i].nic;
2742                         client->ops->link_status_change(handle, state);
2743                         hclge_config_mac_tnl_int(hdev, state);
2744                         rhandle = &hdev->vport[i].roce;
2745                         if (rclient && rclient->ops->link_status_change)
2746                                 rclient->ops->link_status_change(rhandle,
2747                                                                  state);
2748                 }
2749                 hdev->hw.mac.link = state;
2750         }
2751
2752         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2753 }
2754
2755 static void hclge_update_port_capability(struct hclge_mac *mac)
2756 {
2757         /* update fec ability by speed */
2758         hclge_convert_setting_fec(mac);
2759
2760         /* firmware cannot identify the backplane type, so the media type
2761          * read from the configuration can help deal with it
2762          */
2763         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2764             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2765                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2766         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2767                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2768
2769         if (mac->support_autoneg) {
2770                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2771                 linkmode_copy(mac->advertising, mac->supported);
2772         } else {
2773                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2774                                    mac->supported);
2775                 linkmode_zero(mac->advertising);
2776         }
2777 }
2778
2779 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2780 {
2781         struct hclge_sfp_info_cmd *resp;
2782         struct hclge_desc desc;
2783         int ret;
2784
2785         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2786         resp = (struct hclge_sfp_info_cmd *)desc.data;
2787         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2788         if (ret == -EOPNOTSUPP) {
2789                 dev_warn(&hdev->pdev->dev,
2790                          "IMP does not support getting SFP speed %d\n", ret);
2791                 return ret;
2792         } else if (ret) {
2793                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2794                 return ret;
2795         }
2796
2797         *speed = le32_to_cpu(resp->speed);
2798
2799         return 0;
2800 }
2801
2802 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2803 {
2804         struct hclge_sfp_info_cmd *resp;
2805         struct hclge_desc desc;
2806         int ret;
2807
2808         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2809         resp = (struct hclge_sfp_info_cmd *)desc.data;
2810
2811         resp->query_type = QUERY_ACTIVE_SPEED;
2812
2813         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2814         if (ret == -EOPNOTSUPP) {
2815                 dev_warn(&hdev->pdev->dev,
2816                          "IMP does not support getting SFP info %d\n", ret);
2817                 return ret;
2818         } else if (ret) {
2819                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2820                 return ret;
2821         }
2822
2823         /* In some cases, the mac speed got from IMP may be 0, it shouldn't
2824          * be set to mac->speed.
2825          */
2826         if (!le32_to_cpu(resp->speed))
2827                 return 0;
2828
2829         mac->speed = le32_to_cpu(resp->speed);
2830         /* if resp->speed_ability is 0, it means it's an old firmware
2831          * version, so do not update these params
2832          */
2833         if (resp->speed_ability) {
2834                 mac->module_type = le32_to_cpu(resp->module_type);
2835                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2836                 mac->autoneg = resp->autoneg;
2837                 mac->support_autoneg = resp->autoneg_ability;
2838                 mac->speed_type = QUERY_ACTIVE_SPEED;
2839                 if (!resp->active_fec)
2840                         mac->fec_mode = 0;
2841                 else
2842                         mac->fec_mode = BIT(resp->active_fec);
2843         } else {
2844                 mac->speed_type = QUERY_SFP_SPEED;
2845         }
2846
2847         return 0;
2848 }
2849
2850 static int hclge_update_port_info(struct hclge_dev *hdev)
2851 {
2852         struct hclge_mac *mac = &hdev->hw.mac;
2853         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2854         int ret;
2855
2856         /* get the port info from SFP cmd if not copper port */
2857         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2858                 return 0;
2859
2860         /* if IMP does not support getting SFP/qSFP info, return directly */
2861         if (!hdev->support_sfp_query)
2862                 return 0;
2863
2864         if (hdev->pdev->revision >= 0x21)
2865                 ret = hclge_get_sfp_info(hdev, mac);
2866         else
2867                 ret = hclge_get_sfp_speed(hdev, &speed);
2868
2869         if (ret == -EOPNOTSUPP) {
2870                 hdev->support_sfp_query = false;
2871                 return ret;
2872         } else if (ret) {
2873                 return ret;
2874         }
2875
2876         if (hdev->pdev->revision >= 0x21) {
2877                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2878                         hclge_update_port_capability(mac);
2879                         return 0;
2880                 }
2881                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2882                                                HCLGE_MAC_FULL);
2883         } else {
2884                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2885                         return 0; /* do nothing if no SFP */
2886
2887                 /* must config full duplex for SFP */
2888                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2889         }
2890 }
2891
2892 static int hclge_get_status(struct hnae3_handle *handle)
2893 {
2894         struct hclge_vport *vport = hclge_get_vport(handle);
2895         struct hclge_dev *hdev = vport->back;
2896
2897         hclge_update_link_status(hdev);
2898
2899         return hdev->hw.mac.link;
2900 }
2901
2902 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2903 {
2904         if (!pci_num_vf(hdev->pdev)) {
2905                 dev_err(&hdev->pdev->dev,
2906                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2907                 return NULL;
2908         }
2909
2910         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2911                 dev_err(&hdev->pdev->dev,
2912                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
2913                         vf, pci_num_vf(hdev->pdev));
2914                 return NULL;
2915         }
2916
2917         /* VFs start from 1 in vport */
2918         vf += HCLGE_VF_VPORT_START_NUM;
2919         return &hdev->vport[vf];
2920 }
2921
2922 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2923                                struct ifla_vf_info *ivf)
2924 {
2925         struct hclge_vport *vport = hclge_get_vport(handle);
2926         struct hclge_dev *hdev = vport->back;
2927
2928         vport = hclge_get_vf_vport(hdev, vf);
2929         if (!vport)
2930                 return -EINVAL;
2931
2932         ivf->vf = vf;
2933         ivf->linkstate = vport->vf_info.link_state;
2934         ivf->spoofchk = vport->vf_info.spoofchk;
2935         ivf->trusted = vport->vf_info.trusted;
2936         ivf->min_tx_rate = 0;
2937         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2938         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2939         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2940         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2941         ether_addr_copy(ivf->mac, vport->vf_info.mac);
2942
2943         return 0;
2944 }
2945
2946 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2947                                    int link_state)
2948 {
2949         struct hclge_vport *vport = hclge_get_vport(handle);
2950         struct hclge_dev *hdev = vport->back;
2951
2952         vport = hclge_get_vf_vport(hdev, vf);
2953         if (!vport)
2954                 return -EINVAL;
2955
2956         vport->vf_info.link_state = link_state;
2957
2958         return 0;
2959 }
2960
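/* Decode the vector0 interrupt source. Reset events (IMP first, then
 * global) take priority over MSI-X error events, which in turn take
 * priority over mailbox (CMDQ RX) events; anything else is treated as
 * an "other" event and only logged.
 */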
2961 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2962 {
2963         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2964
2965         /* fetch the events from their corresponding regs */
2966         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2967         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2968         msix_src_reg = hclge_read_dev(&hdev->hw,
2969                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2970
2971         /* Assumption: If by any chance reset and mailbox events are reported
2972          * together, then we will only process the reset event in this pass and
2973          * will defer the processing of the mailbox events. Since we would not
2974          * have cleared the RX CMDQ event this time, we would receive another
2975          * interrupt from H/W just for the mailbox.
2976          *
2977          * check for vector0 reset event sources
2978          */
2979         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2980                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2981                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2982                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2983                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2984                 hdev->rst_stats.imp_rst_cnt++;
2985                 return HCLGE_VECTOR0_EVENT_RST;
2986         }
2987
2988         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2989                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2990                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2991                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2992                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2993                 hdev->rst_stats.global_rst_cnt++;
2994                 return HCLGE_VECTOR0_EVENT_RST;
2995         }
2996
2997         /* check for vector0 msix event source */
2998         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2999                 *clearval = msix_src_reg;
3000                 return HCLGE_VECTOR0_EVENT_ERR;
3001         }
3002
3003         /* check for vector0 mailbox(=CMDQ RX) event source */
3004         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3005                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3006                 *clearval = cmdq_src_reg;
3007                 return HCLGE_VECTOR0_EVENT_MBX;
3008         }
3009
3010         /* print other vector0 event source */
3011         dev_info(&hdev->pdev->dev,
3012                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3013                  cmdq_src_reg, msix_src_reg);
3014         *clearval = msix_src_reg;
3015
3016         return HCLGE_VECTOR0_EVENT_OTHER;
3017 }
3018
3019 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3020                                     u32 regclr)
3021 {
3022         switch (event_type) {
3023         case HCLGE_VECTOR0_EVENT_RST:
3024                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3025                 break;
3026         case HCLGE_VECTOR0_EVENT_MBX:
3027                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3028                 break;
3029         default:
3030                 break;
3031         }
3032 }
3033
3034 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3035 {
3036         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3037                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3038                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3039                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3040         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3041 }
3042
3043 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3044 {
3045         writel(enable ? 1 : 0, vector->addr);
3046 }
3047
3048 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3049 {
3050         struct hclge_dev *hdev = data;
3051         u32 clearval = 0;
3052         u32 event_cause;
3053
3054         hclge_enable_vector(&hdev->misc_vector, false);
3055         event_cause = hclge_check_event_cause(hdev, &clearval);
3056
3057         /* vector 0 interrupt is shared with reset and mailbox source events. */
3058         switch (event_cause) {
3059         case HCLGE_VECTOR0_EVENT_ERR:
3060                 /* we do not know what type of reset is required now. This could
3061                  * only be decided after we fetch the type of errors which
3062                  * caused this event. Therefore, we will do the following for now:
3063                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3064                  *    have deferred the type of reset to be used.
3065                  * 2. Schedule the reset service task.
3066                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3067                  *    will fetch the correct type of reset. This would be done
3068                  *    by first decoding the types of errors.
3069                  */
3070                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3071                 /* fall through */
3072         case HCLGE_VECTOR0_EVENT_RST:
3073                 hclge_reset_task_schedule(hdev);
3074                 break;
3075         case HCLGE_VECTOR0_EVENT_MBX:
3076                 /* If we are here then,
3077                  * 1. Either we are not handling any mbx task and we are not
3078                  *    scheduled as well
3079                  *                        OR
3080                  * 2. We could be handling an mbx task but nothing more is
3081                  *    scheduled.
3082                  * In both cases, we should schedule the mbx task as there are more
3083                  * mbx messages reported by this interrupt.
3084                  */
3085                 hclge_mbx_task_schedule(hdev);
3086                 break;
3087         default:
3088                 dev_warn(&hdev->pdev->dev,
3089                          "received unknown or unhandled event of vector0\n");
3090                 break;
3091         }
3092
3093         hclge_clear_event_cause(hdev, event_cause, clearval);
3094
3095         /* Enable the interrupt if it is not caused by reset. And when
3096          * clearval is equal to 0, it means the interrupt status may have been
3097          * cleared by hardware before the driver read the status register.
3098          * In this case, the vector0 interrupt should also be enabled.
3099          */
3100         if (!clearval ||
3101             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3102                 hclge_enable_vector(&hdev->misc_vector, true);
3103         }
3104
3105         return IRQ_HANDLED;
3106 }
3107
3108 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3109 {
3110         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3111                 dev_warn(&hdev->pdev->dev,
3112                          "vector(vector_id %d) has been freed.\n", vector_id);
3113                 return;
3114         }
3115
3116         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3117         hdev->num_msi_left += 1;
3118         hdev->num_msi_used -= 1;
3119 }
3120
3121 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3122 {
3123         struct hclge_misc_vector *vector = &hdev->misc_vector;
3124
3125         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3126
3127         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3128         hdev->vector_status[0] = 0;
3129
3130         hdev->num_msi_left -= 1;
3131         hdev->num_msi_used += 1;
3132 }
3133
3134 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3135                                       const cpumask_t *mask)
3136 {
3137         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3138                                               affinity_notify);
3139
3140         cpumask_copy(&hdev->affinity_mask, mask);
3141 }
3142
3143 static void hclge_irq_affinity_release(struct kref *ref)
3144 {
3145 }
3146
3147 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3148 {
3149         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3150                               &hdev->affinity_mask);
3151
3152         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3153         hdev->affinity_notify.release = hclge_irq_affinity_release;
3154         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3155                                   &hdev->affinity_notify);
3156 }
3157
3158 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3159 {
3160         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3161         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3162 }
3163
3164 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3165 {
3166         int ret;
3167
3168         hclge_get_misc_vector(hdev);
3169
3170         /* this would be explicitly freed in the end */
3171         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3172                  HCLGE_NAME, pci_name(hdev->pdev));
3173         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3174                           0, hdev->misc_vector.name, hdev);
3175         if (ret) {
3176                 hclge_free_vector(hdev, 0);
3177                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3178                         hdev->misc_vector.vector_irq);
3179         }
3180
3181         return ret;
3182 }
3183
3184 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3185 {
3186         free_irq(hdev->misc_vector.vector_irq, hdev);
3187         hclge_free_vector(hdev, 0);
3188 }
3189
3190 int hclge_notify_client(struct hclge_dev *hdev,
3191                         enum hnae3_reset_notify_type type)
3192 {
3193         struct hnae3_client *client = hdev->nic_client;
3194         u16 i;
3195
3196         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3197                 return 0;
3198
3199         if (!client->ops->reset_notify)
3200                 return -EOPNOTSUPP;
3201
3202         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3203                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3204                 int ret;
3205
3206                 ret = client->ops->reset_notify(handle, type);
3207                 if (ret) {
3208                         dev_err(&hdev->pdev->dev,
3209                                 "notify nic client failed %d(%d)\n", type, ret);
3210                         return ret;
3211                 }
3212         }
3213
3214         return 0;
3215 }
3216
3217 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3218                                     enum hnae3_reset_notify_type type)
3219 {
3220         struct hnae3_client *client = hdev->roce_client;
3221         int ret = 0;
3222         u16 i;
3223
3224         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3225                 return 0;
3226
3227         if (!client->ops->reset_notify)
3228                 return -EOPNOTSUPP;
3229
3230         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3231                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3232
3233                 ret = client->ops->reset_notify(handle, type);
3234                 if (ret) {
3235                         dev_err(&hdev->pdev->dev,
3236                                 "notify roce client failed %d(%d)",
3237                                 type, ret);
3238                         return ret;
3239                 }
3240         }
3241
3242         return ret;
3243 }
3244
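/* Poll the reset status register matching the current reset type until
 * hardware clears the busy bit, sleeping 100 ms between reads for up to
 * 350 iterations (roughly 35 seconds) before returning -EBUSY.
 */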
3245 static int hclge_reset_wait(struct hclge_dev *hdev)
3246 {
3247 #define HCLGE_RESET_WAIT_MS     100
3248 #define HCLGE_RESET_WAIT_CNT    350
3249
3250         u32 val, reg, reg_bit;
3251         u32 cnt = 0;
3252
3253         switch (hdev->reset_type) {
3254         case HNAE3_IMP_RESET:
3255                 reg = HCLGE_GLOBAL_RESET_REG;
3256                 reg_bit = HCLGE_IMP_RESET_BIT;
3257                 break;
3258         case HNAE3_GLOBAL_RESET:
3259                 reg = HCLGE_GLOBAL_RESET_REG;
3260                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3261                 break;
3262         case HNAE3_FUNC_RESET:
3263                 reg = HCLGE_FUN_RST_ING;
3264                 reg_bit = HCLGE_FUN_RST_ING_B;
3265                 break;
3266         default:
3267                 dev_err(&hdev->pdev->dev,
3268                         "Wait for unsupported reset type: %d\n",
3269                         hdev->reset_type);
3270                 return -EINVAL;
3271         }
3272
3273         val = hclge_read_dev(&hdev->hw, reg);
3274         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3275                 msleep(HCLGE_RESET_WAIT_MS);
3276                 val = hclge_read_dev(&hdev->hw, reg);
3277                 cnt++;
3278         }
3279
3280         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3281                 dev_warn(&hdev->pdev->dev,
3282                          "Wait for reset timeout: %d\n", hdev->reset_type);
3283                 return -EBUSY;
3284         }
3285
3286         return 0;
3287 }
3288
3289 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3290 {
3291         struct hclge_vf_rst_cmd *req;
3292         struct hclge_desc desc;
3293
3294         req = (struct hclge_vf_rst_cmd *)desc.data;
3295         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3296         req->dest_vfid = func_id;
3297
3298         if (reset)
3299                 req->vf_rst = 0x1;
3300
3301         return hclge_cmd_send(&hdev->hw, &desc, 1);
3302 }
3303
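/* Walk all VF vports (they follow the PF/VMDq vports in hdev->vport) and
 * set or clear their FUNC_RST_ING state. When asserting reset, also try
 * to inform each alive VF so its driver can quiesce itself; failure to
 * inform is only a warning since the VF driver may not be loaded.
 */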
3304 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3305 {
3306         int i;
3307
3308         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3309                 struct hclge_vport *vport = &hdev->vport[i];
3310                 int ret;
3311
3312                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3313                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3314                 if (ret) {
3315                         dev_err(&hdev->pdev->dev,
3316                                 "set vf(%u) rst failed %d!\n",
3317                                 vport->vport_id, ret);
3318                         return ret;
3319                 }
3320
3321                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3322                         continue;
3323
3324                 /* Inform VF to process the reset.
3325                  * hclge_inform_reset_assert_to_vf may fail if VF
3326                  * driver is not loaded.
3327                  */
3328                 ret = hclge_inform_reset_assert_to_vf(vport);
3329                 if (ret)
3330                         dev_warn(&hdev->pdev->dev,
3331                                  "inform reset to vf(%u) failed %d!\n",
3332                                  vport->vport_id, ret);
3333         }
3334
3335         return 0;
3336 }
3337
3338 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3339 {
3340         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3341             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3342             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3343                 return;
3344
3345         hclge_mbx_handler(hdev);
3346
3347         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3348 }
3349
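/* Before a PF or FLR reset is asserted, poll the firmware until it
 * reports that all VFs are ready (have stopped IO), servicing the
 * mailbox between polls so VFs can still be told to bring their
 * netdevs down. Old firmware without this query gets a fixed delay.
 */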
3350 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3351 {
3352         struct hclge_pf_rst_sync_cmd *req;
3353         struct hclge_desc desc;
3354         int cnt = 0;
3355         int ret;
3356
3357         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3358         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3359
3360         do {
3361                 /* VF needs to bring the netdev down via mbx during PF or FLR reset */
3362                 hclge_mailbox_service_task(hdev);
3363
3364                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3365                 /* to be compatible with old firmware, wait
3366                  * 100 ms for the VF to stop IO
3367                  */
3368                 if (ret == -EOPNOTSUPP) {
3369                         msleep(HCLGE_RESET_SYNC_TIME);
3370                         return;
3371                 } else if (ret) {
3372                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3373                                  ret);
3374                         return;
3375                 } else if (req->all_vf_ready) {
3376                         return;
3377                 }
3378                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3379                 hclge_cmd_reuse_desc(&desc, true);
3380         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3381
3382         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3383 }
3384
3385 void hclge_report_hw_error(struct hclge_dev *hdev,
3386                            enum hnae3_hw_error_type type)
3387 {
3388         struct hnae3_client *client = hdev->nic_client;
3389         u16 i;
3390
3391         if (!client || !client->ops->process_hw_error ||
3392             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3393                 return;
3394
3395         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3396                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3397 }
3398
3399 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3400 {
3401         u32 reg_val;
3402
3403         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3404         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3405                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3406                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3407                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3408         }
3409
3410         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3411                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3412                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3413                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3414         }
3415 }
3416
3417 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3418 {
3419         struct hclge_desc desc;
3420         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3421         int ret;
3422
3423         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3424         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3425         req->fun_reset_vfid = func_id;
3426
3427         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3428         if (ret)
3429                 dev_err(&hdev->pdev->dev,
3430                         "send function reset cmd fail, status = %d\n", ret);
3431
3432         return ret;
3433 }
3434
3435 static void hclge_do_reset(struct hclge_dev *hdev)
3436 {
3437         struct hnae3_handle *handle = &hdev->vport[0].nic;
3438         struct pci_dev *pdev = hdev->pdev;
3439         u32 val;
3440
3441         if (hclge_get_hw_reset_stat(handle)) {
3442                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3443                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3444                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3445                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3446                 return;
3447         }
3448
3449         switch (hdev->reset_type) {
3450         case HNAE3_GLOBAL_RESET:
3451                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3452                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3453                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3454                 dev_info(&pdev->dev, "Global Reset requested\n");
3455                 break;
3456         case HNAE3_FUNC_RESET:
3457                 dev_info(&pdev->dev, "PF Reset requested\n");
3458                 /* schedule again to check later */
3459                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3460                 hclge_reset_task_schedule(hdev);
3461                 break;
3462         default:
3463                 dev_warn(&pdev->dev,
3464                          "Unsupported reset type: %d\n", hdev->reset_type);
3465                 break;
3466         }
3467 }
3468
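/* Resolve any UNKNOWN reset request into a concrete type by decoding the
 * MSI-X error sources, then return the highest-priority reset pending in
 * *addr (IMP > global > func > FLR), clearing it together with any
 * lower-level requests it supersedes.
 */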
3469 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3470                                                    unsigned long *addr)
3471 {
3472         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3473         struct hclge_dev *hdev = ae_dev->priv;
3474
3475         /* first, resolve any unknown reset type to the known type(s) */
3476         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3477                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3478                                         HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3479                 /* we will intentionally ignore any errors from this function
3480                  *  as we will end up in *some* reset request in any case
3481                  */
3482                 if (hclge_handle_hw_msix_error(hdev, addr))
3483                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3484                                  msix_sts_reg);
3485
3486                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3487                 /* We deferred the clearing of the error event which caused the
3488                  * interrupt since it was not possible to do that in
3489                  * interrupt context (and this is the reason we introduced the
3490                  * new UNKNOWN reset type). Now that the errors have been
3491                  * handled and cleared in hardware, we can safely enable
3492                  * interrupts. This is an exception to the norm.
3493                  */
3494                 hclge_enable_vector(&hdev->misc_vector, true);
3495         }
3496
3497         /* return the highest priority reset level amongst all */
3498         if (test_bit(HNAE3_IMP_RESET, addr)) {
3499                 rst_level = HNAE3_IMP_RESET;
3500                 clear_bit(HNAE3_IMP_RESET, addr);
3501                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3502                 clear_bit(HNAE3_FUNC_RESET, addr);
3503         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3504                 rst_level = HNAE3_GLOBAL_RESET;
3505                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3506                 clear_bit(HNAE3_FUNC_RESET, addr);
3507         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3508                 rst_level = HNAE3_FUNC_RESET;
3509                 clear_bit(HNAE3_FUNC_RESET, addr);
3510         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3511                 rst_level = HNAE3_FLR_RESET;
3512                 clear_bit(HNAE3_FLR_RESET, addr);
3513         }
3514
3515         if (hdev->reset_type != HNAE3_NONE_RESET &&
3516             rst_level < hdev->reset_type)
3517                 return HNAE3_NONE_RESET;
3518
3519         return rst_level;
3520 }
3521
3522 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3523 {
3524         u32 clearval = 0;
3525
3526         switch (hdev->reset_type) {
3527         case HNAE3_IMP_RESET:
3528                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3529                 break;
3530         case HNAE3_GLOBAL_RESET:
3531                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3532                 break;
3533         default:
3534                 break;
3535         }
3536
3537         if (!clearval)
3538                 return;
3539
3540         /* For revision 0x20, the reset interrupt source
3541          * can only be cleared after the hardware reset is done
3542          */
3543         if (hdev->pdev->revision == 0x20)
3544                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3545                                 clearval);
3546
3547         hclge_enable_vector(&hdev->misc_vector, true);
3548 }
3549
3550 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3551 {
3552         u32 reg_val;
3553
3554         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3555         if (enable)
3556                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3557         else
3558                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3559
3560         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3561 }
3562
3563 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3564 {
3565         int ret;
3566
3567         ret = hclge_set_all_vf_rst(hdev, true);
3568         if (ret)
3569                 return ret;
3570
3571         hclge_func_reset_sync_vf(hdev);
3572
3573         return 0;
3574 }
3575
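/* Per-type preparation before waiting for the hardware reset: for a
 * function reset, sync with the VFs and ask firmware to assert the reset;
 * for FLR, only sync with the VFs; for an IMP reset, handle pending IMP
 * errors and re-arm the reset interrupt. Finally handshake with hardware
 * that the preparatory work is done.
 */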
3576 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3577 {
3578         u32 reg_val;
3579         int ret = 0;
3580
3581         switch (hdev->reset_type) {
3582         case HNAE3_FUNC_RESET:
3583                 ret = hclge_func_reset_notify_vf(hdev);
3584                 if (ret)
3585                         return ret;
3586
3587                 ret = hclge_func_reset_cmd(hdev, 0);
3588                 if (ret) {
3589                         dev_err(&hdev->pdev->dev,
3590                                 "asserting function reset fail %d!\n", ret);
3591                         return ret;
3592                 }
3593
3594                 /* After performing PF reset, it is not necessary to do the
3595                  * mailbox handling or send any command to firmware, because
3596                  * any mailbox handling or command to firmware is only valid
3597                  * after hclge_cmd_init is called.
3598                  */
3599                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3600                 hdev->rst_stats.pf_rst_cnt++;
3601                 break;
3602         case HNAE3_FLR_RESET:
3603                 ret = hclge_func_reset_notify_vf(hdev);
3604                 if (ret)
3605                         return ret;
3606                 break;
3607         case HNAE3_IMP_RESET:
3608                 hclge_handle_imp_error(hdev);
3609                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3610                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3611                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3612                 break;
3613         default:
3614                 break;
3615         }
3616
3617         /* inform hardware that preparatory work is done */
3618         msleep(HCLGE_RESET_SYNC_TIME);
3619         hclge_reset_handshake(hdev, true);
3620         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3621
3622         return ret;
3623 }
3624
3625 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3626 {
3627 #define MAX_RESET_FAIL_CNT 5
3628
3629         if (hdev->reset_pending) {
3630                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3631                          hdev->reset_pending);
3632                 return true;
3633         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3634                    HCLGE_RESET_INT_M) {
3635                 dev_info(&hdev->pdev->dev,
3636                          "reset failed because new reset interrupt\n");
3637                 hclge_clear_reset_cause(hdev);
3638                 return false;
3639         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3640                 hdev->rst_stats.reset_fail_cnt++;
3641                 set_bit(hdev->reset_type, &hdev->reset_pending);
3642                 dev_info(&hdev->pdev->dev,
3643                          "re-schedule reset task(%u)\n",
3644                          hdev->rst_stats.reset_fail_cnt);
3645                 return true;
3646         }
3647
3648         hclge_clear_reset_cause(hdev);
3649
3650         /* recover the handshake status when reset fails */
3651         hclge_reset_handshake(hdev, true);
3652
3653         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3654
3655         hclge_dbg_dump_rst_info(hdev);
3656
3657         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3658
3659         return false;
3660 }
3661
3662 static int hclge_set_rst_done(struct hclge_dev *hdev)
3663 {
3664         struct hclge_pf_rst_done_cmd *req;
3665         struct hclge_desc desc;
3666         int ret;
3667
3668         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3669         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3670         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3671
3672         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3673         /* To be compatible with the old firmware, which does not support
3674          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3675          * return success
3676          */
3677         if (ret == -EOPNOTSUPP) {
3678                 dev_warn(&hdev->pdev->dev,
3679                          "current firmware does not support command(0x%x)!\n",
3680                          HCLGE_OPC_PF_RST_DONE);
3681                 return 0;
3682         } else if (ret) {
3683                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3684                         ret);
3685         }
3686
3687         return ret;
3688 }
3689
3690 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3691 {
3692         int ret = 0;
3693
3694         switch (hdev->reset_type) {
3695         case HNAE3_FUNC_RESET:
3696                 /* fall through */
3697         case HNAE3_FLR_RESET:
3698                 ret = hclge_set_all_vf_rst(hdev, false);
3699                 break;
3700         case HNAE3_GLOBAL_RESET:
3701                 /* fall through */
3702         case HNAE3_IMP_RESET:
3703                 ret = hclge_set_rst_done(hdev);
3704                 break;
3705         default:
3706                 break;
3707         }
3708
3709         /* clear up the handshake status after re-initialization is done */
3710         hclge_reset_handshake(hdev, false);
3711
3712         return ret;
3713 }
3714
3715 static int hclge_reset_stack(struct hclge_dev *hdev)
3716 {
3717         int ret;
3718
3719         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3720         if (ret)
3721                 return ret;
3722
3723         ret = hclge_reset_ae_dev(hdev->ae_dev);
3724         if (ret)
3725                 return ret;
3726
3727         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3728         if (ret)
3729                 return ret;
3730
3731         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3732 }
3733
3734 static int hclge_reset_prepare(struct hclge_dev *hdev)
3735 {
3736         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3737         int ret;
3738
3739         /* Initialize ae_dev reset status as well, in case enet layer wants to
3740          * know if device is undergoing reset
3741          */
3742         ae_dev->reset_type = hdev->reset_type;
3743         hdev->rst_stats.reset_cnt++;
3744         /* perform reset of the stack & ae device for a client */
3745         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3746         if (ret)
3747                 return ret;
3748
3749         rtnl_lock();
3750         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3751         rtnl_unlock();
3752         if (ret)
3753                 return ret;
3754
3755         return hclge_reset_prepare_wait(hdev);
3756 }
3757
3758 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3759 {
3760         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3761         enum hnae3_reset_type reset_level;
3762         int ret;
3763
3764         hdev->rst_stats.hw_reset_done_cnt++;
3765
3766         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3767         if (ret)
3768                 return ret;
3769
3770         rtnl_lock();
3771         ret = hclge_reset_stack(hdev);
3772         rtnl_unlock();
3773         if (ret)
3774                 return ret;
3775
3776         hclge_clear_reset_cause(hdev);
3777
3778         ret = hclge_reset_prepare_up(hdev);
3779         if (ret)
3780                 return ret;
3781
3782
3783         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3784         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3785          * times
3786          */
3787         if (ret &&
3788             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3789                 return ret;
3790
3791         rtnl_lock();
3792         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3793         rtnl_unlock();
3794         if (ret)
3795                 return ret;
3796
3797         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3798         if (ret)
3799                 return ret;
3800
3801         hdev->last_reset_time = jiffies;
3802         hdev->rst_stats.reset_fail_cnt = 0;
3803         hdev->rst_stats.reset_done_cnt++;
3804         ae_dev->reset_type = HNAE3_NONE_RESET;
3805         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3806
3807         /* if default_reset_request has a higher level reset request,
3808          * it should be handled as soon as possible, since some errors
3809          * need this kind of reset to fix.
3810          */
3811         reset_level = hclge_get_reset_level(ae_dev,
3812                                             &hdev->default_reset_request);
3813         if (reset_level != HNAE3_NONE_RESET)
3814                 set_bit(reset_level, &hdev->reset_request);
3815
3816         return 0;
3817 }
3818
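/* Top-level reset flow: prepare (notify clients and assert the reset),
 * wait for hardware to finish, then rebuild the stack. Any failure goes
 * through the error handler, which may re-schedule the reset task.
 */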
3819 static void hclge_reset(struct hclge_dev *hdev)
3820 {
3821         if (hclge_reset_prepare(hdev))
3822                 goto err_reset;
3823
3824         if (hclge_reset_wait(hdev))
3825                 goto err_reset;
3826
3827         if (hclge_reset_rebuild(hdev))
3828                 goto err_reset;
3829
3830         return;
3831
3832 err_reset:
3833         if (hclge_reset_err_handle(hdev))
3834                 hclge_reset_task_schedule(hdev);
3835 }
3836
3837 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3838 {
3839         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3840         struct hclge_dev *hdev = ae_dev->priv;
3841
3842         /* We might end up getting called broadly because of the 2 cases below:
3843          * 1. A recoverable error was conveyed through APEI and the only way
3844          *    to bring normalcy is to reset.
3845          * 2. A new reset request from the stack due to timeout
3846          *
3847          * For the first case, the error event might not have an ae handle
3848          * available. Check if this is a new reset request and we are not here
3849          * just because the last reset attempt did not succeed and the watchdog
3850          * hit us again. We will know this if the last reset request did not
3851          * occur very recently (watchdog timer = 5*HZ, let us check after a
3852          * sufficiently large time, say 4*5*HZ). In case of a new request we
3853          * reset the "reset level" to PF reset. If it is a repeat of the most
3854          * recent reset request then we want to throttle it; therefore, we
3855          * will not allow it again before 3*HZ has elapsed.
3856          */
3857         if (!handle)
3858                 handle = &hdev->vport[0].nic;
3859
3860         if (time_before(jiffies, (hdev->last_reset_time +
3861                                   HCLGE_RESET_INTERVAL))) {
3862                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3863                 return;
3864         } else if (hdev->default_reset_request) {
3865                 hdev->reset_level =
3866                         hclge_get_reset_level(ae_dev,
3867                                               &hdev->default_reset_request);
3868         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3869                 hdev->reset_level = HNAE3_FUNC_RESET;
3870         }
3871
3872         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3873                  hdev->reset_level);
3874
3875         /* request reset & schedule reset task */
3876         set_bit(hdev->reset_level, &hdev->reset_request);
3877         hclge_reset_task_schedule(hdev);
3878
3879         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3880                 hdev->reset_level++;
3881 }
3882
3883 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3884                                         enum hnae3_reset_type rst_type)
3885 {
3886         struct hclge_dev *hdev = ae_dev->priv;
3887
3888         set_bit(rst_type, &hdev->default_reset_request);
3889 }
3890
3891 static void hclge_reset_timer(struct timer_list *t)
3892 {
3893         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3894
3895         /* if default_reset_request has no value, it means that this reset
3896          * request has already been handled, so just return here
3897          */
3898         if (!hdev->default_reset_request)
3899                 return;
3900
3901         dev_info(&hdev->pdev->dev,
3902                  "triggering reset in reset timer\n");
3903         hclge_reset_event(hdev->pdev, NULL);
3904 }
3905
3906 static void hclge_reset_subtask(struct hclge_dev *hdev)
3907 {
3908         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3909
3910         /* check if there is any ongoing reset in the hardware. This status
3911          * can be checked from reset_pending. If there is, then we need to
3912          * wait for hardware to complete the reset.
3913          *    a. If we are able to figure out in reasonable time that hardware
3914          *       has fully reset, then we can proceed with the driver and
3915          *       client reset.
3916          *    b. Else, we can come back later to check this status, so
3917          *       re-schedule now.
3918          */
3919         hdev->last_reset_time = jiffies;
3920         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3921         if (hdev->reset_type != HNAE3_NONE_RESET)
3922                 hclge_reset(hdev);
3923
3924         /* check if we got any *new* reset requests to be honored */
3925         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3926         if (hdev->reset_type != HNAE3_NONE_RESET)
3927                 hclge_do_reset(hdev);
3928
3929         hdev->reset_type = HNAE3_NONE_RESET;
3930 }
3931
3932 static void hclge_reset_service_task(struct hclge_dev *hdev)
3933 {
3934         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3935                 return;
3936
3937         down(&hdev->reset_sem);
3938         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3939
3940         hclge_reset_subtask(hdev);
3941
3942         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3943         up(&hdev->reset_sem);
3944 }
3945
3946 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3947 {
3948         int i;
3949
3950         /* start from vport 1 since the PF is always alive */
3951         for (i = 1; i < hdev->num_alloc_vport; i++) {
3952                 struct hclge_vport *vport = &hdev->vport[i];
3953
3954                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3955                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3956
3957                 /* If vf is not alive, set to default value */
3958                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3959                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3960         }
3961 }
3962
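/* Periodic service work: the link state is refreshed on every run, while
 * the heavier updates (vport alive, statistics, port info, VLAN and aRFS
 * housekeeping) are throttled to roughly once per second based on
 * last_serv_processed.
 */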
3963 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3964 {
3965         unsigned long delta = round_jiffies_relative(HZ);
3966
3967         /* Always handle the link updating to make sure link state is
3968          * updated when it is triggered by mbx.
3969          */
3970         hclge_update_link_status(hdev);
3971
3972         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3973                 delta = jiffies - hdev->last_serv_processed;
3974
3975                 if (delta < round_jiffies_relative(HZ)) {
3976                         delta = round_jiffies_relative(HZ) - delta;
3977                         goto out;
3978                 }
3979         }
3980
3981         hdev->serv_processed_cnt++;
3982         hclge_update_vport_alive(hdev);
3983
3984         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3985                 hdev->last_serv_processed = jiffies;
3986                 goto out;
3987         }
3988
3989         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3990                 hclge_update_stats_for_all(hdev);
3991
3992         hclge_update_port_info(hdev);
3993         hclge_sync_vlan_filter(hdev);
3994
3995         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3996                 hclge_rfs_filter_expire(hdev);
3997
3998         hdev->last_serv_processed = jiffies;
3999
4000 out:
4001         hclge_task_schedule(hdev, delta);
4002 }
4003
4004 static void hclge_service_task(struct work_struct *work)
4005 {
4006         struct hclge_dev *hdev =
4007                 container_of(work, struct hclge_dev, service_task.work);
4008
4009         hclge_reset_service_task(hdev);
4010         hclge_mailbox_service_task(hdev);
4011         hclge_periodic_service_task(hdev);
4012
4013         /* Handle reset and mbx again in case periodical task delays the
4014          * handling by calling hclge_task_schedule() in
4015          * hclge_periodic_service_task().
4016          */
4017         hclge_reset_service_task(hdev);
4018         hclge_mailbox_service_task(hdev);
4019 }
4020
4021 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4022 {
4023         /* VF handle has no client */
4024         if (!handle->client)
4025                 return container_of(handle, struct hclge_vport, nic);
4026         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4027                 return container_of(handle, struct hclge_vport, roce);
4028         else
4029                 return container_of(handle, struct hclge_vport, nic);
4030 }
4031
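/* Hand out up to vector_num unused MSI-X vectors to the requesting vport,
 * skipping vector 0 (reserved for the misc interrupt) and filling in the
 * interrupt number and per-vector register address for each allocation;
 * returns how many vectors were actually allocated.
 */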
4032 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4033                             struct hnae3_vector_info *vector_info)
4034 {
4035         struct hclge_vport *vport = hclge_get_vport(handle);
4036         struct hnae3_vector_info *vector = vector_info;
4037         struct hclge_dev *hdev = vport->back;
4038         int alloc = 0;
4039         int i, j;
4040
4041         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4042         vector_num = min(hdev->num_msi_left, vector_num);
4043
4044         for (j = 0; j < vector_num; j++) {
4045                 for (i = 1; i < hdev->num_msi; i++) {
4046                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4047                                 vector->vector = pci_irq_vector(hdev->pdev, i);
4048                                 vector->io_addr = hdev->hw.io_base +
4049                                         HCLGE_VECTOR_REG_BASE +
4050                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4051                                         vport->vport_id *
4052                                         HCLGE_VECTOR_VF_OFFSET;
4053                                 hdev->vector_status[i] = vport->vport_id;
4054                                 hdev->vector_irq[i] = vector->vector;
4055
4056                                 vector++;
4057                                 alloc++;
4058
4059                                 break;
4060                         }
4061                 }
4062         }
4063         hdev->num_msi_left -= alloc;
4064         hdev->num_msi_used += alloc;
4065
4066         return alloc;
4067 }
4068
4069 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4070 {
4071         int i;
4072
4073         for (i = 0; i < hdev->num_msi; i++)
4074                 if (vector == hdev->vector_irq[i])
4075                         return i;
4076
4077         return -EINVAL;
4078 }
4079
4080 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4081 {
4082         struct hclge_vport *vport = hclge_get_vport(handle);
4083         struct hclge_dev *hdev = vport->back;
4084         int vector_id;
4085
4086         vector_id = hclge_get_vector_index(hdev, vector);
4087         if (vector_id < 0) {
4088                 dev_err(&hdev->pdev->dev,
4089                         "Get vector index fail. vector = %d\n", vector);
4090                 return vector_id;
4091         }
4092
4093         hclge_free_vector(hdev, vector_id);
4094
4095         return 0;
4096 }
4097
4098 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4099 {
4100         return HCLGE_RSS_KEY_SIZE;
4101 }
4102
4103 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4104 {
4105         return HCLGE_RSS_IND_TBL_SIZE;
4106 }
4107
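/* Program the RSS hash algorithm and key. The key is written in chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per chunk, with the
 * chunk index carried in the hash_config key-offset field.
 */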
4108 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4109                                   const u8 hfunc, const u8 *key)
4110 {
4111         struct hclge_rss_config_cmd *req;
4112         unsigned int key_offset = 0;
4113         struct hclge_desc desc;
4114         int key_counts;
4115         int key_size;
4116         int ret;
4117
4118         key_counts = HCLGE_RSS_KEY_SIZE;
4119         req = (struct hclge_rss_config_cmd *)desc.data;
4120
4121         while (key_counts) {
4122                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4123                                            false);
4124
4125                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4126                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4127
4128                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4129                 memcpy(req->hash_key,
4130                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4131
4132                 key_counts -= key_size;
4133                 key_offset++;
4134                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4135                 if (ret) {
4136                         dev_err(&hdev->pdev->dev,
4137                                 "Configure RSS config fail, status = %d\n",
4138                                 ret);
4139                         return ret;
4140                 }
4141         }
4142         return 0;
4143 }
4144
4145 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4146 {
4147         struct hclge_rss_indirection_table_cmd *req;
4148         struct hclge_desc desc;
4149         int i, j;
4150         int ret;
4151
4152         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4153
4154         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4155                 hclge_cmd_setup_basic_desc
4156                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4157
4158                 req->start_table_index =
4159                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4160                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4161
4162                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4163                         req->rss_result[j] =
4164                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4165
4166                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4167                 if (ret) {
4168                         dev_err(&hdev->pdev->dev,
4169                                 "Configure rss indir table fail, status = %d\n",
4170                                 ret);
4171                         return ret;
4172                 }
4173         }
4174         return 0;
4175 }
4176
4177 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4178                                  u16 *tc_size, u16 *tc_offset)
4179 {
4180         struct hclge_rss_tc_mode_cmd *req;
4181         struct hclge_desc desc;
4182         int ret;
4183         int i;
4184
4185         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4186         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4187
4188         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4189                 u16 mode = 0;
4190
4191                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4192                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4193                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4194                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4195                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4196
4197                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4198         }
4199
4200         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4201         if (ret)
4202                 dev_err(&hdev->pdev->dev,
4203                         "Configure rss tc mode fail, status = %d\n", ret);
4204
4205         return ret;
4206 }
4207
4208 static void hclge_get_rss_type(struct hclge_vport *vport)
4209 {
4210         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4211             vport->rss_tuple_sets.ipv4_udp_en ||
4212             vport->rss_tuple_sets.ipv4_sctp_en ||
4213             vport->rss_tuple_sets.ipv6_tcp_en ||
4214             vport->rss_tuple_sets.ipv6_udp_en ||
4215             vport->rss_tuple_sets.ipv6_sctp_en)
4216                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4217         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4218                  vport->rss_tuple_sets.ipv6_fragment_en)
4219                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4220         else
4221                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4222 }
4223
4224 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4225 {
4226         struct hclge_rss_input_tuple_cmd *req;
4227         struct hclge_desc desc;
4228         int ret;
4229
4230         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4231
4232         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4233
4234         /* Get the tuple cfg from pf */
4235         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4236         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4237         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4238         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4239         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4240         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4241         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4242         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4243         hclge_get_rss_type(&hdev->vport[0]);
4244         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4245         if (ret)
4246                 dev_err(&hdev->pdev->dev,
4247                         "Configure rss input fail, status = %d\n", ret);
4248         return ret;
4249 }
4250
4251 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4252                          u8 *key, u8 *hfunc)
4253 {
4254         struct hclge_vport *vport = hclge_get_vport(handle);
4255         int i;
4256
4257         /* Get hash algorithm */
4258         if (hfunc) {
4259                 switch (vport->rss_algo) {
4260                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4261                         *hfunc = ETH_RSS_HASH_TOP;
4262                         break;
4263                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4264                         *hfunc = ETH_RSS_HASH_XOR;
4265                         break;
4266                 default:
4267                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4268                         break;
4269                 }
4270         }
4271
4272         /* Get the RSS Key required by the user */
4273         if (key)
4274                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4275
4276         /* Get indirect table */
4277         if (indir)
4278                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4279                         indir[i] = vport->rss_indirection_tbl[i];
4280
4281         return 0;
4282 }
4283
4284 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4285                          const u8 *key, const u8 hfunc)
4286 {
4287         struct hclge_vport *vport = hclge_get_vport(handle);
4288         struct hclge_dev *hdev = vport->back;
4289         u8 hash_algo;
4290         int ret, i;
4291
4292         /* Set the RSS Hash Key if specified by the user */
4293         if (key) {
4294                 switch (hfunc) {
4295                 case ETH_RSS_HASH_TOP:
4296                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4297                         break;
4298                 case ETH_RSS_HASH_XOR:
4299                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4300                         break;
4301                 case ETH_RSS_HASH_NO_CHANGE:
4302                         hash_algo = vport->rss_algo;
4303                         break;
4304                 default:
4305                         return -EINVAL;
4306                 }
4307
4308                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4309                 if (ret)
4310                         return ret;
4311
4312                 /* Update the shadow RSS key with the user specified key */
4313                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4314                 vport->rss_algo = hash_algo;
4315         }
4316
4317         /* Update the shadow RSS table with user specified qids */
4318         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4319                 vport->rss_indirection_tbl[i] = indir[i];
4320
4321         /* Update the hardware */
4322         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4323 }
4324
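/* Translate the ethtool RXH_* hash fields into the HCLGE tuple bits. As an
 * illustration (the device name below is just a placeholder), a request like
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn
 * sets RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 in nfc->data,
 * which maps here to HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT.
 */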
4325 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4326 {
4327         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4328
4329         if (nfc->data & RXH_L4_B_2_3)
4330                 hash_sets |= HCLGE_D_PORT_BIT;
4331         else
4332                 hash_sets &= ~HCLGE_D_PORT_BIT;
4333
4334         if (nfc->data & RXH_IP_SRC)
4335                 hash_sets |= HCLGE_S_IP_BIT;
4336         else
4337                 hash_sets &= ~HCLGE_S_IP_BIT;
4338
4339         if (nfc->data & RXH_IP_DST)
4340                 hash_sets |= HCLGE_D_IP_BIT;
4341         else
4342                 hash_sets &= ~HCLGE_D_IP_BIT;
4343
4344         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4345                 hash_sets |= HCLGE_V_TAG_BIT;
4346
4347         return hash_sets;
4348 }
4349
4350 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4351                                struct ethtool_rxnfc *nfc)
4352 {
4353         struct hclge_vport *vport = hclge_get_vport(handle);
4354         struct hclge_dev *hdev = vport->back;
4355         struct hclge_rss_input_tuple_cmd *req;
4356         struct hclge_desc desc;
4357         u8 tuple_sets;
4358         int ret;
4359
4360         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4361                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4362                 return -EINVAL;
4363
4364         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4365         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4366
4367         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4368         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4369         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4370         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4371         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4372         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4373         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4374         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4375
4376         tuple_sets = hclge_get_rss_hash_bits(nfc);
4377         switch (nfc->flow_type) {
4378         case TCP_V4_FLOW:
4379                 req->ipv4_tcp_en = tuple_sets;
4380                 break;
4381         case TCP_V6_FLOW:
4382                 req->ipv6_tcp_en = tuple_sets;
4383                 break;
4384         case UDP_V4_FLOW:
4385                 req->ipv4_udp_en = tuple_sets;
4386                 break;
4387         case UDP_V6_FLOW:
4388                 req->ipv6_udp_en = tuple_sets;
4389                 break;
4390         case SCTP_V4_FLOW:
4391                 req->ipv4_sctp_en = tuple_sets;
4392                 break;
4393         case SCTP_V6_FLOW:
4394                 if ((nfc->data & RXH_L4_B_0_1) ||
4395                     (nfc->data & RXH_L4_B_2_3))
4396                         return -EINVAL;
4397
4398                 req->ipv6_sctp_en = tuple_sets;
4399                 break;
4400         case IPV4_FLOW:
4401                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4402                 break;
4403         case IPV6_FLOW:
4404                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4405                 break;
4406         default:
4407                 return -EINVAL;
4408         }
4409
4410         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4411         if (ret) {
4412                 dev_err(&hdev->pdev->dev,
4413                         "Set rss tuple fail, status = %d\n", ret);
4414                 return ret;
4415         }
4416
4417         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4418         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4419         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4420         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4421         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4422         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4423         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4424         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4425         hclge_get_rss_type(vport);
4426         return 0;
4427 }
4428
4429 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4430                                struct ethtool_rxnfc *nfc)
4431 {
4432         struct hclge_vport *vport = hclge_get_vport(handle);
4433         u8 tuple_sets;
4434
4435         nfc->data = 0;
4436
4437         switch (nfc->flow_type) {
4438         case TCP_V4_FLOW:
4439                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4440                 break;
4441         case UDP_V4_FLOW:
4442                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4443                 break;
4444         case TCP_V6_FLOW:
4445                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4446                 break;
4447         case UDP_V6_FLOW:
4448                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4449                 break;
4450         case SCTP_V4_FLOW:
4451                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4452                 break;
4453         case SCTP_V6_FLOW:
4454                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4455                 break;
4456         case IPV4_FLOW:
4457         case IPV6_FLOW:
4458                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4459                 break;
4460         default:
4461                 return -EINVAL;
4462         }
4463
4464         if (!tuple_sets)
4465                 return 0;
4466
4467         if (tuple_sets & HCLGE_D_PORT_BIT)
4468                 nfc->data |= RXH_L4_B_2_3;
4469         if (tuple_sets & HCLGE_S_PORT_BIT)
4470                 nfc->data |= RXH_L4_B_0_1;
4471         if (tuple_sets & HCLGE_D_IP_BIT)
4472                 nfc->data |= RXH_IP_DST;
4473         if (tuple_sets & HCLGE_S_IP_BIT)
4474                 nfc->data |= RXH_IP_SRC;
4475
4476         return 0;
4477 }
4478
4479 static int hclge_get_tc_size(struct hnae3_handle *handle)
4480 {
4481         struct hclge_vport *vport = hclge_get_vport(handle);
4482         struct hclge_dev *hdev = vport->back;
4483
4484         return hdev->rss_size_max;
4485 }
4486
4487 int hclge_rss_init_hw(struct hclge_dev *hdev)
4488 {
4489         struct hclge_vport *vport = hdev->vport;
4490         u8 *rss_indir = vport[0].rss_indirection_tbl;
4491         u16 rss_size = vport[0].alloc_rss_size;
4492         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4493         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4494         u8 *key = vport[0].rss_hash_key;
4495         u8 hfunc = vport[0].rss_algo;
4496         u16 tc_valid[HCLGE_MAX_TC_NUM];
4497         u16 roundup_size;
4498         unsigned int i;
4499         int ret;
4500
4501         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4502         if (ret)
4503                 return ret;
4504
4505         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4506         if (ret)
4507                 return ret;
4508
4509         ret = hclge_set_rss_input_tuple(hdev);
4510         if (ret)
4511                 return ret;
4512
4513         /* Each TC has the same queue size, and the tc_size set to hardware is
4514          * the log2 of the roundup power of two of rss_size; the actual queue
4515          * size is limited by the indirection table.
4516          */
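        /* For example, rss_size = 24 gives roundup_pow_of_two(24) = 32 and
         * ilog2(32) = 5, so the tc_size written to hardware would be 5.
         */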
4517         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4518                 dev_err(&hdev->pdev->dev,
4519                         "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4520                         rss_size);
4521                 return -EINVAL;
4522         }
4523
4524         roundup_size = roundup_pow_of_two(rss_size);
4525         roundup_size = ilog2(roundup_size);
4526
4527         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4528                 tc_valid[i] = 0;
4529
4530                 if (!(hdev->hw_tc_map & BIT(i)))
4531                         continue;
4532
4533                 tc_valid[i] = 1;
4534                 tc_size[i] = roundup_size;
4535                 tc_offset[i] = rss_size * i;
4536         }
4537
4538         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4539 }
4540
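/* Fill each vport's indirection table round-robin over its allocated RSS
 * queues; e.g. with alloc_rss_size = 4 the table reads 0, 1, 2, 3, 0, 1, ...
 * (the value 4 is only an illustrative example).
 */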
4541 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4542 {
4543         struct hclge_vport *vport = hdev->vport;
4544         int i, j;
4545
4546         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4547                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4548                         vport[j].rss_indirection_tbl[i] =
4549                                 i % vport[j].alloc_rss_size;
4550         }
4551 }
4552
4553 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4554 {
4555         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4556         struct hclge_vport *vport = hdev->vport;
4557
4558         if (hdev->pdev->revision >= 0x21)
4559                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4560
4561         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4562                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4563                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4564                 vport[i].rss_tuple_sets.ipv4_udp_en =
4565                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4566                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4567                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4568                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4569                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4570                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4571                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4572                 vport[i].rss_tuple_sets.ipv6_udp_en =
4573                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4574                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4575                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4576                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4577                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4578
4579                 vport[i].rss_algo = rss_algo;
4580
4581                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4582                        HCLGE_RSS_KEY_SIZE);
4583         }
4584
4585         hclge_rss_indir_init_cfg(hdev);
4586 }
4587
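/* Map (en == true) or unmap (en == false) every ring in ring_chain to the
 * given vector. The mappings are batched: each command carries at most
 * HCLGE_VECTOR_ELEMENTS_PER_CMD tqp_type_and_id entries, and any trailing
 * partial batch is flushed after the loop.
 */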
4588 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4589                                 int vector_id, bool en,
4590                                 struct hnae3_ring_chain_node *ring_chain)
4591 {
4592         struct hclge_dev *hdev = vport->back;
4593         struct hnae3_ring_chain_node *node;
4594         struct hclge_desc desc;
4595         struct hclge_ctrl_vector_chain_cmd *req =
4596                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4597         enum hclge_cmd_status status;
4598         enum hclge_opcode_type op;
4599         u16 tqp_type_and_id;
4600         int i;
4601
4602         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4603         hclge_cmd_setup_basic_desc(&desc, op, false);
4604         req->int_vector_id = vector_id;
4605
4606         i = 0;
4607         for (node = ring_chain; node; node = node->next) {
4608                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4609                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4610                                 HCLGE_INT_TYPE_S,
4611                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4612                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4613                                 HCLGE_TQP_ID_S, node->tqp_index);
4614                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4615                                 HCLGE_INT_GL_IDX_S,
4616                                 hnae3_get_field(node->int_gl_idx,
4617                                                 HNAE3_RING_GL_IDX_M,
4618                                                 HNAE3_RING_GL_IDX_S));
4619                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4620                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4621                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4622                         req->vfid = vport->vport_id;
4623
4624                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4625                         if (status) {
4626                                 dev_err(&hdev->pdev->dev,
4627                                         "Map TQP fail, status is %d.\n",
4628                                         status);
4629                                 return -EIO;
4630                         }
4631                         i = 0;
4632
4633                         hclge_cmd_setup_basic_desc(&desc, op, false);
4636                         req->int_vector_id = vector_id;
4637                 }
4638         }
4639
4640         if (i > 0) {
4641                 req->int_cause_num = i;
4642                 req->vfid = vport->vport_id;
4643                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4644                 if (status) {
4645                         dev_err(&hdev->pdev->dev,
4646                                 "Map TQP fail, status is %d.\n", status);
4647                         return -EIO;
4648                 }
4649         }
4650
4651         return 0;
4652 }
4653
4654 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4655                                     struct hnae3_ring_chain_node *ring_chain)
4656 {
4657         struct hclge_vport *vport = hclge_get_vport(handle);
4658         struct hclge_dev *hdev = vport->back;
4659         int vector_id;
4660
4661         vector_id = hclge_get_vector_index(hdev, vector);
4662         if (vector_id < 0) {
4663                 dev_err(&hdev->pdev->dev,
4664                         "failed to get vector index. vector=%d\n", vector);
4665                 return vector_id;
4666         }
4667
4668         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4669 }
4670
4671 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4672                                        struct hnae3_ring_chain_node *ring_chain)
4673 {
4674         struct hclge_vport *vport = hclge_get_vport(handle);
4675         struct hclge_dev *hdev = vport->back;
4676         int vector_id, ret;
4677
4678         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4679                 return 0;
4680
4681         vector_id = hclge_get_vector_index(hdev, vector);
4682         if (vector_id < 0) {
4683                 dev_err(&handle->pdev->dev,
4684                         "Get vector index fail. ret = %d\n", vector_id);
4685                 return vector_id;
4686         }
4687
4688         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4689         if (ret)
4690                 dev_err(&handle->pdev->dev,
4691                         "Unmap ring from vector fail. vector_id = %d, ret = %d\n",
4692                         vector_id, ret);
4693
4694         return ret;
4695 }
4696
4697 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4698                                       struct hclge_promisc_param *param)
4699 {
4700         struct hclge_promisc_cfg_cmd *req;
4701         struct hclge_desc desc;
4702         int ret;
4703
4704         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4705
4706         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4707         req->vf_id = param->vf_id;
4708
4709         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4710          * pdev revision 0x20; newer revisions support them. Setting these
4711          * two fields does not cause an error when the driver sends the
4712          * command to the firmware on revision 0x20.
4713          */
4714         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4715                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4716
4717         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4718         if (ret)
4719                 dev_err(&hdev->pdev->dev,
4720                         "Set promisc mode fail, status is %d.\n", ret);
4721
4722         return ret;
4723 }
4724
4725 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4726                                      bool en_uc, bool en_mc, bool en_bc,
4727                                      int vport_id)
4728 {
4729         if (!param)
4730                 return;
4731
4732         memset(param, 0, sizeof(struct hclge_promisc_param));
4733         if (en_uc)
4734                 param->enable = HCLGE_PROMISC_EN_UC;
4735         if (en_mc)
4736                 param->enable |= HCLGE_PROMISC_EN_MC;
4737         if (en_bc)
4738                 param->enable |= HCLGE_PROMISC_EN_BC;
4739         param->vf_id = vport_id;
4740 }
4741
4742 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4743                                  bool en_mc_pmc, bool en_bc_pmc)
4744 {
4745         struct hclge_dev *hdev = vport->back;
4746         struct hclge_promisc_param param;
4747
4748         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4749                                  vport->vport_id);
4750         return hclge_cmd_set_promisc_mode(hdev, &param);
4751 }
4752
4753 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4754                                   bool en_mc_pmc)
4755 {
4756         struct hclge_vport *vport = hclge_get_vport(handle);
4757         bool en_bc_pmc = true;
4758
4759         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4760          * is always bypassed. So broadcast promisc should be disabled until
4761          * the user enables promisc mode.
4762          */
4763         if (handle->pdev->revision == 0x20)
4764                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4765
4766         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4767                                             en_bc_pmc);
4768 }
4769
4770 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4771 {
4772         struct hclge_get_fd_mode_cmd *req;
4773         struct hclge_desc desc;
4774         int ret;
4775
4776         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4777
4778         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4779
4780         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4781         if (ret) {
4782                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4783                 return ret;
4784         }
4785
4786         *fd_mode = req->mode;
4787
4788         return ret;
4789 }
4790
4791 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4792                                    u32 *stage1_entry_num,
4793                                    u32 *stage2_entry_num,
4794                                    u16 *stage1_counter_num,
4795                                    u16 *stage2_counter_num)
4796 {
4797         struct hclge_get_fd_allocation_cmd *req;
4798         struct hclge_desc desc;
4799         int ret;
4800
4801         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4802
4803         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4804
4805         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4806         if (ret) {
4807                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4808                         ret);
4809                 return ret;
4810         }
4811
4812         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4813         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4814         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4815         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4816
4817         return ret;
4818 }
4819
4820 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4821 {
4822         struct hclge_set_fd_key_config_cmd *req;
4823         struct hclge_fd_key_cfg *stage;
4824         struct hclge_desc desc;
4825         int ret;
4826
4827         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4828
4829         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4830         stage = &hdev->fd_cfg.key_cfg[stage_num];
4831         req->stage = stage_num;
4832         req->key_select = stage->key_sel;
4833         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4834         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4835         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4836         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4837         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4838         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4839
4840         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4841         if (ret)
4842                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4843
4844         return ret;
4845 }
4846
4847 static int hclge_init_fd_config(struct hclge_dev *hdev)
4848 {
4849 #define LOW_2_WORDS             0x03
4850         struct hclge_fd_key_cfg *key_cfg;
4851         int ret;
4852
4853         if (!hnae3_dev_fd_supported(hdev))
4854                 return 0;
4855
4856         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4857         if (ret)
4858                 return ret;
4859
4860         switch (hdev->fd_cfg.fd_mode) {
4861         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4862                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4863                 break;
4864         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4865                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4866                 break;
4867         default:
4868                 dev_err(&hdev->pdev->dev,
4869                         "Unsupported flow director mode %u\n",
4870                         hdev->fd_cfg.fd_mode);
4871                 return -EOPNOTSUPP;
4872         }
4873
4874         hdev->fd_cfg.proto_support =
4875                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4876                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4877         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4878         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4879         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4880         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4881         key_cfg->outer_sipv6_word_en = 0;
4882         key_cfg->outer_dipv6_word_en = 0;
4883
4884         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4885                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4886                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4887                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4888
4889         /* If the max 400-bit key is used, we can also support ether type tuples */
4890         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4891                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4892                 key_cfg->tuple_active |=
4893                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4894         }
4895
4896         /* roce_type is used to filter roce frames
4897          * dst_vport is used to specify the rule
4898          */
4899         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4900
4901         ret = hclge_get_fd_allocation(hdev,
4902                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4903                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4904                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4905                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4906         if (ret)
4907                 return ret;
4908
4909         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4910 }
4911
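/* Write one half (X or Y, selected by sel_x) of a flow director TCAM entry.
 * The key is split across three command descriptors chained with
 * HCLGE_CMD_FLAG_NEXT, and the entry valid bit is only driven from the X-side
 * write (entry_vld = sel_x ? is_add : 0).
 */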
4912 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4913                                 int loc, u8 *key, bool is_add)
4914 {
4915         struct hclge_fd_tcam_config_1_cmd *req1;
4916         struct hclge_fd_tcam_config_2_cmd *req2;
4917         struct hclge_fd_tcam_config_3_cmd *req3;
4918         struct hclge_desc desc[3];
4919         int ret;
4920
4921         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4922         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4923         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4924         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4925         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4926
4927         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4928         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4929         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4930
4931         req1->stage = stage;
4932         req1->xy_sel = sel_x ? 1 : 0;
4933         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4934         req1->index = cpu_to_le32(loc);
4935         req1->entry_vld = sel_x ? is_add : 0;
4936
4937         if (key) {
4938                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4939                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4940                        sizeof(req2->tcam_data));
4941                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4942                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4943         }
4944
4945         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4946         if (ret)
4947                 dev_err(&hdev->pdev->dev,
4948                         "config tcam key fail, ret=%d\n",
4949                         ret);
4950
4951         return ret;
4952 }
4953
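/* Build the 64-bit action data for a rule: the rule-id related fields are set
 * first and shifted into the upper 32 bits (ad_data <<= 32 below), while the
 * drop/queue/counter/next-stage fields occupy the lower 32 bits.
 */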
4954 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4955                               struct hclge_fd_ad_data *action)
4956 {
4957         struct hclge_fd_ad_config_cmd *req;
4958         struct hclge_desc desc;
4959         u64 ad_data = 0;
4960         int ret;
4961
4962         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4963
4964         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4965         req->index = cpu_to_le32(loc);
4966         req->stage = stage;
4967
4968         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4969                       action->write_rule_id_to_bd);
4970         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4971                         action->rule_id);
4972         ad_data <<= 32;
4973         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4974         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4975                       action->forward_to_direct_queue);
4976         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4977                         action->queue_id);
4978         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4979         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4980                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4981         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4982         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4983                         action->counter_id);
4984
4985         req->ad_data = cpu_to_le64(ad_data);
4986         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4987         if (ret)
4988                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4989
4990         return ret;
4991 }
4992
4993 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4994                                    struct hclge_fd_rule *rule)
4995 {
4996         u16 tmp_x_s, tmp_y_s;
4997         u32 tmp_x_l, tmp_y_l;
4998         int i;
4999
5000         if (rule->unused_tuple & tuple_bit)
5001                 return true;
5002
5003         switch (tuple_bit) {
5004         case 0:
5005                 return false;
5006         case BIT(INNER_DST_MAC):
5007                 for (i = 0; i < ETH_ALEN; i++) {
5008                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5009                                rule->tuples_mask.dst_mac[i]);
5010                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5011                                rule->tuples_mask.dst_mac[i]);
5012                 }
5013
5014                 return true;
5015         case BIT(INNER_SRC_MAC):
5016                 for (i = 0; i < ETH_ALEN; i++) {
5017                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5018                                rule->tuples_mask.src_mac[i]);
5019                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5020                                rule->tuples_mask.src_mac[i]);
5021                 }
5022
5023                 return true;
5024         case BIT(INNER_VLAN_TAG_FST):
5025                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5026                        rule->tuples_mask.vlan_tag1);
5027                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5028                        rule->tuples_mask.vlan_tag1);
5029                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5030                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5031
5032                 return true;
5033         case BIT(INNER_ETH_TYPE):
5034                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5035                        rule->tuples_mask.ether_proto);
5036                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5037                        rule->tuples_mask.ether_proto);
5038                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5039                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5040
5041                 return true;
5042         case BIT(INNER_IP_TOS):
5043                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5044                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5045
5046                 return true;
5047         case BIT(INNER_IP_PROTO):
5048                 calc_x(*key_x, rule->tuples.ip_proto,
5049                        rule->tuples_mask.ip_proto);
5050                 calc_y(*key_y, rule->tuples.ip_proto,
5051                        rule->tuples_mask.ip_proto);
5052
5053                 return true;
5054         case BIT(INNER_SRC_IP):
5055                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5056                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5057                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5058                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5059                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5060                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5061
5062                 return true;
5063         case BIT(INNER_DST_IP):
5064                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5065                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5066                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5067                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5068                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5069                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5070
5071                 return true;
5072         case BIT(INNER_SRC_PORT):
5073                 calc_x(tmp_x_s, rule->tuples.src_port,
5074                        rule->tuples_mask.src_port);
5075                 calc_y(tmp_y_s, rule->tuples.src_port,
5076                        rule->tuples_mask.src_port);
5077                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5078                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5079
5080                 return true;
5081         case BIT(INNER_DST_PORT):
5082                 calc_x(tmp_x_s, rule->tuples.dst_port,
5083                        rule->tuples_mask.dst_port);
5084                 calc_y(tmp_y_s, rule->tuples.dst_port,
5085                        rule->tuples_mask.dst_port);
5086                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5087                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5088
5089                 return true;
5090         default:
5091                 return false;
5092         }
5093 }
5094
5095 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5096                                  u8 vf_id, u8 network_port_id)
5097 {
5098         u32 port_number = 0;
5099
5100         if (port_type == HOST_PORT) {
5101                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5102                                 pf_id);
5103                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5104                                 vf_id);
5105                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5106         } else {
5107                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5108                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5109                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5110         }
5111
5112         return port_number;
5113 }
5114
5115 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5116                                        __le32 *key_x, __le32 *key_y,
5117                                        struct hclge_fd_rule *rule)
5118 {
5119         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5120         u8 cur_pos = 0, tuple_size, shift_bits;
5121         unsigned int i;
5122
5123         for (i = 0; i < MAX_META_DATA; i++) {
5124                 tuple_size = meta_data_key_info[i].key_length;
5125                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5126
5127                 switch (tuple_bit) {
5128                 case BIT(ROCE_TYPE):
5129                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5130                         cur_pos += tuple_size;
5131                         break;
5132                 case BIT(DST_VPORT):
5133                         port_number = hclge_get_port_number(HOST_PORT, 0,
5134                                                             rule->vf_id, 0);
5135                         hnae3_set_field(meta_data,
5136                                         GENMASK(cur_pos + tuple_size, cur_pos),
5137                                         cur_pos, port_number);
5138                         cur_pos += tuple_size;
5139                         break;
5140                 default:
5141                         break;
5142                 }
5143         }
5144
5145         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5146         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5147         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5148
5149         *key_x = cpu_to_le32(tmp_x << shift_bits);
5150         *key_y = cpu_to_le32(tmp_y << shift_bits);
5151 }
5152
5153 /* A complete key is made up of a meta data key and a tuple key.
5154  * The meta data key is stored in the MSB region, the tuple key is stored in
5155  * the LSB region, and unused bits are filled with 0.
5156  */
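/* Rough layout of key_x/key_y as built by hclge_config_key() below, where
 * meta_data_region = max_key_length / 8 - MAX_META_DATA_LENGTH / 8:
 *
 *   bytes [0, meta_data_region)      tuple key (LSB side)
 *   bytes [meta_data_region, end)    meta data key (MSB side)
 */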
5157 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5158                             struct hclge_fd_rule *rule)
5159 {
5160         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5161         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5162         u8 *cur_key_x, *cur_key_y;
5163         unsigned int i;
5164         int ret, tuple_size;
5165         u8 meta_data_region;
5166
5167         memset(key_x, 0, sizeof(key_x));
5168         memset(key_y, 0, sizeof(key_y));
5169         cur_key_x = key_x;
5170         cur_key_y = key_y;
5171
5172         for (i = 0; i < MAX_TUPLE; i++) {
5173                 bool tuple_valid;
5174                 u32 check_tuple;
5175
5176                 tuple_size = tuple_key_info[i].key_length / 8;
5177                 check_tuple = key_cfg->tuple_active & BIT(i);
5178
5179                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5180                                                      cur_key_y, rule);
5181                 if (tuple_valid) {
5182                         cur_key_x += tuple_size;
5183                         cur_key_y += tuple_size;
5184                 }
5185         }
5186
5187         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5188                         MAX_META_DATA_LENGTH / 8;
5189
5190         hclge_fd_convert_meta_data(key_cfg,
5191                                    (__le32 *)(key_x + meta_data_region),
5192                                    (__le32 *)(key_y + meta_data_region),
5193                                    rule);
5194
5195         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5196                                    true);
5197         if (ret) {
5198                 dev_err(&hdev->pdev->dev,
5199                         "fd key_y config fail, loc=%u, ret=%d\n",
5200                         rule->location, ret);
5201                 return ret;
5202         }
5203
5204         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5205                                    true);
5206         if (ret)
5207                 dev_err(&hdev->pdev->dev,
5208                         "fd key_x config fail, loc=%u, ret=%d\n",
5209                         rule->location, ret);
5210         return ret;
5211 }
5212
5213 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5214                                struct hclge_fd_rule *rule)
5215 {
5216         struct hclge_fd_ad_data ad_data;
5217
5218         ad_data.ad_id = rule->location;
5219
5220         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5221                 ad_data.drop_packet = true;
5222                 ad_data.forward_to_direct_queue = false;
5223                 ad_data.queue_id = 0;
5224         } else {
5225                 ad_data.drop_packet = false;
5226                 ad_data.forward_to_direct_queue = true;
5227                 ad_data.queue_id = rule->queue_id;
5228         }
5229
5230         ad_data.use_counter = false;
5231         ad_data.counter_id = 0;
5232
5233         ad_data.use_next_stage = false;
5234         ad_data.next_input_key = 0;
5235
5236         ad_data.write_rule_id_to_bd = true;
5237         ad_data.rule_id = rule->location;
5238
5239         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5240 }
5241
5242 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5243                                struct ethtool_rx_flow_spec *fs, u32 *unused)
5244 {
5245         struct ethtool_tcpip4_spec *tcp_ip4_spec;
5246         struct ethtool_usrip4_spec *usr_ip4_spec;
5247         struct ethtool_tcpip6_spec *tcp_ip6_spec;
5248         struct ethtool_usrip6_spec *usr_ip6_spec;
5249         struct ethhdr *ether_spec;
5250
5251         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5252                 return -EINVAL;
5253
5254         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5255                 return -EOPNOTSUPP;
5256
5257         if ((fs->flow_type & FLOW_EXT) &&
5258             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5259                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5260                 return -EOPNOTSUPP;
5261         }
5262
5263         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5264         case SCTP_V4_FLOW:
5265         case TCP_V4_FLOW:
5266         case UDP_V4_FLOW:
5267                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5268                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5269
5270                 if (!tcp_ip4_spec->ip4src)
5271                         *unused |= BIT(INNER_SRC_IP);
5272
5273                 if (!tcp_ip4_spec->ip4dst)
5274                         *unused |= BIT(INNER_DST_IP);
5275
5276                 if (!tcp_ip4_spec->psrc)
5277                         *unused |= BIT(INNER_SRC_PORT);
5278
5279                 if (!tcp_ip4_spec->pdst)
5280                         *unused |= BIT(INNER_DST_PORT);
5281
5282                 if (!tcp_ip4_spec->tos)
5283                         *unused |= BIT(INNER_IP_TOS);
5284
5285                 break;
5286         case IP_USER_FLOW:
5287                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5288                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5289                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5290
5291                 if (!usr_ip4_spec->ip4src)
5292                         *unused |= BIT(INNER_SRC_IP);
5293
5294                 if (!usr_ip4_spec->ip4dst)
5295                         *unused |= BIT(INNER_DST_IP);
5296
5297                 if (!usr_ip4_spec->tos)
5298                         *unused |= BIT(INNER_IP_TOS);
5299
5300                 if (!usr_ip4_spec->proto)
5301                         *unused |= BIT(INNER_IP_PROTO);
5302
5303                 if (usr_ip4_spec->l4_4_bytes)
5304                         return -EOPNOTSUPP;
5305
5306                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5307                         return -EOPNOTSUPP;
5308
5309                 break;
5310         case SCTP_V6_FLOW:
5311         case TCP_V6_FLOW:
5312         case UDP_V6_FLOW:
5313                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5314                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5315                         BIT(INNER_IP_TOS);
5316
5317                 /* check whether the src/dst ip address is used */
5318                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5319                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5320                         *unused |= BIT(INNER_SRC_IP);
5321
5322                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5323                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5324                         *unused |= BIT(INNER_DST_IP);
5325
5326                 if (!tcp_ip6_spec->psrc)
5327                         *unused |= BIT(INNER_SRC_PORT);
5328
5329                 if (!tcp_ip6_spec->pdst)
5330                         *unused |= BIT(INNER_DST_PORT);
5331
5332                 if (tcp_ip6_spec->tclass)
5333                         return -EOPNOTSUPP;
5334
5335                 break;
5336         case IPV6_USER_FLOW:
5337                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5338                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5339                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5340                         BIT(INNER_DST_PORT);
5341
5342                 /* check whether the src/dst ip address is used */
5343                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5344                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5345                         *unused |= BIT(INNER_SRC_IP);
5346
5347                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5348                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5349                         *unused |= BIT(INNER_DST_IP);
5350
5351                 if (!usr_ip6_spec->l4_proto)
5352                         *unused |= BIT(INNER_IP_PROTO);
5353
5354                 if (usr_ip6_spec->tclass)
5355                         return -EOPNOTSUPP;
5356
5357                 if (usr_ip6_spec->l4_4_bytes)
5358                         return -EOPNOTSUPP;
5359
5360                 break;
5361         case ETHER_FLOW:
5362                 ether_spec = &fs->h_u.ether_spec;
5363                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5364                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5365                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5366
5367                 if (is_zero_ether_addr(ether_spec->h_source))
5368                         *unused |= BIT(INNER_SRC_MAC);
5369
5370                 if (is_zero_ether_addr(ether_spec->h_dest))
5371                         *unused |= BIT(INNER_DST_MAC);
5372
5373                 if (!ether_spec->h_proto)
5374                         *unused |= BIT(INNER_ETH_TYPE);
5375
5376                 break;
5377         default:
5378                 return -EOPNOTSUPP;
5379         }
5380
5381         if (fs->flow_type & FLOW_EXT) {
5382                 if (fs->h_ext.vlan_etype)
5383                         return -EOPNOTSUPP;
5384                 if (!fs->h_ext.vlan_tci)
5385                         *unused |= BIT(INNER_VLAN_TAG_FST);
5386
5387                 if (fs->m_ext.vlan_tci) {
5388                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5389                                 return -EINVAL;
5390                 }
5391         } else {
5392                 *unused |= BIT(INNER_VLAN_TAG_FST);
5393         }
5394
5395         if (fs->flow_type & FLOW_MAC_EXT) {
5396                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5397                         return -EOPNOTSUPP;
5398
5399                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5400                         *unused |= BIT(INNER_DST_MAC);
5401                 else
5402                         *unused &= ~(BIT(INNER_DST_MAC));
5403         }
5404
5405         return 0;
5406 }
5407
5408 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5409 {
5410         struct hclge_fd_rule *rule = NULL;
5411         struct hlist_node *node2;
5412
5413         spin_lock_bh(&hdev->fd_rule_lock);
5414         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5415                 if (rule->location >= location)
5416                         break;
5417         }
5418
5419         spin_unlock_bh(&hdev->fd_rule_lock);
5420
5421         return rule && rule->location == location;
5422 }
5423
5424 /* make sure this is called with fd_rule_lock held */
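/* The rule list is kept sorted by location: walk to the first entry at or past
 * @location; an exact match is removed (and, on an add, replaced by the new
 * rule), otherwise the new rule is inserted behind the last preceding entry.
 */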
5425 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5426                                      struct hclge_fd_rule *new_rule,
5427                                      u16 location,
5428                                      bool is_add)
5429 {
5430         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5431         struct hlist_node *node2;
5432
5433         if (is_add && !new_rule)
5434                 return -EINVAL;
5435
5436         hlist_for_each_entry_safe(rule, node2,
5437                                   &hdev->fd_rule_list, rule_node) {
5438                 if (rule->location >= location)
5439                         break;
5440                 parent = rule;
5441         }
5442
5443         if (rule && rule->location == location) {
5444                 hlist_del(&rule->rule_node);
5445                 kfree(rule);
5446                 hdev->hclge_fd_rule_num--;
5447
5448                 if (!is_add) {
5449                         if (!hdev->hclge_fd_rule_num)
5450                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5451                         clear_bit(location, hdev->fd_bmap);
5452
5453                         return 0;
5454                 }
5455         } else if (!is_add) {
5456                 dev_err(&hdev->pdev->dev,
5457                         "delete fail, rule %u does not exist\n",
5458                         location);
5459                 return -EINVAL;
5460         }
5461
5462         INIT_HLIST_NODE(&new_rule->rule_node);
5463
5464         if (parent)
5465                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5466         else
5467                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5468
5469         set_bit(location, hdev->fd_bmap);
5470         hdev->hclge_fd_rule_num++;
5471         hdev->fd_active_type = new_rule->rule_type;
5472
5473         return 0;
5474 }
5475
5476 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5477                               struct ethtool_rx_flow_spec *fs,
5478                               struct hclge_fd_rule *rule)
5479 {
5480         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5481
5482         switch (flow_type) {
5483         case SCTP_V4_FLOW:
5484         case TCP_V4_FLOW:
5485         case UDP_V4_FLOW:
5486                 rule->tuples.src_ip[IPV4_INDEX] =
5487                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5488                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5489                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5490
5491                 rule->tuples.dst_ip[IPV4_INDEX] =
5492                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5493                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5494                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5495
5496                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5497                 rule->tuples_mask.src_port =
5498                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5499
5500                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5501                 rule->tuples_mask.dst_port =
5502                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5503
5504                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5505                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5506
5507                 rule->tuples.ether_proto = ETH_P_IP;
5508                 rule->tuples_mask.ether_proto = 0xFFFF;
5509
5510                 break;
5511         case IP_USER_FLOW:
5512                 rule->tuples.src_ip[IPV4_INDEX] =
5513                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5514                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5515                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5516
5517                 rule->tuples.dst_ip[IPV4_INDEX] =
5518                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5519                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5520                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5521
5522                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5523                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5524
5525                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5526                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5527
5528                 rule->tuples.ether_proto = ETH_P_IP;
5529                 rule->tuples_mask.ether_proto = 0xFFFF;
5530
5531                 break;
5532         case SCTP_V6_FLOW:
5533         case TCP_V6_FLOW:
5534         case UDP_V6_FLOW:
5535                 be32_to_cpu_array(rule->tuples.src_ip,
5536                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5537                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5538                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5539
5540                 be32_to_cpu_array(rule->tuples.dst_ip,
5541                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5542                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5543                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5544
5545                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5546                 rule->tuples_mask.src_port =
5547                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5548
5549                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5550                 rule->tuples_mask.dst_port =
5551                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5552
5553                 rule->tuples.ether_proto = ETH_P_IPV6;
5554                 rule->tuples_mask.ether_proto = 0xFFFF;
5555
5556                 break;
5557         case IPV6_USER_FLOW:
5558                 be32_to_cpu_array(rule->tuples.src_ip,
5559                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5560                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5561                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5562
5563                 be32_to_cpu_array(rule->tuples.dst_ip,
5564                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5565                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5566                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5567
5568                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5569                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5570
5571                 rule->tuples.ether_proto = ETH_P_IPV6;
5572                 rule->tuples_mask.ether_proto = 0xFFFF;
5573
5574                 break;
5575         case ETHER_FLOW:
5576                 ether_addr_copy(rule->tuples.src_mac,
5577                                 fs->h_u.ether_spec.h_source);
5578                 ether_addr_copy(rule->tuples_mask.src_mac,
5579                                 fs->m_u.ether_spec.h_source);
5580
5581                 ether_addr_copy(rule->tuples.dst_mac,
5582                                 fs->h_u.ether_spec.h_dest);
5583                 ether_addr_copy(rule->tuples_mask.dst_mac,
5584                                 fs->m_u.ether_spec.h_dest);
5585
5586                 rule->tuples.ether_proto =
5587                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5588                 rule->tuples_mask.ether_proto =
5589                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5590
5591                 break;
5592         default:
5593                 return -EOPNOTSUPP;
5594         }
5595
5596         switch (flow_type) {
5597         case SCTP_V4_FLOW:
5598         case SCTP_V6_FLOW:
5599                 rule->tuples.ip_proto = IPPROTO_SCTP;
5600                 rule->tuples_mask.ip_proto = 0xFF;
5601                 break;
5602         case TCP_V4_FLOW:
5603         case TCP_V6_FLOW:
5604                 rule->tuples.ip_proto = IPPROTO_TCP;
5605                 rule->tuples_mask.ip_proto = 0xFF;
5606                 break;
5607         case UDP_V4_FLOW:
5608         case UDP_V6_FLOW:
5609                 rule->tuples.ip_proto = IPPROTO_UDP;
5610                 rule->tuples_mask.ip_proto = 0xFF;
5611                 break;
5612         default:
5613                 break;
5614         }
5615
5616         if (fs->flow_type & FLOW_EXT) {
5617                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5618                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5619         }
5620
5621         if (fs->flow_type & FLOW_MAC_EXT) {
5622                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5623                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5624         }
5625
5626         return 0;
5627 }
5628
5629 /* make sure this function is called while holding fd_rule_lock */
5630 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5631                                 struct hclge_fd_rule *rule)
5632 {
5633         int ret;
5634
5635         if (!rule) {
5636                 dev_err(&hdev->pdev->dev,
5637                         "The flow director rule is NULL\n");
5638                 return -EINVAL;
5639         }
5640
5641         /* this never fails, so there is no need to check the return value */
5642         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5643
5644         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5645         if (ret)
5646                 goto clear_rule;
5647
5648         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5649         if (ret)
5650                 goto clear_rule;
5651
5652         return 0;
5653
5654 clear_rule:
5655         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5656         return ret;
5657 }
5658
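/* Add a flow director rule configured by the user via ethtool. The
 * ring_cookie either requests dropping matched packets or selects the
 * destination VF and queue; the rule is then written to stage 1 of the
 * TCAM under fd_rule_lock. aRFS rules are cleared first to avoid
 * conflicts with user-configured rules.
 */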
5659 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5660                               struct ethtool_rxnfc *cmd)
5661 {
5662         struct hclge_vport *vport = hclge_get_vport(handle);
5663         struct hclge_dev *hdev = vport->back;
5664         u16 dst_vport_id = 0, q_index = 0;
5665         struct ethtool_rx_flow_spec *fs;
5666         struct hclge_fd_rule *rule;
5667         u32 unused = 0;
5668         u8 action;
5669         int ret;
5670
5671         if (!hnae3_dev_fd_supported(hdev))
5672                 return -EOPNOTSUPP;
5673
5674         if (!hdev->fd_en) {
5675                 dev_warn(&hdev->pdev->dev,
5676                          "Please enable flow director first\n");
5677                 return -EOPNOTSUPP;
5678         }
5679
5680         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5681
5682         ret = hclge_fd_check_spec(hdev, fs, &unused);
5683         if (ret) {
5684                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5685                 return ret;
5686         }
5687
5688         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5689                 action = HCLGE_FD_ACTION_DROP_PACKET;
5690         } else {
5691                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5692                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5693                 u16 tqps;
5694
5695                 if (vf > hdev->num_req_vfs) {
5696                         dev_err(&hdev->pdev->dev,
5697                                 "Error: vf id (%u) > max vf num (%u)\n",
5698                                 vf, hdev->num_req_vfs);
5699                         return -EINVAL;
5700                 }
5701
5702                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5703                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5704
5705                 if (ring >= tqps) {
5706                         dev_err(&hdev->pdev->dev,
5707                                 "Error: queue id (%u) > max tqp num (%u)\n",
5708                                 ring, tqps - 1);
5709                         return -EINVAL;
5710                 }
5711
5712                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5713                 q_index = ring;
5714         }
5715
5716         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5717         if (!rule)
5718                 return -ENOMEM;
5719
5720         ret = hclge_fd_get_tuple(hdev, fs, rule);
5721         if (ret) {
5722                 kfree(rule);
5723                 return ret;
5724         }
5725
5726         rule->flow_type = fs->flow_type;
5727
5728         rule->location = fs->location;
5729         rule->unused_tuple = unused;
5730         rule->vf_id = dst_vport_id;
5731         rule->queue_id = q_index;
5732         rule->action = action;
5733         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5734
5735         /* to avoid rule conflicts, when the user configures a rule by
5736          * ethtool, we need to clear all arfs rules
5737          */
5738         hclge_clear_arfs_rules(handle);
5739
5740         spin_lock_bh(&hdev->fd_rule_lock);
5741         ret = hclge_fd_config_rule(hdev, rule);
5742
5743         spin_unlock_bh(&hdev->fd_rule_lock);
5744
5745         return ret;
5746 }
5747
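/* Delete the user-configured flow director rule at fs->location:
 * invalidate its TCAM entry first, then drop it from the driver's rule
 * list under fd_rule_lock.
 */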
5748 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5749                               struct ethtool_rxnfc *cmd)
5750 {
5751         struct hclge_vport *vport = hclge_get_vport(handle);
5752         struct hclge_dev *hdev = vport->back;
5753         struct ethtool_rx_flow_spec *fs;
5754         int ret;
5755
5756         if (!hnae3_dev_fd_supported(hdev))
5757                 return -EOPNOTSUPP;
5758
5759         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5760
5761         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5762                 return -EINVAL;
5763
5764         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5765                 dev_err(&hdev->pdev->dev,
5766                         "Delete fail, rule %u does not exist\n", fs->location);
5767                 return -ENOENT;
5768         }
5769
5770         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5771                                    NULL, false);
5772         if (ret)
5773                 return ret;
5774
5775         spin_lock_bh(&hdev->fd_rule_lock);
5776         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5777
5778         spin_unlock_bh(&hdev->fd_rule_lock);
5779
5780         return ret;
5781 }
5782
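/* Invalidate every flow director TCAM entry tracked in fd_bmap. When
 * clear_list is true, also free the software rule list and reset the
 * rule counter, bitmap and active type.
 */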
5783 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5784                                      bool clear_list)
5785 {
5786         struct hclge_vport *vport = hclge_get_vport(handle);
5787         struct hclge_dev *hdev = vport->back;
5788         struct hclge_fd_rule *rule;
5789         struct hlist_node *node;
5790         u16 location;
5791
5792         if (!hnae3_dev_fd_supported(hdev))
5793                 return;
5794
5795         spin_lock_bh(&hdev->fd_rule_lock);
5796         for_each_set_bit(location, hdev->fd_bmap,
5797                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5798                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5799                                      NULL, false);
5800
5801         if (clear_list) {
5802                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5803                                           rule_node) {
5804                         hlist_del(&rule->rule_node);
5805                         kfree(rule);
5806                 }
5807                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5808                 hdev->hclge_fd_rule_num = 0;
5809                 bitmap_zero(hdev->fd_bmap,
5810                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5811         }
5812
5813         spin_unlock_bh(&hdev->fd_rule_lock);
5814 }
5815
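/* Re-program all remembered flow director rules into hardware after a
 * reset. Rules that fail to restore are removed from the software list
 * so that it stays consistent with hardware.
 */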
5816 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5817 {
5818         struct hclge_vport *vport = hclge_get_vport(handle);
5819         struct hclge_dev *hdev = vport->back;
5820         struct hclge_fd_rule *rule;
5821         struct hlist_node *node;
5822         int ret;
5823
5824         /* Return ok here, because reset error handling will check this
5825          * return value. If an error is returned here, the reset process will
5826          * fail.
5827          */
5828         if (!hnae3_dev_fd_supported(hdev))
5829                 return 0;
5830
5831         /* if fd is disabled, the rules should not be restored during reset */
5832         if (!hdev->fd_en)
5833                 return 0;
5834
5835         spin_lock_bh(&hdev->fd_rule_lock);
5836         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5837                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5838                 if (!ret)
5839                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5840
5841                 if (ret) {
5842                         dev_warn(&hdev->pdev->dev,
5843                                  "Restore rule %u failed, remove it\n",
5844                                  rule->location);
5845                         clear_bit(rule->location, hdev->fd_bmap);
5846                         hlist_del(&rule->rule_node);
5847                         kfree(rule);
5848                         hdev->hclge_fd_rule_num--;
5849                 }
5850         }
5851
5852         if (hdev->hclge_fd_rule_num)
5853                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5854
5855         spin_unlock_bh(&hdev->fd_rule_lock);
5856
5857         return 0;
5858 }
5859
5860 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5861                                  struct ethtool_rxnfc *cmd)
5862 {
5863         struct hclge_vport *vport = hclge_get_vport(handle);
5864         struct hclge_dev *hdev = vport->back;
5865
5866         if (!hnae3_dev_fd_supported(hdev))
5867                 return -EOPNOTSUPP;
5868
5869         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5870         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5871
5872         return 0;
5873 }
5874
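/* Fill the ethtool flow spec for the rule stored at fs->location,
 * converting the saved tuples and masks back to the big-endian ethtool
 * layout and taking rule->unused_tuple into account for the masks.
 */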
5875 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5876                                   struct ethtool_rxnfc *cmd)
5877 {
5878         struct hclge_vport *vport = hclge_get_vport(handle);
5879         struct hclge_fd_rule *rule = NULL;
5880         struct hclge_dev *hdev = vport->back;
5881         struct ethtool_rx_flow_spec *fs;
5882         struct hlist_node *node2;
5883
5884         if (!hnae3_dev_fd_supported(hdev))
5885                 return -EOPNOTSUPP;
5886
5887         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5888
5889         spin_lock_bh(&hdev->fd_rule_lock);
5890
5891         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5892                 if (rule->location >= fs->location)
5893                         break;
5894         }
5895
5896         if (!rule || fs->location != rule->location) {
5897                 spin_unlock_bh(&hdev->fd_rule_lock);
5898
5899                 return -ENOENT;
5900         }
5901
5902         fs->flow_type = rule->flow_type;
5903         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5904         case SCTP_V4_FLOW:
5905         case TCP_V4_FLOW:
5906         case UDP_V4_FLOW:
5907                 fs->h_u.tcp_ip4_spec.ip4src =
5908                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5909                 fs->m_u.tcp_ip4_spec.ip4src =
5910                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5911                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5912
5913                 fs->h_u.tcp_ip4_spec.ip4dst =
5914                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5915                 fs->m_u.tcp_ip4_spec.ip4dst =
5916                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5917                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5918
5919                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5920                 fs->m_u.tcp_ip4_spec.psrc =
5921                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5922                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5923
5924                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5925                 fs->m_u.tcp_ip4_spec.pdst =
5926                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5927                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5928
5929                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5930                 fs->m_u.tcp_ip4_spec.tos =
5931                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5932                                 0 : rule->tuples_mask.ip_tos;
5933
5934                 break;
5935         case IP_USER_FLOW:
5936                 fs->h_u.usr_ip4_spec.ip4src =
5937                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5938                 fs->m_u.usr_ip4_spec.ip4src =
5939                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5940                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5941
5942                 fs->h_u.usr_ip4_spec.ip4dst =
5943                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5944                 fs->m_u.usr_ip4_spec.ip4dst =
5945                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5946                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5947
5948                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5949                 fs->m_u.usr_ip4_spec.tos =
5950                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5951                                 0 : rule->tuples_mask.ip_tos;
5952
5953                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5954                 fs->m_u.usr_ip4_spec.proto =
5955                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5956                                 0 : rule->tuples_mask.ip_proto;
5957
5958                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5959
5960                 break;
5961         case SCTP_V6_FLOW:
5962         case TCP_V6_FLOW:
5963         case UDP_V6_FLOW:
5964                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5965                                   rule->tuples.src_ip, IPV6_SIZE);
5966                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5967                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5968                                sizeof(int) * IPV6_SIZE);
5969                 else
5970                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5971                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5972
5973                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5974                                   rule->tuples.dst_ip, IPV6_SIZE);
5975                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5976                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5977                                sizeof(int) * IPV6_SIZE);
5978                 else
5979                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5980                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5981
5982                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5983                 fs->m_u.tcp_ip6_spec.psrc =
5984                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5985                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5986
5987                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5988                 fs->m_u.tcp_ip6_spec.pdst =
5989                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5990                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5991
5992                 break;
5993         case IPV6_USER_FLOW:
5994                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5995                                   rule->tuples.src_ip, IPV6_SIZE);
5996                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5997                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5998                                sizeof(int) * IPV6_SIZE);
5999                 else
6000                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6001                                           rule->tuples_mask.src_ip, IPV6_SIZE);
6002
6003                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6004                                   rule->tuples.dst_ip, IPV6_SIZE);
6005                 if (rule->unused_tuple & BIT(INNER_DST_IP))
6006                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6007                                sizeof(int) * IPV6_SIZE);
6008                 else
6009                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6010                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
6011
6012                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6013                 fs->m_u.usr_ip6_spec.l4_proto =
6014                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6015                                 0 : rule->tuples_mask.ip_proto;
6016
6017                 break;
6018         case ETHER_FLOW:
6019                 ether_addr_copy(fs->h_u.ether_spec.h_source,
6020                                 rule->tuples.src_mac);
6021                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6022                         eth_zero_addr(fs->m_u.ether_spec.h_source);
6023                 else
6024                         ether_addr_copy(fs->m_u.ether_spec.h_source,
6025                                         rule->tuples_mask.src_mac);
6026
6027                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6028                                 rule->tuples.dst_mac);
6029                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6030                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6031                 else
6032                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6033                                         rule->tuples_mask.dst_mac);
6034
6035                 fs->h_u.ether_spec.h_proto =
6036                                 cpu_to_be16(rule->tuples.ether_proto);
6037                 fs->m_u.ether_spec.h_proto =
6038                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6039                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6040
6041                 break;
6042         default:
6043                 spin_unlock_bh(&hdev->fd_rule_lock);
6044                 return -EOPNOTSUPP;
6045         }
6046
6047         if (fs->flow_type & FLOW_EXT) {
6048                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6049                 fs->m_ext.vlan_tci =
6050                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6051                                 cpu_to_be16(VLAN_VID_MASK) :
6052                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6053         }
6054
6055         if (fs->flow_type & FLOW_MAC_EXT) {
6056                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6057                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6058                         eth_zero_addr(fs->m_ext.h_dest);
6059                 else
6060                         ether_addr_copy(fs->m_ext.h_dest,
6061                                         rule->tuples_mask.dst_mac);
6062         }
6063
6064         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6065                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6066         } else {
6067                 u64 vf_id;
6068
6069                 fs->ring_cookie = rule->queue_id;
6070                 vf_id = rule->vf_id;
6071                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6072                 fs->ring_cookie |= vf_id;
6073         }
6074
6075         spin_unlock_bh(&hdev->fd_rule_lock);
6076
6077         return 0;
6078 }
6079
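/* Report the locations of all configured flow director rules for
 * ethtool's get-all-rules query; returns -EMSGSIZE if the caller's
 * rule_locs array is too small to hold them.
 */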
6080 static int hclge_get_all_rules(struct hnae3_handle *handle,
6081                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6082 {
6083         struct hclge_vport *vport = hclge_get_vport(handle);
6084         struct hclge_dev *hdev = vport->back;
6085         struct hclge_fd_rule *rule;
6086         struct hlist_node *node2;
6087         int cnt = 0;
6088
6089         if (!hnae3_dev_fd_supported(hdev))
6090                 return -EOPNOTSUPP;
6091
6092         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6093
6094         spin_lock_bh(&hdev->fd_rule_lock);
6095         hlist_for_each_entry_safe(rule, node2,
6096                                   &hdev->fd_rule_list, rule_node) {
6097                 if (cnt == cmd->rule_cnt) {
6098                         spin_unlock_bh(&hdev->fd_rule_lock);
6099                         return -EMSGSIZE;
6100                 }
6101
6102                 rule_locs[cnt] = rule->location;
6103                 cnt++;
6104         }
6105
6106         spin_unlock_bh(&hdev->fd_rule_lock);
6107
6108         cmd->rule_cnt = cnt;
6109
6110         return 0;
6111 }
6112
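/* Convert the dissected flow keys of an aRFS flow into the driver's
 * hclge_fd_rule_tuples layout in host byte order. An IPv4 address is
 * stored in the last word of the IPv6-sized address array.
 */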
6113 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6114                                      struct hclge_fd_rule_tuples *tuples)
6115 {
6116 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6117 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6118
6119         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6120         tuples->ip_proto = fkeys->basic.ip_proto;
6121         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6122
6123         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6124                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6125                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6126         } else {
6127                 int i;
6128
6129                 for (i = 0; i < IPV6_SIZE; i++) {
6130                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6131                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6132                 }
6133         }
6134 }
6135
6136 /* traverse all rules, check whether an existing rule has the same tuples */
6137 static struct hclge_fd_rule *
6138 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6139                           const struct hclge_fd_rule_tuples *tuples)
6140 {
6141         struct hclge_fd_rule *rule = NULL;
6142         struct hlist_node *node;
6143
6144         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6145                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6146                         return rule;
6147         }
6148
6149         return NULL;
6150 }
6151
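/* Build an aRFS rule from the extracted tuples: MAC, VLAN, ToS and
 * source port are marked as unused tuples, the flow type is derived
 * from the L3/L4 protocol, and the rule is tagged HCLGE_FD_ARFS_ACTIVE.
 */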
6152 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6153                                      struct hclge_fd_rule *rule)
6154 {
6155         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6156                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6157                              BIT(INNER_SRC_PORT);
6158         rule->action = 0;
6159         rule->vf_id = 0;
6160         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6161         if (tuples->ether_proto == ETH_P_IP) {
6162                 if (tuples->ip_proto == IPPROTO_TCP)
6163                         rule->flow_type = TCP_V4_FLOW;
6164                 else
6165                         rule->flow_type = UDP_V4_FLOW;
6166         } else {
6167                 if (tuples->ip_proto == IPPROTO_TCP)
6168                         rule->flow_type = TCP_V6_FLOW;
6169                 else
6170                         rule->flow_type = UDP_V6_FLOW;
6171         }
6172         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6173         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6174 }
6175
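/* aRFS flow steering callback: reuse an existing rule whose tuples
 * match this flow when possible, otherwise allocate a free TCAM
 * location and program a new rule for it. Returns the rule location on
 * success or a negative errno.
 */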
6176 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6177                                       u16 flow_id, struct flow_keys *fkeys)
6178 {
6179         struct hclge_vport *vport = hclge_get_vport(handle);
6180         struct hclge_fd_rule_tuples new_tuples;
6181         struct hclge_dev *hdev = vport->back;
6182         struct hclge_fd_rule *rule;
6183         u16 tmp_queue_id;
6184         u16 bit_id;
6185         int ret;
6186
6187         if (!hnae3_dev_fd_supported(hdev))
6188                 return -EOPNOTSUPP;
6189
6190         memset(&new_tuples, 0, sizeof(new_tuples));
6191         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6192
6193         spin_lock_bh(&hdev->fd_rule_lock);
6194
6195         /* when there is already an fd rule added by the user,
6196          * arfs should not work
6197          */
6198         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6199                 spin_unlock_bh(&hdev->fd_rule_lock);
6200
6201                 return -EOPNOTSUPP;
6202         }
6203
6204         /* check whether a flow director filter already exists for this
6205          * flow; if not, create a new filter for it;
6206          * if a filter exists with a different queue id, modify the filter;
6207          * if a filter exists with the same queue id, do nothing
6208          */
6209         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6210         if (!rule) {
6211                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6212                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6213                         spin_unlock_bh(&hdev->fd_rule_lock);
6214
6215                         return -ENOSPC;
6216                 }
6217
6218                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6219                 if (!rule) {
6220                         spin_unlock_bh(&hdev->fd_rule_lock);
6221
6222                         return -ENOMEM;
6223                 }
6224
6225                 set_bit(bit_id, hdev->fd_bmap);
6226                 rule->location = bit_id;
6227                 rule->flow_id = flow_id;
6228                 rule->queue_id = queue_id;
6229                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6230                 ret = hclge_fd_config_rule(hdev, rule);
6231
6232                 spin_unlock_bh(&hdev->fd_rule_lock);
6233
6234                 if (ret)
6235                         return ret;
6236
6237                 return rule->location;
6238         }
6239
6240         spin_unlock_bh(&hdev->fd_rule_lock);
6241
6242         if (rule->queue_id == queue_id)
6243                 return rule->location;
6244
6245         tmp_queue_id = rule->queue_id;
6246         rule->queue_id = queue_id;
6247         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6248         if (ret) {
6249                 rule->queue_id = tmp_queue_id;
6250                 return ret;
6251         }
6252
6253         return rule->location;
6254 }
6255
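/* Age out aRFS rules: entries that rps_may_expire_flow() reports as
 * expired are unlinked from the rule list under fd_rule_lock, then
 * their TCAM entries are invalidated outside the lock.
 */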
6256 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6257 {
6258 #ifdef CONFIG_RFS_ACCEL
6259         struct hnae3_handle *handle = &hdev->vport[0].nic;
6260         struct hclge_fd_rule *rule;
6261         struct hlist_node *node;
6262         HLIST_HEAD(del_list);
6263
6264         spin_lock_bh(&hdev->fd_rule_lock);
6265         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6266                 spin_unlock_bh(&hdev->fd_rule_lock);
6267                 return;
6268         }
6269         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6270                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6271                                         rule->flow_id, rule->location)) {
6272                         hlist_del_init(&rule->rule_node);
6273                         hlist_add_head(&rule->rule_node, &del_list);
6274                         hdev->hclge_fd_rule_num--;
6275                         clear_bit(rule->location, hdev->fd_bmap);
6276                 }
6277         }
6278         spin_unlock_bh(&hdev->fd_rule_lock);
6279
6280         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6281                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6282                                      rule->location, NULL, false);
6283                 kfree(rule);
6284         }
6285 #endif
6286 }
6287
6288 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6289 {
6290 #ifdef CONFIG_RFS_ACCEL
6291         struct hclge_vport *vport = hclge_get_vport(handle);
6292         struct hclge_dev *hdev = vport->back;
6293
6294         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6295                 hclge_del_all_fd_entries(handle, true);
6296 #endif
6297 }
6298
6299 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6300 {
6301         struct hclge_vport *vport = hclge_get_vport(handle);
6302         struct hclge_dev *hdev = vport->back;
6303
6304         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6305                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6306 }
6307
6308 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6309 {
6310         struct hclge_vport *vport = hclge_get_vport(handle);
6311         struct hclge_dev *hdev = vport->back;
6312
6313         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6314 }
6315
6316 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6317 {
6318         struct hclge_vport *vport = hclge_get_vport(handle);
6319         struct hclge_dev *hdev = vport->back;
6320
6321         return hdev->rst_stats.hw_reset_done_cnt;
6322 }
6323
6324 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6325 {
6326         struct hclge_vport *vport = hclge_get_vport(handle);
6327         struct hclge_dev *hdev = vport->back;
6328         bool clear;
6329
6330         hdev->fd_en = enable;
6331         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6332         if (!enable)
6333                 hclge_del_all_fd_entries(handle, clear);
6334         else
6335                 hclge_restore_fd_entries(handle);
6336 }
6337
6338 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6339 {
6340         struct hclge_desc desc;
6341         struct hclge_config_mac_mode_cmd *req =
6342                 (struct hclge_config_mac_mode_cmd *)desc.data;
6343         u32 loop_en = 0;
6344         int ret;
6345
6346         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6347
6348         if (enable) {
6349                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6350                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6351                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6352                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6353                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6354                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6355                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6356                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6357                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6358                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6359         }
6360
6361         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6362
6363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6364         if (ret)
6365                 dev_err(&hdev->pdev->dev,
6366                         "mac enable fail, ret =%d.\n", ret);
6367 }
6368
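/* Read-modify-write a MAC/VLAN switch parameter for the given
 * function: the current value is read first, switch_param is merged in
 * according to param_mask, and the result is written back.
 */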
6369 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6370                                      u8 switch_param, u8 param_mask)
6371 {
6372         struct hclge_mac_vlan_switch_cmd *req;
6373         struct hclge_desc desc;
6374         u32 func_id;
6375         int ret;
6376
6377         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6378         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6379
6380         /* read current config parameter */
6381         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6382                                    true);
6383         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6384         req->func_id = cpu_to_le32(func_id);
6385
6386         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6387         if (ret) {
6388                 dev_err(&hdev->pdev->dev,
6389                         "read mac vlan switch parameter fail, ret = %d\n", ret);
6390                 return ret;
6391         }
6392
6393         /* modify and write new config parameter */
6394         hclge_cmd_reuse_desc(&desc, false);
6395         req->switch_param = (req->switch_param & param_mask) | switch_param;
6396         req->param_mask = param_mask;
6397
6398         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6399         if (ret)
6400                 dev_err(&hdev->pdev->dev,
6401                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6402         return ret;
6403 }
6404
6405 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6406                                        int link_ret)
6407 {
6408 #define HCLGE_PHY_LINK_STATUS_NUM  200
6409
6410         struct phy_device *phydev = hdev->hw.mac.phydev;
6411         int i = 0;
6412         int ret;
6413
6414         do {
6415                 ret = phy_read_status(phydev);
6416                 if (ret) {
6417                         dev_err(&hdev->pdev->dev,
6418                                 "phy update link status fail, ret = %d\n", ret);
6419                         return;
6420                 }
6421
6422                 if (phydev->link == link_ret)
6423                         break;
6424
6425                 msleep(HCLGE_LINK_STATUS_MS);
6426         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6427 }
6428
6429 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6430 {
6431 #define HCLGE_MAC_LINK_STATUS_NUM  100
6432
6433         int i = 0;
6434         int ret;
6435
6436         do {
6437                 ret = hclge_get_mac_link_status(hdev);
6438                 if (ret < 0)
6439                         return ret;
6440                 else if (ret == link_ret)
6441                         return 0;
6442
6443                 msleep(HCLGE_LINK_STATUS_MS);
6444         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6445         return -EBUSY;
6446 }
6447
6448 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6449                                           bool is_phy)
6450 {
6451 #define HCLGE_LINK_STATUS_DOWN 0
6452 #define HCLGE_LINK_STATUS_UP   1
6453
6454         int link_ret;
6455
6456         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6457
6458         if (is_phy)
6459                 hclge_phy_link_status_wait(hdev, link_ret);
6460
6461         return hclge_mac_link_status_wait(hdev, link_ret);
6462 }
6463
6464 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6465 {
6466         struct hclge_config_mac_mode_cmd *req;
6467         struct hclge_desc desc;
6468         u32 loop_en;
6469         int ret;
6470
6471         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6472         /* 1 Read out the MAC mode config at first */
6473         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6474         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6475         if (ret) {
6476                 dev_err(&hdev->pdev->dev,
6477                         "mac loopback get fail, ret =%d.\n", ret);
6478                 return ret;
6479         }
6480
6481         /* 2 Then setup the loopback flag */
6482         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6483         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6484         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6485         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6486
6487         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6488
6489         /* 3 Config mac work mode with loopback flag
6490          * and its original configure parameters
6491          */
6492         hclge_cmd_reuse_desc(&desc, false);
6493         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6494         if (ret)
6495                 dev_err(&hdev->pdev->dev,
6496                         "mac loopback set fail, ret =%d.\n", ret);
6497         return ret;
6498 }
6499
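/* Enable or disable serdes (serial or parallel) loopback through the
 * command queue, then poll the command result until the firmware
 * reports completion or the retry limit is reached.
 */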
6500 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6501                                      enum hnae3_loop loop_mode)
6502 {
6503 #define HCLGE_SERDES_RETRY_MS   10
6504 #define HCLGE_SERDES_RETRY_NUM  100
6505
6506         struct hclge_serdes_lb_cmd *req;
6507         struct hclge_desc desc;
6508         int ret, i = 0;
6509         u8 loop_mode_b;
6510
6511         req = (struct hclge_serdes_lb_cmd *)desc.data;
6512         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6513
6514         switch (loop_mode) {
6515         case HNAE3_LOOP_SERIAL_SERDES:
6516                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6517                 break;
6518         case HNAE3_LOOP_PARALLEL_SERDES:
6519                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6520                 break;
6521         default:
6522                 dev_err(&hdev->pdev->dev,
6523                         "unsupported serdes loopback mode %d\n", loop_mode);
6524                 return -ENOTSUPP;
6525         }
6526
6527         if (en) {
6528                 req->enable = loop_mode_b;
6529                 req->mask = loop_mode_b;
6530         } else {
6531                 req->mask = loop_mode_b;
6532         }
6533
6534         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6535         if (ret) {
6536                 dev_err(&hdev->pdev->dev,
6537                         "serdes loopback set fail, ret = %d\n", ret);
6538                 return ret;
6539         }
6540
6541         do {
6542                 msleep(HCLGE_SERDES_RETRY_MS);
6543                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6544                                            true);
6545                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6546                 if (ret) {
6547                         dev_err(&hdev->pdev->dev,
6548                                 "serdes loopback get fail, ret = %d\n", ret);
6549                         return ret;
6550                 }
6551         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6552                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6553
6554         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6555                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6556                 return -EBUSY;
6557         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6558                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6559                 return -EIO;
6560         }
6561         return ret;
6562 }
6563
6564 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6565                                      enum hnae3_loop loop_mode)
6566 {
6567         int ret;
6568
6569         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6570         if (ret)
6571                 return ret;
6572
6573         hclge_cfg_mac_mode(hdev, en);
6574
6575         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6576         if (ret)
6577                 dev_err(&hdev->pdev->dev,
6578                         "serdes loopback config mac mode timeout\n");
6579
6580         return ret;
6581 }
6582
6583 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6584                                      struct phy_device *phydev)
6585 {
6586         int ret;
6587
6588         if (!phydev->suspended) {
6589                 ret = phy_suspend(phydev);
6590                 if (ret)
6591                         return ret;
6592         }
6593
6594         ret = phy_resume(phydev);
6595         if (ret)
6596                 return ret;
6597
6598         return phy_loopback(phydev, true);
6599 }
6600
6601 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6602                                       struct phy_device *phydev)
6603 {
6604         int ret;
6605
6606         ret = phy_loopback(phydev, false);
6607         if (ret)
6608                 return ret;
6609
6610         return phy_suspend(phydev);
6611 }
6612
6613 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6614 {
6615         struct phy_device *phydev = hdev->hw.mac.phydev;
6616         int ret;
6617
6618         if (!phydev)
6619                 return -ENOTSUPP;
6620
6621         if (en)
6622                 ret = hclge_enable_phy_loopback(hdev, phydev);
6623         else
6624                 ret = hclge_disable_phy_loopback(hdev, phydev);
6625         if (ret) {
6626                 dev_err(&hdev->pdev->dev,
6627                         "set phy loopback fail, ret = %d\n", ret);
6628                 return ret;
6629         }
6630
6631         hclge_cfg_mac_mode(hdev, en);
6632
6633         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6634         if (ret)
6635                 dev_err(&hdev->pdev->dev,
6636                         "phy loopback config mac mode timeout\n");
6637
6638         return ret;
6639 }
6640
6641 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6642                             int stream_id, bool enable)
6643 {
6644         struct hclge_desc desc;
6645         struct hclge_cfg_com_tqp_queue_cmd *req =
6646                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6647         int ret;
6648
6649         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6650         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6651         req->stream_id = cpu_to_le16(stream_id);
6652         if (enable)
6653                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6654
6655         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6656         if (ret)
6657                 dev_err(&hdev->pdev->dev,
6658                         "Tqp enable fail, status =%d.\n", ret);
6659         return ret;
6660 }
6661
6662 static int hclge_set_loopback(struct hnae3_handle *handle,
6663                               enum hnae3_loop loop_mode, bool en)
6664 {
6665         struct hclge_vport *vport = hclge_get_vport(handle);
6666         struct hnae3_knic_private_info *kinfo;
6667         struct hclge_dev *hdev = vport->back;
6668         int i, ret;
6669
6670         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6671          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6672          * the same, the packets are looped back in the SSU. If SSU loopback
6673          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6674          */
6675         if (hdev->pdev->revision >= 0x21) {
6676                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6677
6678                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6679                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
6680                 if (ret)
6681                         return ret;
6682         }
6683
6684         switch (loop_mode) {
6685         case HNAE3_LOOP_APP:
6686                 ret = hclge_set_app_loopback(hdev, en);
6687                 break;
6688         case HNAE3_LOOP_SERIAL_SERDES:
6689         case HNAE3_LOOP_PARALLEL_SERDES:
6690                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6691                 break;
6692         case HNAE3_LOOP_PHY:
6693                 ret = hclge_set_phy_loopback(hdev, en);
6694                 break;
6695         default:
6696                 ret = -ENOTSUPP;
6697                 dev_err(&hdev->pdev->dev,
6698                         "loop_mode %d is not supported\n", loop_mode);
6699                 break;
6700         }
6701
6702         if (ret)
6703                 return ret;
6704
6705         kinfo = &vport->nic.kinfo;
6706         for (i = 0; i < kinfo->num_tqps; i++) {
6707                 ret = hclge_tqp_enable(hdev, i, 0, en);
6708                 if (ret)
6709                         return ret;
6710         }
6711
6712         return 0;
6713 }
6714
6715 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6716 {
6717         int ret;
6718
6719         ret = hclge_set_app_loopback(hdev, false);
6720         if (ret)
6721                 return ret;
6722
6723         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6724         if (ret)
6725                 return ret;
6726
6727         return hclge_cfg_serdes_loopback(hdev, false,
6728                                          HNAE3_LOOP_PARALLEL_SERDES);
6729 }
6730
6731 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6732 {
6733         struct hclge_vport *vport = hclge_get_vport(handle);
6734         struct hnae3_knic_private_info *kinfo;
6735         struct hnae3_queue *queue;
6736         struct hclge_tqp *tqp;
6737         int i;
6738
6739         kinfo = &vport->nic.kinfo;
6740         for (i = 0; i < kinfo->num_tqps; i++) {
6741                 queue = handle->kinfo.tqp[i];
6742                 tqp = container_of(queue, struct hclge_tqp, q);
6743                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6744         }
6745 }
6746
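/* Wait (bounded by HCLGE_FLUSH_LINK_TIMEOUT) for an in-flight link
 * status update in the service task to finish, so the caller can
 * safely disable further link updating.
 */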
6747 static void hclge_flush_link_update(struct hclge_dev *hdev)
6748 {
6749 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
6750
6751         unsigned long last = hdev->serv_processed_cnt;
6752         int i = 0;
6753
6754         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6755                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6756                last == hdev->serv_processed_cnt)
6757                 usleep_range(1, 1);
6758 }
6759
6760 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6761 {
6762         struct hclge_vport *vport = hclge_get_vport(handle);
6763         struct hclge_dev *hdev = vport->back;
6764
6765         if (enable) {
6766                 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6767         } else {
6768                 /* Set the DOWN flag here to disable link updating */
6769                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6770
6771                 /* flush memory to make sure DOWN is seen by service task */
6772                 smp_mb__before_atomic();
6773                 hclge_flush_link_update(hdev);
6774         }
6775 }
6776
6777 static int hclge_ae_start(struct hnae3_handle *handle)
6778 {
6779         struct hclge_vport *vport = hclge_get_vport(handle);
6780         struct hclge_dev *hdev = vport->back;
6781
6782         /* mac enable */
6783         hclge_cfg_mac_mode(hdev, true);
6784         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6785         hdev->hw.mac.link = 0;
6786
6787         /* reset tqp stats */
6788         hclge_reset_tqp_stats(handle);
6789
6790         hclge_mac_start_phy(hdev);
6791
6792         return 0;
6793 }
6794
6795 static void hclge_ae_stop(struct hnae3_handle *handle)
6796 {
6797         struct hclge_vport *vport = hclge_get_vport(handle);
6798         struct hclge_dev *hdev = vport->back;
6799         int i;
6800
6801         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6802
6803         hclge_clear_arfs_rules(handle);
6804
6805         /* If it is not a PF reset, the firmware will disable the MAC,
6806          * so we only need to stop the phy here.
6807          */
6808         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6809             hdev->reset_type != HNAE3_FUNC_RESET) {
6810                 hclge_mac_stop_phy(hdev);
6811                 hclge_update_link_status(hdev);
6812                 return;
6813         }
6814
6815         for (i = 0; i < handle->kinfo.num_tqps; i++)
6816                 hclge_reset_tqp(handle, i);
6817
6818         hclge_config_mac_tnl_int(hdev, false);
6819
6820         /* Mac disable */
6821         hclge_cfg_mac_mode(hdev, false);
6822
6823         hclge_mac_stop_phy(hdev);
6824
6825         /* reset tqp stats */
6826         hclge_reset_tqp_stats(handle);
6827         hclge_update_link_status(hdev);
6828 }
6829
6830 int hclge_vport_start(struct hclge_vport *vport)
6831 {
6832         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6833         vport->last_active_jiffies = jiffies;
6834         return 0;
6835 }
6836
6837 void hclge_vport_stop(struct hclge_vport *vport)
6838 {
6839         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6840 }
6841
6842 static int hclge_client_start(struct hnae3_handle *handle)
6843 {
6844         struct hclge_vport *vport = hclge_get_vport(handle);
6845
6846         return hclge_vport_start(vport);
6847 }
6848
6849 static void hclge_client_stop(struct hnae3_handle *handle)
6850 {
6851         struct hclge_vport *vport = hclge_get_vport(handle);
6852
6853         hclge_vport_stop(vport);
6854 }
6855
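/* Translate the firmware response of a MAC/VLAN table command into an
 * errno, distinguishing add, remove and lookup semantics (e.g. UC/MC
 * table overflow on add, entry miss on remove or lookup).
 */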
6856 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6857                                          u16 cmdq_resp, u8  resp_code,
6858                                          enum hclge_mac_vlan_tbl_opcode op)
6859 {
6860         struct hclge_dev *hdev = vport->back;
6861
6862         if (cmdq_resp) {
6863                 dev_err(&hdev->pdev->dev,
6864                         "cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
6865                         cmdq_resp);
6866                 return -EIO;
6867         }
6868
6869         if (op == HCLGE_MAC_VLAN_ADD) {
6870                 if ((!resp_code) || (resp_code == 1)) {
6871                         return 0;
6872                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6873                         dev_err(&hdev->pdev->dev,
6874                                 "add mac addr failed for uc_overflow.\n");
6875                         return -ENOSPC;
6876                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6877                         dev_err(&hdev->pdev->dev,
6878                                 "add mac addr failed for mc_overflow.\n");
6879                         return -ENOSPC;
6880                 }
6881
6882                 dev_err(&hdev->pdev->dev,
6883                         "add mac addr failed for undefined, code=%u.\n",
6884                         resp_code);
6885                 return -EIO;
6886         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6887                 if (!resp_code) {
6888                         return 0;
6889                 } else if (resp_code == 1) {
6890                         dev_dbg(&hdev->pdev->dev,
6891                                 "remove mac addr failed for miss.\n");
6892                         return -ENOENT;
6893                 }
6894
6895                 dev_err(&hdev->pdev->dev,
6896                         "remove mac addr failed for undefined, code=%u.\n",
6897                         resp_code);
6898                 return -EIO;
6899         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6900                 if (!resp_code) {
6901                         return 0;
6902                 } else if (resp_code == 1) {
6903                         dev_dbg(&hdev->pdev->dev,
6904                                 "lookup mac addr failed for miss.\n");
6905                         return -ENOENT;
6906                 }
6907
6908                 dev_err(&hdev->pdev->dev,
6909                         "lookup mac addr failed for undefined, code=%u.\n",
6910                         resp_code);
6911                 return -EIO;
6912         }
6913
6914         dev_err(&hdev->pdev->dev,
6915                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6916
6917         return -EINVAL;
6918 }
6919
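/* Set or clear the bit for vfid in the function bitmap carried by a
 * multi-descriptor MAC/VLAN table command: the first 192 functions are
 * kept in desc[1], the remaining ones in desc[2].
 */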
6920 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6921 {
6922 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6923
6924         unsigned int word_num;
6925         unsigned int bit_num;
6926
6927         if (vfid > 255 || vfid < 0)
6928                 return -EIO;
6929
6930         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6931                 word_num = vfid / 32;
6932                 bit_num  = vfid % 32;
6933                 if (clr)
6934                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6935                 else
6936                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6937         } else {
6938                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6939                 bit_num  = vfid % 32;
6940                 if (clr)
6941                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6942                 else
6943                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6944         }
6945
6946         return 0;
6947 }
6948
6949 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6950 {
6951 #define HCLGE_DESC_NUMBER 3
6952 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6953         int i, j;
6954
6955         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6956                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6957                         if (desc[i].data[j])
6958                                 return false;
6959
6960         return true;
6961 }
6962
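/* Pack a MAC address into a MAC/VLAN table entry: bytes 0-3 go into
 * mac_addr_hi32, bytes 4-5 into mac_addr_lo16, and the multicast entry
 * bits are set when is_mc is true.
 */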
6963 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6964                                    const u8 *addr, bool is_mc)
6965 {
6966         const unsigned char *mac_addr = addr;
6967         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6968                        (mac_addr[0]) | (mac_addr[1] << 8);
6969         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6970
6971         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6972         if (is_mc) {
6973                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6974                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6975         }
6976
6977         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6978         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6979 }
6980
6981 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6982                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6983 {
6984         struct hclge_dev *hdev = vport->back;
6985         struct hclge_desc desc;
6986         u8 resp_code;
6987         u16 retval;
6988         int ret;
6989
6990         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6991
6992         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6993
6994         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6995         if (ret) {
6996                 dev_err(&hdev->pdev->dev,
6997                         "del mac addr failed for cmd_send, ret =%d.\n",
6998                         ret);
6999                 return ret;
7000         }
7001         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7002         retval = le16_to_cpu(desc.retval);
7003
7004         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7005                                              HCLGE_MAC_VLAN_REMOVE);
7006 }
7007
7008 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7009                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7010                                      struct hclge_desc *desc,
7011                                      bool is_mc)
7012 {
7013         struct hclge_dev *hdev = vport->back;
7014         u8 resp_code;
7015         u16 retval;
7016         int ret;
7017
7018         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7019         if (is_mc) {
7020                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7021                 memcpy(desc[0].data,
7022                        req,
7023                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7024                 hclge_cmd_setup_basic_desc(&desc[1],
7025                                            HCLGE_OPC_MAC_VLAN_ADD,
7026                                            true);
7027                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7028                 hclge_cmd_setup_basic_desc(&desc[2],
7029                                            HCLGE_OPC_MAC_VLAN_ADD,
7030                                            true);
7031                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7032         } else {
7033                 memcpy(desc[0].data,
7034                        req,
7035                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7036                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7037         }
7038         if (ret) {
7039                 dev_err(&hdev->pdev->dev,
7040                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7041                         ret);
7042                 return ret;
7043         }
7044         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7045         retval = le16_to_cpu(desc[0].retval);
7046
7047         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7048                                              HCLGE_MAC_VLAN_LKUP);
7049 }
7050
7051 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7052                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7053                                   struct hclge_desc *mc_desc)
7054 {
7055         struct hclge_dev *hdev = vport->back;
7056         int cfg_status;
7057         u8 resp_code;
7058         u16 retval;
7059         int ret;
7060
7061         if (!mc_desc) {
7062                 struct hclge_desc desc;
7063
7064                 hclge_cmd_setup_basic_desc(&desc,
7065                                            HCLGE_OPC_MAC_VLAN_ADD,
7066                                            false);
7067                 memcpy(desc.data, req,
7068                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7069                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7070                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7071                 retval = le16_to_cpu(desc.retval);
7072
7073                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7074                                                            resp_code,
7075                                                            HCLGE_MAC_VLAN_ADD);
7076         } else {
7077                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7078                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7079                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7080                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7081                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7082                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7083                 memcpy(mc_desc[0].data, req,
7084                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7085                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7086                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7087                 retval = le16_to_cpu(mc_desc[0].retval);
7088
7089                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7090                                                            resp_code,
7091                                                            HCLGE_MAC_VLAN_ADD);
7092         }
7093
7094         if (ret) {
7095                 dev_err(&hdev->pdev->dev,
7096                         "add mac addr failed for cmd_send, ret =%d.\n",
7097                         ret);
7098                 return ret;
7099         }
7100
7101         return cfg_status;
7102 }
7103
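/* Request wanted_umv_size unicast mac vlan (UMV) entries from firmware and
 * split whatever was actually granted into a per-function private quota plus
 * a pool shared by the pf and its vfs. Illustrative example (real values
 * depend on the firmware profile): with max_umv_size = 256 and
 * num_req_vfs = 6, each function gets 256 / 8 = 32 private entries and the
 * shared pool holds 32 + 256 % 8 = 32 entries.
 */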
7104 static int hclge_init_umv_space(struct hclge_dev *hdev)
7105 {
7106         u16 allocated_size = 0;
7107         int ret;
7108
7109         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7110                                   true);
7111         if (ret)
7112                 return ret;
7113
7114         if (allocated_size < hdev->wanted_umv_size)
7115                 dev_warn(&hdev->pdev->dev,
7116                          "Alloc umv space failed, want %u, get %u\n",
7117                          hdev->wanted_umv_size, allocated_size);
7118
7119         mutex_init(&hdev->umv_mutex);
7120         hdev->max_umv_size = allocated_size;
7121         /* Divide max_umv_size by (hdev->num_req_vfs + 2) so that each
7122          * function gets a private quota of unicast mac vlan table entries
7123          * and whatever is left over forms a pool shared by the pf and its vfs.
7124          */
7125         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7126         hdev->share_umv_size = hdev->priv_umv_size +
7127                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7128
7129         return 0;
7130 }
7131
7132 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7133 {
7134         int ret;
7135
7136         if (hdev->max_umv_size > 0) {
7137                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7138                                           false);
7139                 if (ret)
7140                         return ret;
7141                 hdev->max_umv_size = 0;
7142         }
7143         mutex_destroy(&hdev->umv_mutex);
7144
7145         return 0;
7146 }
7147
7148 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7149                                u16 *allocated_size, bool is_alloc)
7150 {
7151         struct hclge_umv_spc_alc_cmd *req;
7152         struct hclge_desc desc;
7153         int ret;
7154
7155         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7156         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7157         if (!is_alloc)
7158                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7159
7160         req->space_size = cpu_to_le32(space_size);
7161
7162         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7163         if (ret) {
7164                 dev_err(&hdev->pdev->dev,
7165                         "%s umv space failed for cmd_send, ret =%d\n",
7166                         is_alloc ? "allocate" : "free", ret);
7167                 return ret;
7168         }
7169
7170         if (is_alloc && allocated_size)
7171                 *allocated_size = le32_to_cpu(desc.data[1]);
7172
7173         return 0;
7174 }
7175
7176 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7177 {
7178         struct hclge_vport *vport;
7179         int i;
7180
7181         for (i = 0; i < hdev->num_alloc_vport; i++) {
7182                 vport = &hdev->vport[i];
7183                 vport->used_umv_num = 0;
7184         }
7185
7186         mutex_lock(&hdev->umv_mutex);
7187         hdev->share_umv_size = hdev->priv_umv_size +
7188                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7189         mutex_unlock(&hdev->umv_mutex);
7190 }
7191
7192 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7193 {
7194         struct hclge_dev *hdev = vport->back;
7195         bool is_full;
7196
7197         mutex_lock(&hdev->umv_mutex);
7198         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7199                    hdev->share_umv_size == 0);
7200         mutex_unlock(&hdev->umv_mutex);
7201
7202         return is_full;
7203 }
7204
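/* Account a unicast entry against the vport's private UMV quota first; once
 * used_umv_num reaches priv_umv_size, further entries are charged to the
 * shared pool. Freeing an entry (is_free) reverses the accounting.
 */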
7205 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7206 {
7207         struct hclge_dev *hdev = vport->back;
7208
7209         mutex_lock(&hdev->umv_mutex);
7210         if (is_free) {
7211                 if (vport->used_umv_num > hdev->priv_umv_size)
7212                         hdev->share_umv_size++;
7213
7214                 if (vport->used_umv_num > 0)
7215                         vport->used_umv_num--;
7216         } else {
7217                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7218                     hdev->share_umv_size > 0)
7219                         hdev->share_umv_size--;
7220                 vport->used_umv_num++;
7221         }
7222         mutex_unlock(&hdev->umv_mutex);
7223 }
7224
7225 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7226                              const unsigned char *addr)
7227 {
7228         struct hclge_vport *vport = hclge_get_vport(handle);
7229
7230         return hclge_add_uc_addr_common(vport, addr);
7231 }
7232
7233 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7234                              const unsigned char *addr)
7235 {
7236         struct hclge_dev *hdev = vport->back;
7237         struct hclge_mac_vlan_tbl_entry_cmd req;
7238         struct hclge_desc desc;
7239         u16 egress_port = 0;
7240         int ret;
7241
7242         /* mac addr check */
7243         if (is_zero_ether_addr(addr) ||
7244             is_broadcast_ether_addr(addr) ||
7245             is_multicast_ether_addr(addr)) {
7246                 dev_err(&hdev->pdev->dev,
7247                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7248                          addr, is_zero_ether_addr(addr),
7249                          is_broadcast_ether_addr(addr),
7250                          is_multicast_ether_addr(addr));
7251                 return -EINVAL;
7252         }
7253
7254         memset(&req, 0, sizeof(req));
7255
7256         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7257                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7258
7259         req.egress_port = cpu_to_le16(egress_port);
7260
7261         hclge_prepare_mac_addr(&req, addr, false);
7262
7263         /* Lookup the mac address in the mac_vlan table, and add
7264          * it if the entry does not exist. Duplicate unicast entries
7265          * are not allowed in the mac vlan table.
7266          */
7267         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7268         if (ret == -ENOENT) {
7269                 if (!hclge_is_umv_space_full(vport)) {
7270                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7271                         if (!ret)
7272                                 hclge_update_umv_space(vport, false);
7273                         return ret;
7274                 }
7275
7276                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7277                         hdev->priv_umv_size);
7278
7279                 return -ENOSPC;
7280         }
7281
7282         /* check if we just hit the duplicate */
7283         if (!ret) {
7284                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7285                          vport->vport_id, addr);
7286                 return 0;
7287         }
7288
7289         dev_err(&hdev->pdev->dev,
7290                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7291                 addr);
7292
7293         return ret;
7294 }
7295
7296 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7297                             const unsigned char *addr)
7298 {
7299         struct hclge_vport *vport = hclge_get_vport(handle);
7300
7301         return hclge_rm_uc_addr_common(vport, addr);
7302 }
7303
7304 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7305                             const unsigned char *addr)
7306 {
7307         struct hclge_dev *hdev = vport->back;
7308         struct hclge_mac_vlan_tbl_entry_cmd req;
7309         int ret;
7310
7311         /* mac addr check */
7312         if (is_zero_ether_addr(addr) ||
7313             is_broadcast_ether_addr(addr) ||
7314             is_multicast_ether_addr(addr)) {
7315                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7316                         addr);
7317                 return -EINVAL;
7318         }
7319
7320         memset(&req, 0, sizeof(req));
7321         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7322         hclge_prepare_mac_addr(&req, addr, false);
7323         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7324         if (!ret)
7325                 hclge_update_umv_space(vport, true);
7326
7327         return ret;
7328 }
7329
7330 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7331                              const unsigned char *addr)
7332 {
7333         struct hclge_vport *vport = hclge_get_vport(handle);
7334
7335         return hclge_add_mc_addr_common(vport, addr);
7336 }
7337
7338 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7339                              const unsigned char *addr)
7340 {
7341         struct hclge_dev *hdev = vport->back;
7342         struct hclge_mac_vlan_tbl_entry_cmd req;
7343         struct hclge_desc desc[3];
7344         int status;
7345
7346         /* mac addr check */
7347         if (!is_multicast_ether_addr(addr)) {
7348                 dev_err(&hdev->pdev->dev,
7349                         "Add mc mac err! invalid mac:%pM.\n",
7350                          addr);
7351                 return -EINVAL;
7352         }
7353         memset(&req, 0, sizeof(req));
7354         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7355         hclge_prepare_mac_addr(&req, addr, true);
7356         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7357         if (status) {
7358                 /* This mac addr does not exist, add a new entry for it */
7359                 memset(desc[0].data, 0, sizeof(desc[0].data));
7360                 memset(desc[1].data, 0, sizeof(desc[1].data));
7361                 memset(desc[2].data, 0, sizeof(desc[2].data));
7362         }
7363         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7364         if (status)
7365                 return status;
7366         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7367
7368         if (status == -ENOSPC)
7369                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7370
7371         return status;
7372 }
7373
7374 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7375                             const unsigned char *addr)
7376 {
7377         struct hclge_vport *vport = hclge_get_vport(handle);
7378
7379         return hclge_rm_mc_addr_common(vport, addr);
7380 }
7381
7382 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7383                             const unsigned char *addr)
7384 {
7385         struct hclge_dev *hdev = vport->back;
7386         struct hclge_mac_vlan_tbl_entry_cmd req;
7387         struct hclge_desc desc[3];
7388         int status;
7389
7390         /* mac addr check */
7391         if (!is_multicast_ether_addr(addr)) {
7392                 dev_dbg(&hdev->pdev->dev,
7393                         "Remove mc mac err! invalid mac:%pM.\n",
7394                          addr);
7395                 return -EINVAL;
7396         }
7397
7398         memset(&req, 0, sizeof(req));
7399         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7400         hclge_prepare_mac_addr(&req, addr, true);
7401         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7402         if (!status) {
7403                 /* This mac addr exists, remove this vport's VFID bit from it */
7404                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7405                 if (status)
7406                         return status;
7407
7408                 if (hclge_is_all_function_id_zero(desc))
7409                         /* All the vfids are zero, so delete this entry */
7410                         status = hclge_remove_mac_vlan_tbl(vport, &req);
7411                 else
7412                         /* Not all the vfids are zero, so just update the vfid bitmap */
7413                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7414
7415         } else {
7416                 /* This mac address may be in the mta table, but it cannot be
7417                  * deleted here because an mta entry represents an address
7418                  * range rather than a specific address. The delete action for
7419                  * all entries takes effect in update_mta_status, called by
7420                  * hns3_nic_set_rx_mode.
7421                  */
7422                 status = 0;
7423         }
7424
7425         return status;
7426 }
7427
7428 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7429                                enum HCLGE_MAC_ADDR_TYPE mac_type)
7430 {
7431         struct hclge_vport_mac_addr_cfg *mac_cfg;
7432         struct list_head *list;
7433
7434         if (!vport->vport_id)
7435                 return;
7436
7437         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7438         if (!mac_cfg)
7439                 return;
7440
7441         mac_cfg->hd_tbl_status = true;
7442         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7443
7444         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7445                &vport->uc_mac_list : &vport->mc_mac_list;
7446
7447         list_add_tail(&mac_cfg->node, list);
7448 }
7449
7450 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7451                               bool is_write_tbl,
7452                               enum HCLGE_MAC_ADDR_TYPE mac_type)
7453 {
7454         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7455         struct list_head *list;
7456         bool uc_flag, mc_flag;
7457
7458         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7459                &vport->uc_mac_list : &vport->mc_mac_list;
7460
7461         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7462         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7463
7464         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7465                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7466                         if (uc_flag && mac_cfg->hd_tbl_status)
7467                                 hclge_rm_uc_addr_common(vport, mac_addr);
7468
7469                         if (mc_flag && mac_cfg->hd_tbl_status)
7470                                 hclge_rm_mc_addr_common(vport, mac_addr);
7471
7472                         list_del(&mac_cfg->node);
7473                         kfree(mac_cfg);
7474                         break;
7475                 }
7476         }
7477 }
7478
7479 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7480                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7481 {
7482         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7483         struct list_head *list;
7484
7485         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7486                &vport->uc_mac_list : &vport->mc_mac_list;
7487
7488         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7489                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7490                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7491
7492                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7493                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7494
7495                 mac_cfg->hd_tbl_status = false;
7496                 if (is_del_list) {
7497                         list_del(&mac_cfg->node);
7498                         kfree(mac_cfg);
7499                 }
7500         }
7501 }
7502
7503 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7504 {
7505         struct hclge_vport_mac_addr_cfg *mac, *tmp;
7506         struct hclge_vport *vport;
7507         int i;
7508
7509         for (i = 0; i < hdev->num_alloc_vport; i++) {
7510                 vport = &hdev->vport[i];
7511                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7512                         list_del(&mac->node);
7513                         kfree(mac);
7514                 }
7515
7516                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7517                         list_del(&mac->node);
7518                         kfree(mac);
7519                 }
7520         }
7521 }
7522
7523 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7524                                               u16 cmdq_resp, u8 resp_code)
7525 {
7526 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
7527 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
7528 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
7529 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
7530
7531         int return_status;
7532
7533         if (cmdq_resp) {
7534                 dev_err(&hdev->pdev->dev,
7535                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7536                         cmdq_resp);
7537                 return -EIO;
7538         }
7539
7540         switch (resp_code) {
7541         case HCLGE_ETHERTYPE_SUCCESS_ADD:
7542         case HCLGE_ETHERTYPE_ALREADY_ADD:
7543                 return_status = 0;
7544                 break;
7545         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7546                 dev_err(&hdev->pdev->dev,
7547                         "add mac ethertype failed for manager table overflow.\n");
7548                 return_status = -EIO;
7549                 break;
7550         case HCLGE_ETHERTYPE_KEY_CONFLICT:
7551                 dev_err(&hdev->pdev->dev,
7552                         "add mac ethertype failed for key conflict.\n");
7553                 return_status = -EIO;
7554                 break;
7555         default:
7556                 dev_err(&hdev->pdev->dev,
7557                         "add mac ethertype failed for undefined, code=%u.\n",
7558                         resp_code);
7559                 return_status = -EIO;
7560         }
7561
7562         return return_status;
7563 }
7564
7565 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7566                                      u8 *mac_addr)
7567 {
7568         struct hclge_mac_vlan_tbl_entry_cmd req;
7569         struct hclge_dev *hdev = vport->back;
7570         struct hclge_desc desc;
7571         u16 egress_port = 0;
7572         int i;
7573
7574         if (is_zero_ether_addr(mac_addr))
7575                 return false;
7576
7577         memset(&req, 0, sizeof(req));
7578         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7579                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7580         req.egress_port = cpu_to_le16(egress_port);
7581         hclge_prepare_mac_addr(&req, mac_addr, false);
7582
7583         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7584                 return true;
7585
7586         vf_idx += HCLGE_VF_VPORT_START_NUM;
7587         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7588                 if (i != vf_idx &&
7589                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7590                         return true;
7591
7592         return false;
7593 }
7594
7595 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7596                             u8 *mac_addr)
7597 {
7598         struct hclge_vport *vport = hclge_get_vport(handle);
7599         struct hclge_dev *hdev = vport->back;
7600
7601         vport = hclge_get_vf_vport(hdev, vf);
7602         if (!vport)
7603                 return -EINVAL;
7604
7605         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7606                 dev_info(&hdev->pdev->dev,
7607                          "Specified MAC(=%pM) is same as before, no change committed!\n",
7608                          mac_addr);
7609                 return 0;
7610         }
7611
7612         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7613                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7614                         mac_addr);
7615                 return -EEXIST;
7616         }
7617
7618         ether_addr_copy(vport->vf_info.mac, mac_addr);
7619         dev_info(&hdev->pdev->dev,
7620                  "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7621                  vf, mac_addr);
7622
7623         return hclge_inform_reset_assert_to_vf(vport);
7624 }
7625
7626 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7627                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
7628 {
7629         struct hclge_desc desc;
7630         u8 resp_code;
7631         u16 retval;
7632         int ret;
7633
7634         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7635         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7636
7637         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7638         if (ret) {
7639                 dev_err(&hdev->pdev->dev,
7640                         "add mac ethertype failed for cmd_send, ret =%d.\n",
7641                         ret);
7642                 return ret;
7643         }
7644
7645         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7646         retval = le16_to_cpu(desc.retval);
7647
7648         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7649 }
7650
7651 static int init_mgr_tbl(struct hclge_dev *hdev)
7652 {
7653         int ret;
7654         int i;
7655
7656         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7657                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7658                 if (ret) {
7659                         dev_err(&hdev->pdev->dev,
7660                                 "add mac ethertype failed, ret =%d.\n",
7661                                 ret);
7662                         return ret;
7663                 }
7664         }
7665
7666         return 0;
7667 }
7668
7669 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7670 {
7671         struct hclge_vport *vport = hclge_get_vport(handle);
7672         struct hclge_dev *hdev = vport->back;
7673
7674         ether_addr_copy(p, hdev->hw.mac.mac_addr);
7675 }
7676
7677 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7678                               bool is_first)
7679 {
7680         const unsigned char *new_addr = (const unsigned char *)p;
7681         struct hclge_vport *vport = hclge_get_vport(handle);
7682         struct hclge_dev *hdev = vport->back;
7683         int ret;
7684
7685         /* mac addr check */
7686         if (is_zero_ether_addr(new_addr) ||
7687             is_broadcast_ether_addr(new_addr) ||
7688             is_multicast_ether_addr(new_addr)) {
7689                 dev_err(&hdev->pdev->dev,
7690                         "Change uc mac err! invalid mac:%pM.\n",
7691                          new_addr);
7692                 return -EINVAL;
7693         }
7694
7695         if ((!is_first || is_kdump_kernel()) &&
7696             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7697                 dev_warn(&hdev->pdev->dev,
7698                          "remove old uc mac address fail.\n");
7699
7700         ret = hclge_add_uc_addr(handle, new_addr);
7701         if (ret) {
7702                 dev_err(&hdev->pdev->dev,
7703                         "add uc mac address fail, ret =%d.\n",
7704                         ret);
7705
7706                 if (!is_first &&
7707                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7708                         dev_err(&hdev->pdev->dev,
7709                                 "restore uc mac address fail.\n");
7710
7711                 return -EIO;
7712         }
7713
7714         ret = hclge_pause_addr_cfg(hdev, new_addr);
7715         if (ret) {
7716                 dev_err(&hdev->pdev->dev,
7717                         "configure mac pause address fail, ret =%d.\n",
7718                         ret);
7719                 return -EIO;
7720         }
7721
7722         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7723
7724         return 0;
7725 }
7726
7727 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7728                           int cmd)
7729 {
7730         struct hclge_vport *vport = hclge_get_vport(handle);
7731         struct hclge_dev *hdev = vport->back;
7732
7733         if (!hdev->hw.mac.phydev)
7734                 return -EOPNOTSUPP;
7735
7736         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7737 }
7738
7739 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7740                                       u8 fe_type, bool filter_en, u8 vf_id)
7741 {
7742         struct hclge_vlan_filter_ctrl_cmd *req;
7743         struct hclge_desc desc;
7744         int ret;
7745
7746         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7747
7748         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7749         req->vlan_type = vlan_type;
7750         req->vlan_fe = filter_en ? fe_type : 0;
7751         req->vf_id = vf_id;
7752
7753         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7754         if (ret)
7755                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7756                         ret);
7757
7758         return ret;
7759 }
7760
7761 #define HCLGE_FILTER_TYPE_VF            0
7762 #define HCLGE_FILTER_TYPE_PORT          1
7763 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7764 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7765 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7766 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7767 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7768 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7769                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7770 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7771                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7772
7773 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7774 {
7775         struct hclge_vport *vport = hclge_get_vport(handle);
7776         struct hclge_dev *hdev = vport->back;
7777
7778         if (hdev->pdev->revision >= 0x21) {
7779                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7780                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7781                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7782                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7783         } else {
7784                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7785                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7786                                            0);
7787         }
7788         if (enable)
7789                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7790         else
7791                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7792 }
7793
7794 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7795                                     bool is_kill, u16 vlan,
7796                                     __be16 proto)
7797 {
7798         struct hclge_vport *vport = &hdev->vport[vfid];
7799         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7800         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7801         struct hclge_desc desc[2];
7802         u8 vf_byte_val;
7803         u8 vf_byte_off;
7804         int ret;
7805
7806         /* If the vf vlan table is full, firmware disables the vf vlan filter,
7807          * so adding a new vlan id to it is neither possible nor necessary.
7808          * If spoof check is enabled and the vf vlan table is full, a new vlan
7809          * must not be added, because tx packets with that vlan id are dropped.
7810          */
7811         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7812                 if (vport->vf_info.spoofchk && vlan) {
7813                         dev_err(&hdev->pdev->dev,
7814                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7815                         return -EPERM;
7816                 }
7817                 return 0;
7818         }
7819
7820         hclge_cmd_setup_basic_desc(&desc[0],
7821                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7822         hclge_cmd_setup_basic_desc(&desc[1],
7823                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7824
7825         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7826
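        /* The vf bitmap spans both descriptors: the target vf's bit is byte
         * vfid / 8, bit vfid % 8, and lands in desc[0] when the byte offset
         * is below HCLGE_MAX_VF_BYTES, otherwise in desc[1]; e.g. vfid 10
         * yields byte offset 1 and bit 2.
         */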
7827         vf_byte_off = vfid / 8;
7828         vf_byte_val = 1 << (vfid % 8);
7829
7830         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7831         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7832
7833         req0->vlan_id  = cpu_to_le16(vlan);
7834         req0->vlan_cfg = is_kill;
7835
7836         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7837                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7838         else
7839                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7840
7841         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7842         if (ret) {
7843                 dev_err(&hdev->pdev->dev,
7844                         "Send vf vlan command fail, ret =%d.\n",
7845                         ret);
7846                 return ret;
7847         }
7848
7849         if (!is_kill) {
7850 #define HCLGE_VF_VLAN_NO_ENTRY  2
7851                 if (!req0->resp_code || req0->resp_code == 1)
7852                         return 0;
7853
7854                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7855                         set_bit(vfid, hdev->vf_vlan_full);
7856                         dev_warn(&hdev->pdev->dev,
7857                                  "vf vlan table is full, vf vlan filter is disabled\n");
7858                         return 0;
7859                 }
7860
7861                 dev_err(&hdev->pdev->dev,
7862                         "Add vf vlan filter fail, ret =%u.\n",
7863                         req0->resp_code);
7864         } else {
7865 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7866                 if (!req0->resp_code)
7867                         return 0;
7868
7869                 /* The vf vlan filter is disabled when the vf vlan table is
7870                  * full, so new vlan ids are never added to the table. Just
7871                  * return 0 without a warning to avoid flooding the log with
7872                  * messages at unload time.
7873                  */
7874                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7875                         return 0;
7876
7877                 dev_err(&hdev->pdev->dev,
7878                         "Kill vf vlan filter fail, ret =%u.\n",
7879                         req0->resp_code);
7880         }
7881
7882         return -EIO;
7883 }
7884
7885 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7886                                       u16 vlan_id, bool is_kill)
7887 {
7888         struct hclge_vlan_filter_pf_cfg_cmd *req;
7889         struct hclge_desc desc;
7890         u8 vlan_offset_byte_val;
7891         u8 vlan_offset_byte;
7892         u8 vlan_offset_160;
7893         int ret;
7894
7895         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7896
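        /* Decompose the vlan id into a block of HCLGE_VLAN_ID_OFFSET_STEP
         * ids, a byte inside that block and a bit inside that byte. For
         * example, assuming the step is 160 (as the name vlan_offset_160
         * suggests) and HCLGE_VLAN_BYTE_SIZE is 8, vlan 300 maps to block 1,
         * byte 17, bit 4.
         */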
7897         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7898         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7899                            HCLGE_VLAN_BYTE_SIZE;
7900         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7901
7902         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7903         req->vlan_offset = vlan_offset_160;
7904         req->vlan_cfg = is_kill;
7905         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7906
7907         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7908         if (ret)
7909                 dev_err(&hdev->pdev->dev,
7910                         "port vlan command, send fail, ret =%d.\n", ret);
7911         return ret;
7912 }
7913
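/* Program the per-vf vlan filter and track vlan membership per vport in
 * hdev->vlan_table[vlan_id]. The port-level vlan filter is only touched when
 * the first vport joins the vlan (vport_num == 1 on add) or the last vport
 * leaves it (vport_num == 0 on kill).
 */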
7914 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7915                                     u16 vport_id, u16 vlan_id,
7916                                     bool is_kill)
7917 {
7918         u16 vport_idx, vport_num = 0;
7919         int ret;
7920
7921         if (is_kill && !vlan_id)
7922                 return 0;
7923
7924         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7925                                        proto);
7926         if (ret) {
7927                 dev_err(&hdev->pdev->dev,
7928                         "Set %u vport vlan filter config fail, ret =%d.\n",
7929                         vport_id, ret);
7930                 return ret;
7931         }
7932
7933         /* vlan 0 may be added twice when 8021q module is enabled */
7934         if (!is_kill && !vlan_id &&
7935             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7936                 return 0;
7937
7938         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7939                 dev_err(&hdev->pdev->dev,
7940                         "Add port vlan failed, vport %u is already in vlan %u\n",
7941                         vport_id, vlan_id);
7942                 return -EINVAL;
7943         }
7944
7945         if (is_kill &&
7946             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7947                 dev_err(&hdev->pdev->dev,
7948                         "Delete port vlan failed, vport %u is not in vlan %u\n",
7949                         vport_id, vlan_id);
7950                 return -EINVAL;
7951         }
7952
7953         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7954                 vport_num++;
7955
7956         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7957                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7958                                                  is_kill);
7959
7960         return ret;
7961 }
7962
7963 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7964 {
7965         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7966         struct hclge_vport_vtag_tx_cfg_cmd *req;
7967         struct hclge_dev *hdev = vport->back;
7968         struct hclge_desc desc;
7969         u16 bmap_index;
7970         int status;
7971
7972         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7973
7974         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7975         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7976         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7977         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7978                       vcfg->accept_tag1 ? 1 : 0);
7979         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7980                       vcfg->accept_untag1 ? 1 : 0);
7981         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7982                       vcfg->accept_tag2 ? 1 : 0);
7983         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7984                       vcfg->accept_untag2 ? 1 : 0);
7985         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7986                       vcfg->insert_tag1_en ? 1 : 0);
7987         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7988                       vcfg->insert_tag2_en ? 1 : 0);
7989         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7990
7991         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7992         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7993                         HCLGE_VF_NUM_PER_BYTE;
7994         req->vf_bitmap[bmap_index] =
7995                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7996
7997         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7998         if (status)
7999                 dev_err(&hdev->pdev->dev,
8000                         "Send port txvlan cfg command fail, ret =%d\n",
8001                         status);
8002
8003         return status;
8004 }
8005
8006 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8007 {
8008         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8009         struct hclge_vport_vtag_rx_cfg_cmd *req;
8010         struct hclge_dev *hdev = vport->back;
8011         struct hclge_desc desc;
8012         u16 bmap_index;
8013         int status;
8014
8015         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8016
8017         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8018         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8019                       vcfg->strip_tag1_en ? 1 : 0);
8020         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8021                       vcfg->strip_tag2_en ? 1 : 0);
8022         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8023                       vcfg->vlan1_vlan_prionly ? 1 : 0);
8024         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8025                       vcfg->vlan2_vlan_prionly ? 1 : 0);
8026
8027         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8028         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8029                         HCLGE_VF_NUM_PER_BYTE;
8030         req->vf_bitmap[bmap_index] =
8031                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8032
8033         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8034         if (status)
8035                 dev_err(&hdev->pdev->dev,
8036                         "Send port rxvlan cfg command fail, ret =%d\n",
8037                         status);
8038
8039         return status;
8040 }
8041
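/* Derive the tx/rx vlan tag offload settings from the port based vlan state:
 * when it is disabled, tag1 from the stack is accepted as is and no default
 * tag is inserted; when it is enabled, hardware inserts the port based vlan
 * as tag1 on tx and strips tag1 on rx (if rx vlan offload is enabled).
 */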
8042 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8043                                   u16 port_base_vlan_state,
8044                                   u16 vlan_tag)
8045 {
8046         int ret;
8047
8048         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8049                 vport->txvlan_cfg.accept_tag1 = true;
8050                 vport->txvlan_cfg.insert_tag1_en = false;
8051                 vport->txvlan_cfg.default_tag1 = 0;
8052         } else {
8053                 vport->txvlan_cfg.accept_tag1 = false;
8054                 vport->txvlan_cfg.insert_tag1_en = true;
8055                 vport->txvlan_cfg.default_tag1 = vlan_tag;
8056         }
8057
8058         vport->txvlan_cfg.accept_untag1 = true;
8059
8060         /* accept_tag2 and accept_untag2 are not supported on
8061          * pdev revision(0x20); newer revisions support them, but
8062          * these two fields cannot be configured by the user.
8063          */
8064         vport->txvlan_cfg.accept_tag2 = true;
8065         vport->txvlan_cfg.accept_untag2 = true;
8066         vport->txvlan_cfg.insert_tag2_en = false;
8067         vport->txvlan_cfg.default_tag2 = 0;
8068
8069         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8070                 vport->rxvlan_cfg.strip_tag1_en = false;
8071                 vport->rxvlan_cfg.strip_tag2_en =
8072                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8073         } else {
8074                 vport->rxvlan_cfg.strip_tag1_en =
8075                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8076                 vport->rxvlan_cfg.strip_tag2_en = true;
8077         }
8078         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8079         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8080
8081         ret = hclge_set_vlan_tx_offload_cfg(vport);
8082         if (ret)
8083                 return ret;
8084
8085         return hclge_set_vlan_rx_offload_cfg(vport);
8086 }
8087
8088 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8089 {
8090         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8091         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8092         struct hclge_desc desc;
8093         int status;
8094
8095         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8096         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8097         rx_req->ot_fst_vlan_type =
8098                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8099         rx_req->ot_sec_vlan_type =
8100                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8101         rx_req->in_fst_vlan_type =
8102                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8103         rx_req->in_sec_vlan_type =
8104                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8105
8106         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8107         if (status) {
8108                 dev_err(&hdev->pdev->dev,
8109                         "Send rxvlan protocol type command fail, ret =%d\n",
8110                         status);
8111                 return status;
8112         }
8113
8114         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8115
8116         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8117         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8118         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8119
8120         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8121         if (status)
8122                 dev_err(&hdev->pdev->dev,
8123                         "Send txvlan protocol type command fail, ret =%d\n",
8124                         status);
8125
8126         return status;
8127 }
8128
8129 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8130 {
8131 #define HCLGE_DEF_VLAN_TYPE             0x8100
8132
8133         struct hnae3_handle *handle = &hdev->vport[0].nic;
8134         struct hclge_vport *vport;
8135         int ret;
8136         int i;
8137
8138         if (hdev->pdev->revision >= 0x21) {
8139                 /* for revision 0x21, vf vlan filter is per function */
8140                 for (i = 0; i < hdev->num_alloc_vport; i++) {
8141                         vport = &hdev->vport[i];
8142                         ret = hclge_set_vlan_filter_ctrl(hdev,
8143                                                          HCLGE_FILTER_TYPE_VF,
8144                                                          HCLGE_FILTER_FE_EGRESS,
8145                                                          true,
8146                                                          vport->vport_id);
8147                         if (ret)
8148                                 return ret;
8149                 }
8150
8151                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8152                                                  HCLGE_FILTER_FE_INGRESS, true,
8153                                                  0);
8154                 if (ret)
8155                         return ret;
8156         } else {
8157                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8158                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
8159                                                  true, 0);
8160                 if (ret)
8161                         return ret;
8162         }
8163
8164         handle->netdev_flags |= HNAE3_VLAN_FLTR;
8165
8166         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8167         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8168         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8169         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8170         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8171         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8172
8173         ret = hclge_set_vlan_protocol_type(hdev);
8174         if (ret)
8175                 return ret;
8176
8177         for (i = 0; i < hdev->num_alloc_vport; i++) {
8178                 u16 vlan_tag;
8179
8180                 vport = &hdev->vport[i];
8181                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8182
8183                 ret = hclge_vlan_offload_cfg(vport,
8184                                              vport->port_base_vlan_cfg.state,
8185                                              vlan_tag);
8186                 if (ret)
8187                         return ret;
8188         }
8189
8190         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8191 }
8192
8193 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8194                                        bool writen_to_tbl)
8195 {
8196         struct hclge_vport_vlan_cfg *vlan;
8197
8198         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8199         if (!vlan)
8200                 return;
8201
8202         vlan->hd_tbl_status = writen_to_tbl;
8203         vlan->vlan_id = vlan_id;
8204
8205         list_add_tail(&vlan->node, &vport->vlan_list);
8206 }
8207
8208 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8209 {
8210         struct hclge_vport_vlan_cfg *vlan, *tmp;
8211         struct hclge_dev *hdev = vport->back;
8212         int ret;
8213
8214         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8215                 if (!vlan->hd_tbl_status) {
8216                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8217                                                        vport->vport_id,
8218                                                        vlan->vlan_id, false);
8219                         if (ret) {
8220                                 dev_err(&hdev->pdev->dev,
8221                                         "restore vport vlan list failed, ret=%d\n",
8222                                         ret);
8223                                 return ret;
8224                         }
8225                 }
8226                 vlan->hd_tbl_status = true;
8227         }
8228
8229         return 0;
8230 }
8231
8232 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8233                                       bool is_write_tbl)
8234 {
8235         struct hclge_vport_vlan_cfg *vlan, *tmp;
8236         struct hclge_dev *hdev = vport->back;
8237
8238         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8239                 if (vlan->vlan_id == vlan_id) {
8240                         if (is_write_tbl && vlan->hd_tbl_status)
8241                                 hclge_set_vlan_filter_hw(hdev,
8242                                                          htons(ETH_P_8021Q),
8243                                                          vport->vport_id,
8244                                                          vlan_id,
8245                                                          true);
8246
8247                         list_del(&vlan->node);
8248                         kfree(vlan);
8249                         break;
8250                 }
8251         }
8252 }
8253
8254 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8255 {
8256         struct hclge_vport_vlan_cfg *vlan, *tmp;
8257         struct hclge_dev *hdev = vport->back;
8258
8259         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8260                 if (vlan->hd_tbl_status)
8261                         hclge_set_vlan_filter_hw(hdev,
8262                                                  htons(ETH_P_8021Q),
8263                                                  vport->vport_id,
8264                                                  vlan->vlan_id,
8265                                                  true);
8266
8267                 vlan->hd_tbl_status = false;
8268                 if (is_del_list) {
8269                         list_del(&vlan->node);
8270                         kfree(vlan);
8271                 }
8272         }
8273 }
8274
8275 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8276 {
8277         struct hclge_vport_vlan_cfg *vlan, *tmp;
8278         struct hclge_vport *vport;
8279         int i;
8280
8281         for (i = 0; i < hdev->num_alloc_vport; i++) {
8282                 vport = &hdev->vport[i];
8283                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8284                         list_del(&vlan->node);
8285                         kfree(vlan);
8286                 }
8287         }
8288 }
8289
8290 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8291 {
8292         struct hclge_vport *vport = hclge_get_vport(handle);
8293         struct hclge_vport_vlan_cfg *vlan, *tmp;
8294         struct hclge_dev *hdev = vport->back;
8295         u16 vlan_proto;
8296         u16 state, vlan_id;
8297         int i;
8298
8299         for (i = 0; i < hdev->num_alloc_vport; i++) {
8300                 vport = &hdev->vport[i];
8301                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8302                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8303                 state = vport->port_base_vlan_cfg.state;
8304
8305                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8306                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8307                                                  vport->vport_id, vlan_id,
8308                                                  false);
8309                         continue;
8310                 }
8311
8312                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8313                         int ret;
8314
8315                         if (!vlan->hd_tbl_status)
8316                                 continue;
8317                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8318                                                        vport->vport_id,
8319                                                        vlan->vlan_id, false);
8320                         if (ret)
8321                                 break;
8322                 }
8323         }
8324 }
8325
8326 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8327 {
8328         struct hclge_vport *vport = hclge_get_vport(handle);
8329
8330         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8331                 vport->rxvlan_cfg.strip_tag1_en = false;
8332                 vport->rxvlan_cfg.strip_tag2_en = enable;
8333         } else {
8334                 vport->rxvlan_cfg.strip_tag1_en = enable;
8335                 vport->rxvlan_cfg.strip_tag2_en = true;
8336         }
8337         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8338         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8339         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8340
8341         return hclge_set_vlan_rx_offload_cfg(vport);
8342 }
8343
8344 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8345                                             u16 port_base_vlan_state,
8346                                             struct hclge_vlan_info *new_info,
8347                                             struct hclge_vlan_info *old_info)
8348 {
8349         struct hclge_dev *hdev = vport->back;
8350         int ret;
8351
8352         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8353                 hclge_rm_vport_all_vlan_table(vport, false);
8354                 return hclge_set_vlan_filter_hw(hdev,
8355                                                  htons(new_info->vlan_proto),
8356                                                  vport->vport_id,
8357                                                  new_info->vlan_tag,
8358                                                  false);
8359         }
8360
8361         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8362                                        vport->vport_id, old_info->vlan_tag,
8363                                        true);
8364         if (ret)
8365                 return ret;
8366
8367         return hclge_add_vport_all_vlan_table(vport);
8368 }
8369
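/* Apply a new port based vlan configuration to the vport. For the MODIFY
 * case the new tag is added to the hardware filter and the old one removed,
 * keeping the current state; for enable/disable the filter entries are
 * rebuilt via hclge_update_vlan_filter_entries() and the port base vlan
 * state is updated.
 */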
8370 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8371                                     struct hclge_vlan_info *vlan_info)
8372 {
8373         struct hnae3_handle *nic = &vport->nic;
8374         struct hclge_vlan_info *old_vlan_info;
8375         struct hclge_dev *hdev = vport->back;
8376         int ret;
8377
8378         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8379
8380         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8381         if (ret)
8382                 return ret;
8383
8384         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8385                 /* add new VLAN tag */
8386                 ret = hclge_set_vlan_filter_hw(hdev,
8387                                                htons(vlan_info->vlan_proto),
8388                                                vport->vport_id,
8389                                                vlan_info->vlan_tag,
8390                                                false);
8391                 if (ret)
8392                         return ret;
8393
8394                 /* remove old VLAN tag */
8395                 ret = hclge_set_vlan_filter_hw(hdev,
8396                                                htons(old_vlan_info->vlan_proto),
8397                                                vport->vport_id,
8398                                                old_vlan_info->vlan_tag,
8399                                                true);
8400                 if (ret)
8401                         return ret;
8402
8403                 goto update;
8404         }
8405
8406         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8407                                                old_vlan_info);
8408         if (ret)
8409                 return ret;
8410
8411         /* update state only when disable/enable port based VLAN */
8412         vport->port_base_vlan_cfg.state = state;
8413         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8414                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8415         else
8416                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8417
8418 update:
8419         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8420         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8421         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8422
8423         return 0;
8424 }
8425
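/* Derive the required port based VLAN transition from the current state and
 * the requested VLAN id:
 *
 *   current state | vlan == 0 | vlan == current tag | other vlan
 *   --------------+-----------+---------------------+-----------
 *   DISABLE       | NOCHANGE  | ENABLE              | ENABLE
 *   ENABLE        | DISABLE   | NOCHANGE            | MODIFY
 */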
8426 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8427                                           enum hnae3_port_base_vlan_state state,
8428                                           u16 vlan)
8429 {
8430         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8431                 if (!vlan)
8432                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8433                 else
8434                         return HNAE3_PORT_BASE_VLAN_ENABLE;
8435         } else {
8436                 if (!vlan)
8437                         return HNAE3_PORT_BASE_VLAN_DISABLE;
8438                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8439                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8440                 else
8441                         return HNAE3_PORT_BASE_VLAN_MODIFY;
8442         }
8443 }
8444
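/* Handle a host request to set a VF's port based VLAN, typically reached
 * from the PF's ndo_set_vf_vlan path, e.g. "ip link set <pf> vf 0 vlan 100
 * qos 3" (command shown for illustration). Only 802.1Q is supported; the new
 * setting is applied directly when the VF is not alive, otherwise it is
 * pushed to the VF through the mailbox.
 */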
8445 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8446                                     u16 vlan, u8 qos, __be16 proto)
8447 {
8448         struct hclge_vport *vport = hclge_get_vport(handle);
8449         struct hclge_dev *hdev = vport->back;
8450         struct hclge_vlan_info vlan_info;
8451         u16 state;
8452         int ret;
8453
8454         if (hdev->pdev->revision == 0x20)
8455                 return -EOPNOTSUPP;
8456
8457         vport = hclge_get_vf_vport(hdev, vfid);
8458         if (!vport)
8459                 return -EINVAL;
8460
8461         /* qos is a 3-bit value, so it cannot be bigger than 7 */
8462         if (vlan > VLAN_N_VID - 1 || qos > 7)
8463                 return -EINVAL;
8464         if (proto != htons(ETH_P_8021Q))
8465                 return -EPROTONOSUPPORT;
8466
8467         state = hclge_get_port_base_vlan_state(vport,
8468                                                vport->port_base_vlan_cfg.state,
8469                                                vlan);
8470         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8471                 return 0;
8472
8473         vlan_info.vlan_tag = vlan;
8474         vlan_info.qos = qos;
8475         vlan_info.vlan_proto = ntohs(proto);
8476
8477         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8478                 return hclge_update_port_base_vlan_cfg(vport, state,
8479                                                        &vlan_info);
8480         } else {
8481                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8482                                                         vport->vport_id, state,
8483                                                         vlan, qos,
8484                                                         ntohs(proto));
8485                 return ret;
8486         }
8487 }
8488
8489 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8490                           u16 vlan_id, bool is_kill)
8491 {
8492         struct hclge_vport *vport = hclge_get_vport(handle);
8493         struct hclge_dev *hdev = vport->back;
8494         bool written_to_tbl = false;
8495         int ret = 0;
8496
8497         /* When the device is resetting, the firmware is unable to handle
8498          * the mailbox. Just record the VLAN id, and remove it after the
8499          * reset has finished.
8500          */
8501         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8502                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8503                 return -EBUSY;
8504         }
8505
8506         /* When port based VLAN is enabled, we use it as the VLAN filter
8507          * entry. In this case, we don't update the VLAN filter table when
8508          * the user adds or removes a VLAN; we just update the vport VLAN
8509          * list. The VLAN ids in that list are only written to the VLAN
8510          * filter table once port based VLAN is disabled.
8511          */
8512         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8513                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8514                                                vlan_id, is_kill);
8515                 written_to_tbl = true;
8516         }
8517
8518         if (!ret) {
8519                 if (is_kill)
8520                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8521                 else
8522                         hclge_add_vport_vlan_table(vport, vlan_id,
8523                                                    written_to_tbl);
8524         } else if (is_kill) {
8525                 /* when removing the hw VLAN filter fails, record the VLAN
8526                  * id and try to remove it from hw later, to stay consistent
8527                  * with the stack
8528                  */
8529                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8530         }
8531         return ret;
8532 }
8533
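/* Retry VLAN deletions that previously failed (e.g. while a reset was in
 * progress) and were recorded in vlan_del_fail_bmap, handling at most
 * HCLGE_MAX_SYNC_COUNT entries per invocation.
 */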
8534 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8535 {
8536 #define HCLGE_MAX_SYNC_COUNT    60
8537
8538         int i, ret, sync_cnt = 0;
8539         u16 vlan_id;
8540
8541         /* start from vport 1 because the PF is always alive */
8542         for (i = 0; i < hdev->num_alloc_vport; i++) {
8543                 struct hclge_vport *vport = &hdev->vport[i];
8544
8545                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8546                                          VLAN_N_VID);
8547                 while (vlan_id != VLAN_N_VID) {
8548                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8549                                                        vport->vport_id, vlan_id,
8550                                                        true);
8551                         if (ret && ret != -EINVAL)
8552                                 return;
8553
8554                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8555                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8556
8557                         sync_cnt++;
8558                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8559                                 return;
8560
8561                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8562                                                  VLAN_N_VID);
8563                 }
8564         }
8565 }
8566
8567 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8568 {
8569         struct hclge_config_max_frm_size_cmd *req;
8570         struct hclge_desc desc;
8571
8572         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8573
8574         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8575         req->max_frm_size = cpu_to_le16(new_mps);
8576         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8577
8578         return hclge_cmd_send(&hdev->hw, &desc, 1);
8579 }
8580
8581 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8582 {
8583         struct hclge_vport *vport = hclge_get_vport(handle);
8584
8585         return hclge_set_vport_mtu(vport, new_mtu);
8586 }
8587
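/* Worked example of the frame size arithmetic below: for new_mtu = 1500,
 * max_frm_size = 1500 + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN
 *              = 1500 + 14 + 4 + 2 * 4 = 1526 bytes,
 * which must lie between HCLGE_MAC_MIN_FRAME and HCLGE_MAC_MAX_FRAME.
 */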
8588 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8589 {
8590         struct hclge_dev *hdev = vport->back;
8591         int i, max_frm_size, ret;
8592
8593         /* HW supports 2 layers of VLAN tags */
8594         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8595         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8596             max_frm_size > HCLGE_MAC_MAX_FRAME)
8597                 return -EINVAL;
8598
8599         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8600         mutex_lock(&hdev->vport_lock);
8601         /* VF's mps must fit within hdev->mps */
8602         if (vport->vport_id && max_frm_size > hdev->mps) {
8603                 mutex_unlock(&hdev->vport_lock);
8604                 return -EINVAL;
8605         } else if (vport->vport_id) {
8606                 vport->mps = max_frm_size;
8607                 mutex_unlock(&hdev->vport_lock);
8608                 return 0;
8609         }
8610
8611         /* PF's mps must be no less than any VF's mps */
8612         for (i = 1; i < hdev->num_alloc_vport; i++)
8613                 if (max_frm_size < hdev->vport[i].mps) {
8614                         mutex_unlock(&hdev->vport_lock);
8615                         return -EINVAL;
8616                 }
8617
8618         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8619
8620         ret = hclge_set_mac_mtu(hdev, max_frm_size);
8621         if (ret) {
8622                 dev_err(&hdev->pdev->dev,
8623                         "Change mtu fail, ret =%d\n", ret);
8624                 goto out;
8625         }
8626
8627         hdev->mps = max_frm_size;
8628         vport->mps = max_frm_size;
8629
8630         ret = hclge_buffer_alloc(hdev);
8631         if (ret)
8632                 dev_err(&hdev->pdev->dev,
8633                         "Allocate buffer fail, ret =%d\n", ret);
8634
8635 out:
8636         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8637         mutex_unlock(&hdev->vport_lock);
8638         return ret;
8639 }
8640
8641 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8642                                     bool enable)
8643 {
8644         struct hclge_reset_tqp_queue_cmd *req;
8645         struct hclge_desc desc;
8646         int ret;
8647
8648         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8649
8650         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8651         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8652         if (enable)
8653                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8654
8655         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8656         if (ret) {
8657                 dev_err(&hdev->pdev->dev,
8658                         "Send tqp reset cmd error, status =%d\n", ret);
8659                 return ret;
8660         }
8661
8662         return 0;
8663 }
8664
8665 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8666 {
8667         struct hclge_reset_tqp_queue_cmd *req;
8668         struct hclge_desc desc;
8669         int ret;
8670
8671         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8672
8673         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8674         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8675
8676         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8677         if (ret) {
8678                 dev_err(&hdev->pdev->dev,
8679                         "Get reset status error, status =%d\n", ret);
8680                 return ret;
8681         }
8682
8683         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8684 }
8685
8686 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8687 {
8688         struct hnae3_queue *queue;
8689         struct hclge_tqp *tqp;
8690
8691         queue = handle->kinfo.tqp[queue_id];
8692         tqp = container_of(queue, struct hclge_tqp, q);
8693
8694         return tqp->index;
8695 }
8696
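/* TQP reset sequence: disable the queue, ask the firmware to assert the
 * per-queue reset, poll the ready_to_reset status for up to
 * HCLGE_TQP_RESET_TRY_TIMES iterations (sleeping roughly 1 ms between
 * polls), then deassert the reset again.
 */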
8697 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8698 {
8699         struct hclge_vport *vport = hclge_get_vport(handle);
8700         struct hclge_dev *hdev = vport->back;
8701         int reset_try_times = 0;
8702         int reset_status;
8703         u16 queue_gid;
8704         int ret;
8705
8706         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8707
8708         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8709         if (ret) {
8710                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8711                 return ret;
8712         }
8713
8714         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8715         if (ret) {
8716                 dev_err(&hdev->pdev->dev,
8717                         "Send reset tqp cmd fail, ret = %d\n", ret);
8718                 return ret;
8719         }
8720
8721         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8722                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8723                 if (reset_status)
8724                         break;
8725
8726                 /* Wait for tqp hw reset */
8727                 usleep_range(1000, 1200);
8728         }
8729
8730         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8731                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8732                 return -ETIME;
8733         }
8734
8735         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8736         if (ret)
8737                 dev_err(&hdev->pdev->dev,
8738                         "Deassert the soft reset fail, ret = %d\n", ret);
8739
8740         return ret;
8741 }
8742
8743 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8744 {
8745         struct hclge_dev *hdev = vport->back;
8746         int reset_try_times = 0;
8747         int reset_status;
8748         u16 queue_gid;
8749         int ret;
8750
8751         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8752
8753         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8754         if (ret) {
8755                 dev_warn(&hdev->pdev->dev,
8756                          "Send reset tqp cmd fail, ret = %d\n", ret);
8757                 return;
8758         }
8759
8760         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8761                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8762                 if (reset_status)
8763                         break;
8764
8765                 /* Wait for tqp hw reset */
8766                 usleep_range(1000, 1200);
8767         }
8768
8769         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8770                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8771                 return;
8772         }
8773
8774         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8775         if (ret)
8776                 dev_warn(&hdev->pdev->dev,
8777                          "Deassert the soft reset fail, ret = %d\n", ret);
8778 }
8779
8780 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8781 {
8782         struct hclge_vport *vport = hclge_get_vport(handle);
8783         struct hclge_dev *hdev = vport->back;
8784
8785         return hdev->fw_version;
8786 }
8787
8788 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8789 {
8790         struct phy_device *phydev = hdev->hw.mac.phydev;
8791
8792         if (!phydev)
8793                 return;
8794
8795         phy_set_asym_pause(phydev, rx_en, tx_en);
8796 }
8797
8798 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8799 {
8800         int ret;
8801
8802         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8803                 return 0;
8804
8805         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8806         if (ret)
8807                 dev_err(&hdev->pdev->dev,
8808                         "configure pauseparam error, ret = %d.\n", ret);
8809
8810         return ret;
8811 }
8812
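/* Resolve MAC pause settings from the PHY autoneg result: the local and
 * link partner pause advertisements are combined with
 * mii_resolve_flowctrl_fdx(), pause is forced off for half duplex links,
 * and the outcome is programmed into the MAC.
 */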
8813 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8814 {
8815         struct phy_device *phydev = hdev->hw.mac.phydev;
8816         u16 remote_advertising = 0;
8817         u16 local_advertising;
8818         u32 rx_pause, tx_pause;
8819         u8 flowctl;
8820
8821         if (!phydev->link || !phydev->autoneg)
8822                 return 0;
8823
8824         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8825
8826         if (phydev->pause)
8827                 remote_advertising = LPA_PAUSE_CAP;
8828
8829         if (phydev->asym_pause)
8830                 remote_advertising |= LPA_PAUSE_ASYM;
8831
8832         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8833                                            remote_advertising);
8834         tx_pause = flowctl & FLOW_CTRL_TX;
8835         rx_pause = flowctl & FLOW_CTRL_RX;
8836
8837         if (phydev->duplex == HCLGE_MAC_HALF) {
8838                 tx_pause = 0;
8839                 rx_pause = 0;
8840         }
8841
8842         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8843 }
8844
8845 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8846                                  u32 *rx_en, u32 *tx_en)
8847 {
8848         struct hclge_vport *vport = hclge_get_vport(handle);
8849         struct hclge_dev *hdev = vport->back;
8850         struct phy_device *phydev = hdev->hw.mac.phydev;
8851
8852         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8853
8854         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8855                 *rx_en = 0;
8856                 *tx_en = 0;
8857                 return;
8858         }
8859
8860         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8861                 *rx_en = 1;
8862                 *tx_en = 0;
8863         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8864                 *tx_en = 1;
8865                 *rx_en = 0;
8866         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8867                 *rx_en = 1;
8868                 *tx_en = 1;
8869         } else {
8870                 *rx_en = 0;
8871                 *tx_en = 0;
8872         }
8873 }
8874
8875 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8876                                          u32 rx_en, u32 tx_en)
8877 {
8878         if (rx_en && tx_en)
8879                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8880         else if (rx_en && !tx_en)
8881                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8882         else if (!rx_en && tx_en)
8883                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8884         else
8885                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8886
8887         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8888 }
8889
8890 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8891                                 u32 rx_en, u32 tx_en)
8892 {
8893         struct hclge_vport *vport = hclge_get_vport(handle);
8894         struct hclge_dev *hdev = vport->back;
8895         struct phy_device *phydev = hdev->hw.mac.phydev;
8896         u32 fc_autoneg;
8897
8898         if (phydev) {
8899                 fc_autoneg = hclge_get_autoneg(handle);
8900                 if (auto_neg != fc_autoneg) {
8901                         dev_info(&hdev->pdev->dev,
8902                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8903                         return -EOPNOTSUPP;
8904                 }
8905         }
8906
8907         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8908                 dev_info(&hdev->pdev->dev,
8909                          "Priority flow control enabled. Cannot set link flow control.\n");
8910                 return -EOPNOTSUPP;
8911         }
8912
8913         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8914
8915         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8916
8917         if (!auto_neg)
8918                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8919
8920         if (phydev)
8921                 return phy_start_aneg(phydev);
8922
8923         return -EOPNOTSUPP;
8924 }
8925
8926 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8927                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8928 {
8929         struct hclge_vport *vport = hclge_get_vport(handle);
8930         struct hclge_dev *hdev = vport->back;
8931
8932         if (speed)
8933                 *speed = hdev->hw.mac.speed;
8934         if (duplex)
8935                 *duplex = hdev->hw.mac.duplex;
8936         if (auto_neg)
8937                 *auto_neg = hdev->hw.mac.autoneg;
8938 }
8939
8940 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8941                                  u8 *module_type)
8942 {
8943         struct hclge_vport *vport = hclge_get_vport(handle);
8944         struct hclge_dev *hdev = vport->back;
8945
8946         if (media_type)
8947                 *media_type = hdev->hw.mac.media_type;
8948
8949         if (module_type)
8950                 *module_type = hdev->hw.mac.module_type;
8951 }
8952
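/* Report MDI/MDI-X control and status for a copper PHY. The PHY page
 * register is presumably switched to the MDI-X page for the two reads and
 * restored to the copper page afterwards; control values 0x0, 0x1 and 0x3
 * map to MDI, MDI-X and auto respectively.
 */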
8953 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8954                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8955 {
8956         struct hclge_vport *vport = hclge_get_vport(handle);
8957         struct hclge_dev *hdev = vport->back;
8958         struct phy_device *phydev = hdev->hw.mac.phydev;
8959         int mdix_ctrl, mdix, is_resolved;
8960         unsigned int retval;
8961
8962         if (!phydev) {
8963                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8964                 *tp_mdix = ETH_TP_MDI_INVALID;
8965                 return;
8966         }
8967
8968         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8969
8970         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8971         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8972                                     HCLGE_PHY_MDIX_CTRL_S);
8973
8974         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8975         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8976         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8977
8978         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8979
8980         switch (mdix_ctrl) {
8981         case 0x0:
8982                 *tp_mdix_ctrl = ETH_TP_MDI;
8983                 break;
8984         case 0x1:
8985                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8986                 break;
8987         case 0x3:
8988                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8989                 break;
8990         default:
8991                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8992                 break;
8993         }
8994
8995         if (!is_resolved)
8996                 *tp_mdix = ETH_TP_MDI_INVALID;
8997         else if (mdix)
8998                 *tp_mdix = ETH_TP_MDI_X;
8999         else
9000                 *tp_mdix = ETH_TP_MDI;
9001 }
9002
9003 static void hclge_info_show(struct hclge_dev *hdev)
9004 {
9005         struct device *dev = &hdev->pdev->dev;
9006
9007         dev_info(dev, "PF info begin:\n");
9008
9009         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9010         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9011         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9012         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9013         dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
9014         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9015         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9016         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9017         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9018         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9019         dev_info(dev, "This is %s PF\n",
9020                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9021         dev_info(dev, "DCB %s\n",
9022                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9023         dev_info(dev, "MQPRIO %s\n",
9024                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9025
9026         dev_info(dev, "PF info end.\n");
9027 }
9028
9029 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9030                                           struct hclge_vport *vport)
9031 {
9032         struct hnae3_client *client = vport->nic.client;
9033         struct hclge_dev *hdev = ae_dev->priv;
9034         int rst_cnt = hdev->rst_stats.reset_cnt;
9035         int ret;
9036
9037         ret = client->ops->init_instance(&vport->nic);
9038         if (ret)
9039                 return ret;
9040
9041         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9042         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9043             rst_cnt != hdev->rst_stats.reset_cnt) {
9044                 ret = -EBUSY;
9045                 goto init_nic_err;
9046         }
9047
9048         /* Enable nic hw error interrupts */
9049         ret = hclge_config_nic_hw_error(hdev, true);
9050         if (ret) {
9051                 dev_err(&ae_dev->pdev->dev,
9052                         "fail(%d) to enable hw error interrupts\n", ret);
9053                 goto init_nic_err;
9054         }
9055
9056         hnae3_set_client_init_flag(client, ae_dev, 1);
9057
9058         if (netif_msg_drv(&hdev->vport->nic))
9059                 hclge_info_show(hdev);
9060
9061         return ret;
9062
9063 init_nic_err:
9064         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9065         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9066                 msleep(HCLGE_WAIT_RESET_DONE);
9067
9068         client->ops->uninit_instance(&vport->nic, 0);
9069
9070         return ret;
9071 }
9072
9073 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9074                                            struct hclge_vport *vport)
9075 {
9076         struct hnae3_client *client = vport->roce.client;
9077         struct hclge_dev *hdev = ae_dev->priv;
9078         int rst_cnt;
9079         int ret;
9080
9081         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9082             !hdev->nic_client)
9083                 return 0;
9084
9085         client = hdev->roce_client;
9086         ret = hclge_init_roce_base_info(vport);
9087         if (ret)
9088                 return ret;
9089
9090         rst_cnt = hdev->rst_stats.reset_cnt;
9091         ret = client->ops->init_instance(&vport->roce);
9092         if (ret)
9093                 return ret;
9094
9095         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9096         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9097             rst_cnt != hdev->rst_stats.reset_cnt) {
9098                 ret = -EBUSY;
9099                 goto init_roce_err;
9100         }
9101
9102         /* Enable roce ras interrupts */
9103         ret = hclge_config_rocee_ras_interrupt(hdev, true);
9104         if (ret) {
9105                 dev_err(&ae_dev->pdev->dev,
9106                         "fail(%d) to enable roce ras interrupts\n", ret);
9107                 goto init_roce_err;
9108         }
9109
9110         hnae3_set_client_init_flag(client, ae_dev, 1);
9111
9112         return 0;
9113
9114 init_roce_err:
9115         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9116         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9117                 msleep(HCLGE_WAIT_RESET_DONE);
9118
9119         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9120
9121         return ret;
9122 }
9123
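/* Bind a client (KNIC or RoCE) to every vport. Initializing a KNIC client
 * also triggers RoCE client initialization when a RoCE client has already
 * registered; on failure the corresponding client pointers are cleared
 * again via the error labels below.
 */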
9124 static int hclge_init_client_instance(struct hnae3_client *client,
9125                                       struct hnae3_ae_dev *ae_dev)
9126 {
9127         struct hclge_dev *hdev = ae_dev->priv;
9128         struct hclge_vport *vport;
9129         int i, ret;
9130
9131         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9132                 vport = &hdev->vport[i];
9133
9134                 switch (client->type) {
9135                 case HNAE3_CLIENT_KNIC:
9136                         hdev->nic_client = client;
9137                         vport->nic.client = client;
9138                         ret = hclge_init_nic_client_instance(ae_dev, vport);
9139                         if (ret)
9140                                 goto clear_nic;
9141
9142                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9143                         if (ret)
9144                                 goto clear_roce;
9145
9146                         break;
9147                 case HNAE3_CLIENT_ROCE:
9148                         if (hnae3_dev_roce_supported(hdev)) {
9149                                 hdev->roce_client = client;
9150                                 vport->roce.client = client;
9151                         }
9152
9153                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9154                         if (ret)
9155                                 goto clear_roce;
9156
9157                         break;
9158                 default:
9159                         return -EINVAL;
9160                 }
9161         }
9162
9163         return 0;
9164
9165 clear_nic:
9166         hdev->nic_client = NULL;
9167         vport->nic.client = NULL;
9168         return ret;
9169 clear_roce:
9170         hdev->roce_client = NULL;
9171         vport->roce.client = NULL;
9172         return ret;
9173 }
9174
9175 static void hclge_uninit_client_instance(struct hnae3_client *client,
9176                                          struct hnae3_ae_dev *ae_dev)
9177 {
9178         struct hclge_dev *hdev = ae_dev->priv;
9179         struct hclge_vport *vport;
9180         int i;
9181
9182         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9183                 vport = &hdev->vport[i];
9184                 if (hdev->roce_client) {
9185                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9186                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9187                                 msleep(HCLGE_WAIT_RESET_DONE);
9188
9189                         hdev->roce_client->ops->uninit_instance(&vport->roce,
9190                                                                 0);
9191                         hdev->roce_client = NULL;
9192                         vport->roce.client = NULL;
9193                 }
9194                 if (client->type == HNAE3_CLIENT_ROCE)
9195                         return;
9196                 if (hdev->nic_client && client->ops->uninit_instance) {
9197                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9198                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9199                                 msleep(HCLGE_WAIT_RESET_DONE);
9200
9201                         client->ops->uninit_instance(&vport->nic, 0);
9202                         hdev->nic_client = NULL;
9203                         vport->nic.client = NULL;
9204                 }
9205         }
9206 }
9207
9208 static int hclge_pci_init(struct hclge_dev *hdev)
9209 {
9210         struct pci_dev *pdev = hdev->pdev;
9211         struct hclge_hw *hw;
9212         int ret;
9213
9214         ret = pci_enable_device(pdev);
9215         if (ret) {
9216                 dev_err(&pdev->dev, "failed to enable PCI device\n");
9217                 return ret;
9218         }
9219
9220         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9221         if (ret) {
9222                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9223                 if (ret) {
9224                         dev_err(&pdev->dev,
9225                                 "can't set consistent PCI DMA\n");
9226                         goto err_disable_device;
9227                 }
9228                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9229         }
9230
9231         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9232         if (ret) {
9233                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9234                 goto err_disable_device;
9235         }
9236
9237         pci_set_master(pdev);
9238         hw = &hdev->hw;
9239         hw->io_base = pcim_iomap(pdev, 2, 0);
9240         if (!hw->io_base) {
9241                 dev_err(&pdev->dev, "Can't map configuration register space\n");
9242                 ret = -ENOMEM;
9243                 goto err_clr_master;
9244         }
9245
9246         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9247
9248         return 0;
9249 err_clr_master:
9250         pci_clear_master(pdev);
9251         pci_release_regions(pdev);
9252 err_disable_device:
9253         pci_disable_device(pdev);
9254
9255         return ret;
9256 }
9257
9258 static void hclge_pci_uninit(struct hclge_dev *hdev)
9259 {
9260         struct pci_dev *pdev = hdev->pdev;
9261
9262         pcim_iounmap(pdev, hdev->hw.io_base);
9263         pci_free_irq_vectors(pdev);
9264         pci_clear_master(pdev);
9265         pci_release_mem_regions(pdev);
9266         pci_disable_device(pdev);
9267 }
9268
9269 static void hclge_state_init(struct hclge_dev *hdev)
9270 {
9271         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9272         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9273         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9274         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9275         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9276         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9277         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9278 }
9279
9280 static void hclge_state_uninit(struct hclge_dev *hdev)
9281 {
9282         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9283         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9284
9285         if (hdev->reset_timer.function)
9286                 del_timer_sync(&hdev->reset_timer);
9287         if (hdev->service_task.work.func)
9288                 cancel_delayed_work_sync(&hdev->service_task);
9289 }
9290
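/* FLR preparation: take the reset semaphore and run hclge_reset_prepare();
 * if that fails, retry (indefinitely while another reset is pending,
 * otherwise up to HCLGE_FLR_RETRY_CNT times). The misc vector and command
 * queue stay disabled until hclge_flr_done() rebuilds the device.
 */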
9291 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9292 {
9293 #define HCLGE_FLR_RETRY_WAIT_MS 500
9294 #define HCLGE_FLR_RETRY_CNT     5
9295
9296         struct hclge_dev *hdev = ae_dev->priv;
9297         int retry_cnt = 0;
9298         int ret;
9299
9300 retry:
9301         down(&hdev->reset_sem);
9302         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9303         hdev->reset_type = HNAE3_FLR_RESET;
9304         ret = hclge_reset_prepare(hdev);
9305         if (ret) {
9306                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9307                         ret);
9308                 if (hdev->reset_pending ||
9309                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9310                         dev_err(&hdev->pdev->dev,
9311                                 "reset_pending:0x%lx, retry_cnt:%d\n",
9312                                 hdev->reset_pending, retry_cnt);
9313                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9314                         up(&hdev->reset_sem);
9315                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
9316                         goto retry;
9317                 }
9318         }
9319
9320         /* disable misc vector before FLR done */
9321         hclge_enable_vector(&hdev->misc_vector, false);
9322         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9323         hdev->rst_stats.flr_rst_cnt++;
9324 }
9325
9326 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9327 {
9328         struct hclge_dev *hdev = ae_dev->priv;
9329         int ret;
9330
9331         hclge_enable_vector(&hdev->misc_vector, true);
9332
9333         ret = hclge_reset_rebuild(hdev);
9334         if (ret)
9335                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9336
9337         hdev->reset_type = HNAE3_NONE_RESET;
9338         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9339         up(&hdev->reset_sem);
9340 }
9341
9342 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9343 {
9344         u16 i;
9345
9346         for (i = 0; i < hdev->num_alloc_vport; i++) {
9347                 struct hclge_vport *vport = &hdev->vport[i];
9348                 int ret;
9349
9350                 /* Send cmd to clear VF's FUNC_RST_ING */
9351                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9352                 if (ret)
9353                         dev_warn(&hdev->pdev->dev,
9354                                  "clear vf(%u) rst failed %d!\n",
9355                                  vport->vport_id, ret);
9356         }
9357 }
9358
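/* Main PF initialization path: bring up PCI and the firmware command queue,
 * then MSI/MSI-X and the misc vector, allocate TQPs and vports, and
 * configure MAC, VLAN, TM scheduling, RSS and the flow director before
 * arming the reset timer and the periodic service task.
 */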
9359 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9360 {
9361         struct pci_dev *pdev = ae_dev->pdev;
9362         struct hclge_dev *hdev;
9363         int ret;
9364
9365         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9366         if (!hdev) {
9367                 ret = -ENOMEM;
9368                 goto out;
9369         }
9370
9371         hdev->pdev = pdev;
9372         hdev->ae_dev = ae_dev;
9373         hdev->reset_type = HNAE3_NONE_RESET;
9374         hdev->reset_level = HNAE3_FUNC_RESET;
9375         ae_dev->priv = hdev;
9376
9377         /* HW supports 2 layers of VLAN tags */
9378         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9379
9380         mutex_init(&hdev->vport_lock);
9381         spin_lock_init(&hdev->fd_rule_lock);
9382         sema_init(&hdev->reset_sem, 1);
9383
9384         ret = hclge_pci_init(hdev);
9385         if (ret)
9386                 goto out;
9387
9388         /* Firmware command queue initialize */
9389         ret = hclge_cmd_queue_init(hdev);
9390         if (ret)
9391                 goto err_pci_uninit;
9392
9393         /* Firmware command initialize */
9394         ret = hclge_cmd_init(hdev);
9395         if (ret)
9396                 goto err_cmd_uninit;
9397
9398         ret = hclge_get_cap(hdev);
9399         if (ret)
9400                 goto err_cmd_uninit;
9401
9402         ret = hclge_configure(hdev);
9403         if (ret) {
9404                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9405                 goto err_cmd_uninit;
9406         }
9407
9408         ret = hclge_init_msi(hdev);
9409         if (ret) {
9410                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9411                 goto err_cmd_uninit;
9412         }
9413
9414         ret = hclge_misc_irq_init(hdev);
9415         if (ret)
9416                 goto err_msi_uninit;
9417
9418         ret = hclge_alloc_tqps(hdev);
9419         if (ret) {
9420                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9421                 goto err_msi_irq_uninit;
9422         }
9423
9424         ret = hclge_alloc_vport(hdev);
9425         if (ret)
9426                 goto err_msi_irq_uninit;
9427
9428         ret = hclge_map_tqp(hdev);
9429         if (ret)
9430                 goto err_msi_irq_uninit;
9431
9432         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9433                 ret = hclge_mac_mdio_config(hdev);
9434                 if (ret)
9435                         goto err_msi_irq_uninit;
9436         }
9437
9438         ret = hclge_init_umv_space(hdev);
9439         if (ret)
9440                 goto err_mdiobus_unreg;
9441
9442         ret = hclge_mac_init(hdev);
9443         if (ret) {
9444                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9445                 goto err_mdiobus_unreg;
9446         }
9447
9448         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9449         if (ret) {
9450                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9451                 goto err_mdiobus_unreg;
9452         }
9453
9454         ret = hclge_config_gro(hdev, true);
9455         if (ret)
9456                 goto err_mdiobus_unreg;
9457
9458         ret = hclge_init_vlan_config(hdev);
9459         if (ret) {
9460                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9461                 goto err_mdiobus_unreg;
9462         }
9463
9464         ret = hclge_tm_schd_init(hdev);
9465         if (ret) {
9466                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9467                 goto err_mdiobus_unreg;
9468         }
9469
9470         hclge_rss_init_cfg(hdev);
9471         ret = hclge_rss_init_hw(hdev);
9472         if (ret) {
9473                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9474                 goto err_mdiobus_unreg;
9475         }
9476
9477         ret = init_mgr_tbl(hdev);
9478         if (ret) {
9479                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9480                 goto err_mdiobus_unreg;
9481         }
9482
9483         ret = hclge_init_fd_config(hdev);
9484         if (ret) {
9485                 dev_err(&pdev->dev,
9486                         "fd table init fail, ret=%d\n", ret);
9487                 goto err_mdiobus_unreg;
9488         }
9489
9490         INIT_KFIFO(hdev->mac_tnl_log);
9491
9492         hclge_dcb_ops_set(hdev);
9493
9494         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9495         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9496
9497         /* Set up affinity after the service timer setup because
9498          * add_timer_on is called in the affinity notify.
9499          */
9500         hclge_misc_affinity_setup(hdev);
9501
9502         hclge_clear_all_event_cause(hdev);
9503         hclge_clear_resetting_state(hdev);
9504
9505         /* Log and clear the hw errors that have already occurred */
9506         hclge_handle_all_hns_hw_errors(ae_dev);
9507
9508         /* request a delayed reset for error recovery, because an immediate
9509          * global reset on a PF would affect pending initialization of others
9510          */
9511         if (ae_dev->hw_err_reset_req) {
9512                 enum hnae3_reset_type reset_level;
9513
9514                 reset_level = hclge_get_reset_level(ae_dev,
9515                                                     &ae_dev->hw_err_reset_req);
9516                 hclge_set_def_reset_request(ae_dev, reset_level);
9517                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9518         }
9519
9520         /* Enable MISC vector(vector0) */
9521         hclge_enable_vector(&hdev->misc_vector, true);
9522
9523         hclge_state_init(hdev);
9524         hdev->last_reset_time = jiffies;
9525
9526         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9527                  HCLGE_DRIVER_NAME);
9528
9529         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9530
9531         return 0;
9532
9533 err_mdiobus_unreg:
9534         if (hdev->hw.mac.phydev)
9535                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9536 err_msi_irq_uninit:
9537         hclge_misc_irq_uninit(hdev);
9538 err_msi_uninit:
9539         pci_free_irq_vectors(pdev);
9540 err_cmd_uninit:
9541         hclge_cmd_uninit(hdev);
9542 err_pci_uninit:
9543         pcim_iounmap(pdev, hdev->hw.io_base);
9544         pci_clear_master(pdev);
9545         pci_release_regions(pdev);
9546         pci_disable_device(pdev);
9547 out:
9548         return ret;
9549 }
9550
9551 static void hclge_stats_clear(struct hclge_dev *hdev)
9552 {
9553         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
9554 }
9555
9556 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9557 {
9558         return hclge_config_switch_param(hdev, vf, enable,
9559                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
9560 }
9561
9562 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9563 {
9564         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9565                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
9566                                           enable, vf);
9567 }
9568
9569 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9570 {
9571         int ret;
9572
9573         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9574         if (ret) {
9575                 dev_err(&hdev->pdev->dev,
9576                         "Set vf %d mac spoof check %s failed, ret=%d\n",
9577                         vf, enable ? "on" : "off", ret);
9578                 return ret;
9579         }
9580
9581         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9582         if (ret)
9583                 dev_err(&hdev->pdev->dev,
9584                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
9585                         vf, enable ? "on" : "off", ret);
9586
9587         return ret;
9588 }
9589
9590 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9591                                  bool enable)
9592 {
9593         struct hclge_vport *vport = hclge_get_vport(handle);
9594         struct hclge_dev *hdev = vport->back;
9595         u32 new_spoofchk = enable ? 1 : 0;
9596         int ret;
9597
9598         if (hdev->pdev->revision == 0x20)
9599                 return -EOPNOTSUPP;
9600
9601         vport = hclge_get_vf_vport(hdev, vf);
9602         if (!vport)
9603                 return -EINVAL;
9604
9605         if (vport->vf_info.spoofchk == new_spoofchk)
9606                 return 0;
9607
9608         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9609                 dev_warn(&hdev->pdev->dev,
9610                          "vf %d vlan table is full, enabling spoof check may cause its packets to fail to send\n",
9611                          vf);
9612         else if (enable && hclge_is_umv_space_full(vport))
9613                 dev_warn(&hdev->pdev->dev,
9614                          "vf %d mac table is full, enabling spoof check may cause its packets to fail to send\n",
9615                          vf);
9616
9617         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9618         if (ret)
9619                 return ret;
9620
9621         vport->vf_info.spoofchk = new_spoofchk;
9622         return 0;
9623 }
9624
9625 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9626 {
9627         struct hclge_vport *vport = hdev->vport;
9628         int ret;
9629         int i;
9630
9631         if (hdev->pdev->revision == 0x20)
9632                 return 0;
9633
9634         /* resume the vf spoof check state after reset */
9635         for (i = 0; i < hdev->num_alloc_vport; i++) {
9636                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9637                                                vport->vf_info.spoofchk);
9638                 if (ret)
9639                         return ret;
9640
9641                 vport++;
9642         }
9643
9644         return 0;
9645 }
9646
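/* Handle a host request to change a VF's trusted flag, typically reached
 * via the PF's ndo_set_vf_trust path, e.g. "ip link set <pf> vf 0 trust on"
 * (command shown for illustration). Revoking trust also disables any
 * promiscuous mode the VF had enabled and informs the VF about it.
 */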
9647 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9648 {
9649         struct hclge_vport *vport = hclge_get_vport(handle);
9650         struct hclge_dev *hdev = vport->back;
9651         u32 new_trusted = enable ? 1 : 0;
9652         bool en_bc_pmc;
9653         int ret;
9654
9655         vport = hclge_get_vf_vport(hdev, vf);
9656         if (!vport)
9657                 return -EINVAL;
9658
9659         if (vport->vf_info.trusted == new_trusted)
9660                 return 0;
9661
9662         /* Disable promisc mode for VF if it is not trusted any more. */
9663         if (!enable && vport->vf_info.promisc_enable) {
9664                 en_bc_pmc = hdev->pdev->revision != 0x20;
9665                 ret = hclge_set_vport_promisc_mode(vport, false, false,
9666                                                    en_bc_pmc);
9667                 if (ret)
9668                         return ret;
9669                 vport->vf_info.promisc_enable = 0;
9670                 hclge_inform_vf_promisc_info(vport);
9671         }
9672
9673         vport->vf_info.trusted = new_trusted;
9674
9675         return 0;
9676 }
9677
9678 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9679 {
9680         int ret;
9681         int vf;
9682
9683         /* reset vf rate to default value */
9684         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9685                 struct hclge_vport *vport = &hdev->vport[vf];
9686
9687                 vport->vf_info.max_tx_rate = 0;
9688                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9689                 if (ret)
9690                         dev_err(&hdev->pdev->dev,
9691                                 "vf%d failed to reset to default, ret=%d\n",
9692                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9693         }
9694 }
9695
9696 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9697                                      int min_tx_rate, int max_tx_rate)
9698 {
9699         if (min_tx_rate != 0 ||
9700             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9701                 dev_err(&hdev->pdev->dev,
9702                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9703                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9704                 return -EINVAL;
9705         }
9706
9707         return 0;
9708 }
9709
9710 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9711                              int min_tx_rate, int max_tx_rate, bool force)
9712 {
9713         struct hclge_vport *vport = hclge_get_vport(handle);
9714         struct hclge_dev *hdev = vport->back;
9715         int ret;
9716
9717         ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9718         if (ret)
9719                 return ret;
9720
9721         vport = hclge_get_vf_vport(hdev, vf);
9722         if (!vport)
9723                 return -EINVAL;
9724
9725         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9726                 return 0;
9727
9728         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9729         if (ret)
9730                 return ret;
9731
9732         vport->vf_info.max_tx_rate = max_tx_rate;
9733
9734         return 0;
9735 }
9736
9737 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9738 {
9739         struct hnae3_handle *handle = &hdev->vport->nic;
9740         struct hclge_vport *vport;
9741         int ret;
9742         int vf;
9743
9744         /* resume the vf max_tx_rate after reset */
9745         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9746                 vport = hclge_get_vf_vport(hdev, vf);
9747                 if (!vport)
9748                         return -EINVAL;
9749
9750                 /* zero means max rate; after reset, the firmware has already
9751                  * set it to max rate, so just continue.
9752                  */
9753                 if (!vport->vf_info.max_tx_rate)
9754                         continue;
9755
9756                 ret = hclge_set_vf_rate(handle, vf, 0,
9757                                         vport->vf_info.max_tx_rate, true);
9758                 if (ret) {
9759                         dev_err(&hdev->pdev->dev,
9760                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9761                                 vf, vport->vf_info.max_tx_rate, ret);
9762                         return ret;
9763                 }
9764         }
9765
9766         return 0;
9767 }
9768
9769 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9770 {
9771         struct hclge_vport *vport = hdev->vport;
9772         int i;
9773
9774         for (i = 0; i < hdev->num_alloc_vport; i++) {
9775                 hclge_vport_stop(vport);
9776                 vport++;
9777         }
9778 }
9779
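/* Rebuild the PF after a reset: statistics and the VLAN tables are cleared,
 * the command queue, TQP mapping, MAC, VLAN, TM, RSS and flow director
 * configuration are re-applied, hardware error interrupts are re-enabled,
 * and the per-VF spoof check and rate settings are restored.
 */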
9780 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9781 {
9782         struct hclge_dev *hdev = ae_dev->priv;
9783         struct pci_dev *pdev = ae_dev->pdev;
9784         int ret;
9785
9786         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9787
9788         hclge_stats_clear(hdev);
9789         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9790         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9791
9792         ret = hclge_cmd_init(hdev);
9793         if (ret) {
9794                 dev_err(&pdev->dev, "Cmd queue init failed\n");
9795                 return ret;
9796         }
9797
9798         ret = hclge_map_tqp(hdev);
9799         if (ret) {
9800                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9801                 return ret;
9802         }
9803
9804         hclge_reset_umv_space(hdev);
9805
9806         ret = hclge_mac_init(hdev);
9807         if (ret) {
9808                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9809                 return ret;
9810         }
9811
9812         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9813         if (ret) {
9814                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9815                 return ret;
9816         }
9817
9818         ret = hclge_config_gro(hdev, true);
9819         if (ret)
9820                 return ret;
9821
9822         ret = hclge_init_vlan_config(hdev);
9823         if (ret) {
9824                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9825                 return ret;
9826         }
9827
9828         ret = hclge_tm_init_hw(hdev, true);
9829         if (ret) {
9830                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9831                 return ret;
9832         }
9833
9834         ret = hclge_rss_init_hw(hdev);
9835         if (ret) {
9836                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9837                 return ret;
9838         }
9839
9840         ret = init_mgr_tbl(hdev);
9841         if (ret) {
9842                 dev_err(&pdev->dev,
9843                         "failed to reinit manager table, ret = %d\n", ret);
9844                 return ret;
9845         }
9846
9847         ret = hclge_init_fd_config(hdev);
9848         if (ret) {
9849                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9850                 return ret;
9851         }
9852
9853         /* Log and clear the hw errors that have already occurred */
9854         hclge_handle_all_hns_hw_errors(ae_dev);
9855
9856         /* Re-enable the hw error interrupts because
9857          * the interrupts get disabled on global reset.
9858          */
9859         ret = hclge_config_nic_hw_error(hdev, true);
9860         if (ret) {
9861                 dev_err(&pdev->dev,
9862                         "fail(%d) to re-enable NIC hw error interrupts\n",
9863                         ret);
9864                 return ret;
9865         }
9866
9867         if (hdev->roce_client) {
9868                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9869                 if (ret) {
9870                         dev_err(&pdev->dev,
9871                                 "fail(%d) to re-enable roce ras interrupts\n",
9872                                 ret);
9873                         return ret;
9874                 }
9875         }
9876
9877         hclge_reset_vport_state(hdev);
9878         ret = hclge_reset_vport_spoofchk(hdev);
9879         if (ret)
9880                 return ret;
9881
9882         ret = hclge_resume_vf_rate(hdev);
9883         if (ret)
9884                 return ret;
9885
9886         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9887                  HCLGE_DRIVER_NAME);
9888
9889         return 0;
9890 }
9891
9892 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9893 {
9894         struct hclge_dev *hdev = ae_dev->priv;
9895         struct hclge_mac *mac = &hdev->hw.mac;
9896
9897         hclge_reset_vf_rate(hdev);
9898         hclge_misc_affinity_teardown(hdev);
9899         hclge_state_uninit(hdev);
9900
9901         if (mac->phydev)
9902                 mdiobus_unregister(mac->mdio_bus);
9903
9904         hclge_uninit_umv_space(hdev);
9905
9906         /* Disable MISC vector(vector0) */
9907         hclge_enable_vector(&hdev->misc_vector, false);
9908         synchronize_irq(hdev->misc_vector.vector_irq);
9909
9910         /* Disable all hw interrupts */
9911         hclge_config_mac_tnl_int(hdev, false);
9912         hclge_config_nic_hw_error(hdev, false);
9913         hclge_config_rocee_ras_interrupt(hdev, false);
9914
9915         hclge_cmd_uninit(hdev);
9916         hclge_misc_irq_uninit(hdev);
9917         hclge_pci_uninit(hdev);
9918         mutex_destroy(&hdev->vport_lock);
9919         hclge_uninit_vport_mac_table(hdev);
9920         hclge_uninit_vport_vlan_table(hdev);
9921         ae_dev->priv = NULL;
9922 }
9923
9924 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9925 {
9926         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9927         struct hclge_vport *vport = hclge_get_vport(handle);
9928         struct hclge_dev *hdev = vport->back;
9929
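             /* The channel count exposed to ethtool is bounded both by the
              * hardware RSS capability and by the TQPs allocated per TC.
              */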
9930         return min_t(u32, hdev->rss_size_max,
9931                      vport->alloc_tqps / kinfo->num_tc);
9932 }
9933
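     /* Backs the ethtool channel query (e.g. "ethtool -l <netdev>", invocation
      * illustrative): one non-data "other" channel is reported alongside the
      * combined RX/TX channels.
      */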
9934 static void hclge_get_channels(struct hnae3_handle *handle,
9935                                struct ethtool_channels *ch)
9936 {
9937         ch->max_combined = hclge_get_max_channels(handle);
9938         ch->other_count = 1;
9939         ch->max_other = 1;
9940         ch->combined_count = handle->kinfo.rss_size;
9941 }
9942
9943 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9944                                         u16 *alloc_tqps, u16 *max_rss_size)
9945 {
9946         struct hclge_vport *vport = hclge_get_vport(handle);
9947         struct hclge_dev *hdev = vport->back;
9948
9949         *alloc_tqps = vport->alloc_tqps;
9950         *max_rss_size = hdev->rss_size_max;
9951 }
9952
9953 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9954                               bool rxfh_configured)
9955 {
9956         struct hclge_vport *vport = hclge_get_vport(handle);
9957         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9958         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9959         struct hclge_dev *hdev = vport->back;
9960         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9961         u16 cur_rss_size = kinfo->rss_size;
9962         u16 cur_tqps = kinfo->num_tqps;
9963         u16 tc_valid[HCLGE_MAX_TC_NUM];
9964         u16 roundup_size;
9965         u32 *rss_indir;
9966         unsigned int i;
9967         int ret;
9968
9969         kinfo->req_rss_size = new_tqps_num;
9970
9971         ret = hclge_tm_vport_map_update(hdev);
9972         if (ret) {
9973                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
9974                 return ret;
9975         }
9976
9977         roundup_size = roundup_pow_of_two(kinfo->rss_size);
9978         roundup_size = ilog2(roundup_size);
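             /* Illustrative example (values assumed): a requested rss_size of 6
              * rounds up to 8, so ilog2() yields 3 and each active TC below is
              * programmed with that exponent, i.e. an 8-queue region.
              */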
9979         /* Set the RSS TC mode according to the new RSS size */
9980         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9981                 tc_valid[i] = 0;
9982
9983                 if (!(hdev->hw_tc_map & BIT(i)))
9984                         continue;
9985
9986                 tc_valid[i] = 1;
9987                 tc_size[i] = roundup_size;
9988                 tc_offset[i] = kinfo->rss_size * i;
9989         }
9990         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9991         if (ret)
9992                 return ret;
9993
9994         /* RSS indirection table has been configured by user */
9995         if (rxfh_configured)
9996                 goto out;
9997
9998         /* Reinitialize the RSS indirection table to match the new rss_size */
9999         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10000         if (!rss_indir)
10001                 return -ENOMEM;
10002
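              /* Fill the table round-robin so that entry i maps to queue
               * i % rss_size.
               */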
10003         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10004                 rss_indir[i] = i % kinfo->rss_size;
10005
10006         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10007         if (ret)
10008                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret = %d\n",
10009                         ret);
10010
10011         kfree(rss_indir);
10012
10013 out:
10014         if (!ret)
10015                 dev_info(&hdev->pdev->dev,
10016                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
10017                          cur_rss_size, kinfo->rss_size,
10018                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
10019
10020         return ret;
10021 }
10022
10023 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10024                               u32 *regs_num_64_bit)
10025 {
10026         struct hclge_desc desc;
10027         u32 total_num;
10028         int ret;
10029
10030         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10031         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10032         if (ret) {
10033                 dev_err(&hdev->pdev->dev,
10034                         "Query register number cmd failed, ret = %d.\n", ret);
10035                 return ret;
10036         }
10037
10038         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10039         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10040
10041         total_num = *regs_num_32_bit + *regs_num_64_bit;
10042         if (!total_num)
10043                 return -EINVAL;
10044
10045         return 0;
10046 }
10047
10048 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10049                                  void *data)
10050 {
10051 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10052 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10053
10054         struct hclge_desc *desc;
10055         u32 *reg_val = data;
10056         __le32 *desc_data;
10057         int nodata_num;
10058         int cmd_num;
10059         int i, k, n;
10060         int ret;
10061
10062         if (regs_num == 0)
10063                 return 0;
10064
10065         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10066         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10067                                HCLGE_32_BIT_REG_RTN_DATANUM);
10068         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10069         if (!desc)
10070                 return -ENOMEM;
10071
10072         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10073         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10074         if (ret) {
10075                 dev_err(&hdev->pdev->dev,
10076                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
10077                 kfree(desc);
10078                 return ret;
10079         }
10080
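              /* Register values are packed back to back across the descriptors:
               * the first descriptor contributes HCLGE_32_BIT_REG_RTN_DATANUM
               * minus HCLGE_32_BIT_DESC_NODATA_LEN words (its leading slots are
               * assumed not to carry register data), later descriptors are
               * consumed in full.
               */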
10081         for (i = 0; i < cmd_num; i++) {
10082                 if (i == 0) {
10083                         desc_data = (__le32 *)(&desc[i].data[0]);
10084                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10085                 } else {
10086                         desc_data = (__le32 *)(&desc[i]);
10087                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
10088                 }
10089                 for (k = 0; k < n; k++) {
10090                         *reg_val++ = le32_to_cpu(*desc_data++);
10091
10092                         regs_num--;
10093                         if (!regs_num)
10094                                 break;
10095                 }
10096         }
10097
10098         kfree(desc);
10099         return 0;
10100 }
10101
10102 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10103                                  void *data)
10104 {
10105 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10106 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10107
10108         struct hclge_desc *desc;
10109         u64 *reg_val = data;
10110         __le64 *desc_data;
10111         int nodata_len;
10112         int cmd_num;
10113         int i, k, n;
10114         int ret;
10115
10116         if (regs_num == 0)
10117                 return 0;
10118
10119         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10120         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10121                                HCLGE_64_BIT_REG_RTN_DATANUM);
10122         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10123         if (!desc)
10124                 return -ENOMEM;
10125
10126         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10127         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10128         if (ret) {
10129                 dev_err(&hdev->pdev->dev,
10130                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
10131                 kfree(desc);
10132                 return ret;
10133         }
10134
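              /* Same packing as the 32-bit parse above: the first descriptor
               * yields one fewer 64-bit word, later descriptors are consumed in
               * full.
               */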
10135         for (i = 0; i < cmd_num; i++) {
10136                 if (i == 0) {
10137                         desc_data = (__le64 *)(&desc[i].data[0]);
10138                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10139                 } else {
10140                         desc_data = (__le64 *)(&desc[i]);
10141                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
10142                 }
10143                 for (k = 0; k < n; k++) {
10144                         *reg_val++ = le64_to_cpu(*desc_data++);
10145
10146                         regs_num--;
10147                         if (!regs_num)
10148                                 break;
10149                 }
10150         }
10151
10152         kfree(desc);
10153         return 0;
10154 }
10155
10156 #define MAX_SEPARATE_NUM        4
10157 #define SEPARATOR_VALUE         0xFDFCFBFA
10158 #define REG_NUM_PER_LINE        4
10159 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
10160 #define REG_SEPARATOR_LINE      1
10161 #define REG_NUM_REMAIN_MASK     3
10162 #define BD_LIST_MAX_NUM         30
10163
10164 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10165 {
10166         /* prepare 4 commands to query DFX BD number */
10167         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10168         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10169         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10170         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10171         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10172         desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10173         hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10174
10175         return hclge_cmd_send(&hdev->hw, desc, 4);
10176 }
10177
10178 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10179                                     int *bd_num_list,
10180                                     u32 type_num)
10181 {
10182         u32 entries_per_desc, desc_index, index, offset, i;
10183         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10184         int ret;
10185
10186         ret = hclge_query_bd_num_cmd_send(hdev, desc);
10187         if (ret) {
10188                 dev_err(&hdev->pdev->dev,
10189                         "Get dfx bd num fail, status is %d.\n", ret);
10190                 return ret;
10191         }
10192
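              /* Each descriptor returns ARRAY_SIZE(desc[0].data) BD counts, so a
               * DFX offset selects descriptor offset / entries_per_desc and slot
               * offset % entries_per_desc. For example (illustrative, assuming
               * six data words per descriptor), HCLGE_DFX_PPP_BD_OFFSET (9)
               * lands in desc[1], slot 3.
               */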
10193         entries_per_desc = ARRAY_SIZE(desc[0].data);
10194         for (i = 0; i < type_num; i++) {
10195                 offset = hclge_dfx_bd_offset_list[i];
10196                 index = offset % entries_per_desc;
10197                 desc_index = offset / entries_per_desc;
10198                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10199         }
10200
10201         return ret;
10202 }
10203
10204 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10205                                   struct hclge_desc *desc_src, int bd_num,
10206                                   enum hclge_opcode_type cmd)
10207 {
10208         struct hclge_desc *desc = desc_src;
10209         int i, ret;
10210
10211         hclge_cmd_setup_basic_desc(desc, cmd, true);
10212         for (i = 0; i < bd_num - 1; i++) {
10213                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10214                 desc++;
10215                 hclge_cmd_setup_basic_desc(desc, cmd, true);
10216         }
10217
10218         desc = desc_src;
10219         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10220         if (ret)
10221                 dev_err(&hdev->pdev->dev,
10222                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10223                         cmd, ret);
10224
10225         return ret;
10226 }
10227
10228 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10229                                     void *data)
10230 {
10231         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10232         struct hclge_desc *desc = desc_src;
10233         u32 *reg = data;
10234
10235         entries_per_desc = ARRAY_SIZE(desc->data);
10236         reg_num = entries_per_desc * bd_num;
10237         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10238         for (i = 0; i < reg_num; i++) {
10239                 index = i % entries_per_desc;
10240                 desc_index = i / entries_per_desc;
10241                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10242         }
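              /* Append between one and REG_NUM_PER_LINE SEPARATOR_VALUE words so
               * the block ends on a dump-line boundary with a visible marker.
               */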
10243         for (i = 0; i < separator_num; i++)
10244                 *reg++ = SEPARATOR_VALUE;
10245
10246         return reg_num + separator_num;
10247 }
10248
10249 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10250 {
10251         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10252         int data_len_per_desc, data_len, bd_num, i;
10253         int bd_num_list[BD_LIST_MAX_NUM];
10254         int ret;
10255
10256         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10257         if (ret) {
10258                 dev_err(&hdev->pdev->dev,
10259                         "Get dfx reg bd num fail, status is %d.\n", ret);
10260                 return ret;
10261         }
10262
10263         data_len_per_desc = sizeof_field(struct hclge_desc, data);
10264         *len = 0;
10265         for (i = 0; i < dfx_reg_type_num; i++) {
10266                 bd_num = bd_num_list[i];
10267                 data_len = data_len_per_desc * bd_num;
10268                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10269         }
10270
10271         return ret;
10272 }
10273
10274 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10275 {
10276         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10277         int bd_num, bd_num_max, buf_len, i;
10278         int bd_num_list[BD_LIST_MAX_NUM];
10279         struct hclge_desc *desc_src;
10280         u32 *reg = data;
10281         int ret;
10282
10283         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10284         if (ret) {
10285                 dev_err(&hdev->pdev->dev,
10286                         "Get dfx reg bd num fail, status is %d.\n", ret);
10287                 return ret;
10288         }
10289
10290         bd_num_max = bd_num_list[0];
10291         for (i = 1; i < dfx_reg_type_num; i++)
10292                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10293
10294         buf_len = sizeof(*desc_src) * bd_num_max;
10295         desc_src = kzalloc(buf_len, GFP_KERNEL);
10296         if (!desc_src)
10297                 return -ENOMEM;
10298
10299         for (i = 0; i < dfx_reg_type_num; i++) {
10300                 bd_num = bd_num_list[i];
10301                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10302                                              hclge_dfx_reg_opcode_list[i]);
10303                 if (ret) {
10304                         dev_err(&hdev->pdev->dev,
10305                                 "Get dfx reg fail, status is %d.\n", ret);
10306                         break;
10307                 }
10308
10309                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10310         }
10311
10312         kfree(desc_src);
10313         return ret;
10314 }
10315
10316 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10317                               struct hnae3_knic_private_info *kinfo)
10318 {
10319 #define HCLGE_RING_REG_OFFSET           0x200
10320 #define HCLGE_RING_INT_REG_OFFSET       0x4
10321
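              /* The ring registers repeat every HCLGE_RING_REG_OFFSET bytes per
               * TQP and the per-vector interrupt registers every
               * HCLGE_RING_INT_REG_OFFSET bytes, so the same address lists are
               * walked once per queue and once per vector below.
               */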
10322         int i, j, reg_num, separator_num;
10323         int data_num_sum;
10324         u32 *reg = data;
10325
10326         /* fetch per-PF register values from the PF PCIe register space */
10327         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10328         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10329         for (i = 0; i < reg_num; i++)
10330                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10331         for (i = 0; i < separator_num; i++)
10332                 *reg++ = SEPARATOR_VALUE;
10333         data_num_sum = reg_num + separator_num;
10334
10335         reg_num = ARRAY_SIZE(common_reg_addr_list);
10336         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10337         for (i = 0; i < reg_num; i++)
10338                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10339         for (i = 0; i < separator_num; i++)
10340                 *reg++ = SEPARATOR_VALUE;
10341         data_num_sum += reg_num + separator_num;
10342
10343         reg_num = ARRAY_SIZE(ring_reg_addr_list);
10344         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10345         for (j = 0; j < kinfo->num_tqps; j++) {
10346                 for (i = 0; i < reg_num; i++)
10347                         *reg++ = hclge_read_dev(&hdev->hw,
10348                                                 ring_reg_addr_list[i] +
10349                                                 HCLGE_RING_REG_OFFSET * j);
10350                 for (i = 0; i < separator_num; i++)
10351                         *reg++ = SEPARATOR_VALUE;
10352         }
10353         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10354
10355         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10356         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10357         for (j = 0; j < hdev->num_msi_used - 1; j++) {
10358                 for (i = 0; i < reg_num; i++)
10359                         *reg++ = hclge_read_dev(&hdev->hw,
10360                                                 tqp_intr_reg_addr_list[i] +
10361                                                 HCLGE_RING_INT_REG_OFFSET * j);
10362                 for (i = 0; i < separator_num; i++)
10363                         *reg++ = SEPARATOR_VALUE;
10364         }
10365         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10366
10367         return data_num_sum;
10368 }
10369
10370 static int hclge_get_regs_len(struct hnae3_handle *handle)
10371 {
10372         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10373         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10374         struct hclge_vport *vport = hclge_get_vport(handle);
10375         struct hclge_dev *hdev = vport->back;
10376         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10377         int regs_lines_32_bit, regs_lines_64_bit;
10378         int ret;
10379
10380         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10381         if (ret) {
10382                 dev_err(&hdev->pdev->dev,
10383                         "Get register number failed, ret = %d.\n", ret);
10384                 return ret;
10385         }
10386
10387         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10388         if (ret) {
10389                 dev_err(&hdev->pdev->dev,
10390                         "Get dfx reg len failed, ret = %d.\n", ret);
10391                 return ret;
10392         }
10393
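              /* Account for each block in whole REG_LEN_PER_LINE lines plus one
               * separator line; the total must match what hclge_get_regs()
               * (ethtool -d) actually writes.
               */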
10394         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10395                 REG_SEPARATOR_LINE;
10396         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10397                 REG_SEPARATOR_LINE;
10398         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10399                 REG_SEPARATOR_LINE;
10400         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10401                 REG_SEPARATOR_LINE;
10402         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10403                 REG_SEPARATOR_LINE;
10404         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10405                 REG_SEPARATOR_LINE;
10406
10407         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10408                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10409                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10410 }
10411
10412 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10413                            void *data)
10414 {
10415         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10416         struct hclge_vport *vport = hclge_get_vport(handle);
10417         struct hclge_dev *hdev = vport->back;
10418         u32 regs_num_32_bit, regs_num_64_bit;
10419         int i, reg_num, separator_num, ret;
10420         u32 *reg = data;
10421
10422         *version = hdev->fw_version;
10423
10424         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10425         if (ret) {
10426                 dev_err(&hdev->pdev->dev,
10427                         "Get register number failed, ret = %d.\n", ret);
10428                 return;
10429         }
10430
10431         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10432
10433         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10434         if (ret) {
10435                 dev_err(&hdev->pdev->dev,
10436                         "Get 32 bit register failed, ret = %d.\n", ret);
10437                 return;
10438         }
10439         reg_num = regs_num_32_bit;
10440         reg += reg_num;
10441         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10442         for (i = 0; i < separator_num; i++)
10443                 *reg++ = SEPARATOR_VALUE;
10444
10445         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10446         if (ret) {
10447                 dev_err(&hdev->pdev->dev,
10448                         "Get 64 bit register failed, ret = %d.\n", ret);
10449                 return;
10450         }
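              /* Each 64-bit register occupies two u32 slots in the dump buffer. */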
10451         reg_num = regs_num_64_bit * 2;
10452         reg += reg_num;
10453         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10454         for (i = 0; i < separator_num; i++)
10455                 *reg++ = SEPARATOR_VALUE;
10456
10457         ret = hclge_get_dfx_reg(hdev, reg);
10458         if (ret)
10459                 dev_err(&hdev->pdev->dev,
10460                         "Get dfx register failed, ret = %d.\n", ret);
10461 }
10462
10463 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10464 {
10465         struct hclge_set_led_state_cmd *req;
10466         struct hclge_desc desc;
10467         int ret;
10468
10469         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10470
10471         req = (struct hclge_set_led_state_cmd *)desc.data;
10472         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10473                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10474
10475         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10476         if (ret)
10477                 dev_err(&hdev->pdev->dev,
10478                         "Send set led state cmd error, ret = %d\n", ret);
10479
10480         return ret;
10481 }
10482
10483 enum hclge_led_status {
10484         HCLGE_LED_OFF,
10485         HCLGE_LED_ON,
10486         HCLGE_LED_NO_CHANGE = 0xFF,
10487 };
10488
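      /* Backs ethtool's set_phys_id (port identification, e.g. "ethtool -p",
       * invocation illustrative); only the ACTIVE and INACTIVE states are
       * handled here, turning the locate LED steadily on or off.
       */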
10489 static int hclge_set_led_id(struct hnae3_handle *handle,
10490                             enum ethtool_phys_id_state status)
10491 {
10492         struct hclge_vport *vport = hclge_get_vport(handle);
10493         struct hclge_dev *hdev = vport->back;
10494
10495         switch (status) {
10496         case ETHTOOL_ID_ACTIVE:
10497                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10498         case ETHTOOL_ID_INACTIVE:
10499                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10500         default:
10501                 return -EINVAL;
10502         }
10503 }
10504
10505 static void hclge_get_link_mode(struct hnae3_handle *handle,
10506                                 unsigned long *supported,
10507                                 unsigned long *advertising)
10508 {
10509         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10510         struct hclge_vport *vport = hclge_get_vport(handle);
10511         struct hclge_dev *hdev = vport->back;
10512         unsigned int idx = 0;
10513
10514         for (; idx < size; idx++) {
10515                 supported[idx] = hdev->hw.mac.supported[idx];
10516                 advertising[idx] = hdev->hw.mac.advertising[idx];
10517         }
10518 }
10519
10520 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10521 {
10522         struct hclge_vport *vport = hclge_get_vport(handle);
10523         struct hclge_dev *hdev = vport->back;
10524
10525         return hclge_config_gro(hdev, enable);
10526 }
10527
10528 static const struct hnae3_ae_ops hclge_ops = {
10529         .init_ae_dev = hclge_init_ae_dev,
10530         .uninit_ae_dev = hclge_uninit_ae_dev,
10531         .flr_prepare = hclge_flr_prepare,
10532         .flr_done = hclge_flr_done,
10533         .init_client_instance = hclge_init_client_instance,
10534         .uninit_client_instance = hclge_uninit_client_instance,
10535         .map_ring_to_vector = hclge_map_ring_to_vector,
10536         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10537         .get_vector = hclge_get_vector,
10538         .put_vector = hclge_put_vector,
10539         .set_promisc_mode = hclge_set_promisc_mode,
10540         .set_loopback = hclge_set_loopback,
10541         .start = hclge_ae_start,
10542         .stop = hclge_ae_stop,
10543         .client_start = hclge_client_start,
10544         .client_stop = hclge_client_stop,
10545         .get_status = hclge_get_status,
10546         .get_ksettings_an_result = hclge_get_ksettings_an_result,
10547         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10548         .get_media_type = hclge_get_media_type,
10549         .check_port_speed = hclge_check_port_speed,
10550         .get_fec = hclge_get_fec,
10551         .set_fec = hclge_set_fec,
10552         .get_rss_key_size = hclge_get_rss_key_size,
10553         .get_rss_indir_size = hclge_get_rss_indir_size,
10554         .get_rss = hclge_get_rss,
10555         .set_rss = hclge_set_rss,
10556         .set_rss_tuple = hclge_set_rss_tuple,
10557         .get_rss_tuple = hclge_get_rss_tuple,
10558         .get_tc_size = hclge_get_tc_size,
10559         .get_mac_addr = hclge_get_mac_addr,
10560         .set_mac_addr = hclge_set_mac_addr,
10561         .do_ioctl = hclge_do_ioctl,
10562         .add_uc_addr = hclge_add_uc_addr,
10563         .rm_uc_addr = hclge_rm_uc_addr,
10564         .add_mc_addr = hclge_add_mc_addr,
10565         .rm_mc_addr = hclge_rm_mc_addr,
10566         .set_autoneg = hclge_set_autoneg,
10567         .get_autoneg = hclge_get_autoneg,
10568         .restart_autoneg = hclge_restart_autoneg,
10569         .halt_autoneg = hclge_halt_autoneg,
10570         .get_pauseparam = hclge_get_pauseparam,
10571         .set_pauseparam = hclge_set_pauseparam,
10572         .set_mtu = hclge_set_mtu,
10573         .reset_queue = hclge_reset_tqp,
10574         .get_stats = hclge_get_stats,
10575         .get_mac_stats = hclge_get_mac_stat,
10576         .update_stats = hclge_update_stats,
10577         .get_strings = hclge_get_strings,
10578         .get_sset_count = hclge_get_sset_count,
10579         .get_fw_version = hclge_get_fw_version,
10580         .get_mdix_mode = hclge_get_mdix_mode,
10581         .enable_vlan_filter = hclge_enable_vlan_filter,
10582         .set_vlan_filter = hclge_set_vlan_filter,
10583         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10584         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10585         .reset_event = hclge_reset_event,
10586         .get_reset_level = hclge_get_reset_level,
10587         .set_default_reset_request = hclge_set_def_reset_request,
10588         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10589         .set_channels = hclge_set_channels,
10590         .get_channels = hclge_get_channels,
10591         .get_regs_len = hclge_get_regs_len,
10592         .get_regs = hclge_get_regs,
10593         .set_led_id = hclge_set_led_id,
10594         .get_link_mode = hclge_get_link_mode,
10595         .add_fd_entry = hclge_add_fd_entry,
10596         .del_fd_entry = hclge_del_fd_entry,
10597         .del_all_fd_entries = hclge_del_all_fd_entries,
10598         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10599         .get_fd_rule_info = hclge_get_fd_rule_info,
10600         .get_fd_all_rules = hclge_get_all_rules,
10601         .restore_fd_rules = hclge_restore_fd_entries,
10602         .enable_fd = hclge_enable_fd,
10603         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10604         .dbg_run_cmd = hclge_dbg_run_cmd,
10605         .handle_hw_ras_error = hclge_handle_hw_ras_error,
10606         .get_hw_reset_stat = hclge_get_hw_reset_stat,
10607         .ae_dev_resetting = hclge_ae_dev_resetting,
10608         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10609         .set_gro_en = hclge_gro_en,
10610         .get_global_queue_id = hclge_covert_handle_qid_global,
10611         .set_timer_task = hclge_set_timer_task,
10612         .mac_connect_phy = hclge_mac_connect_phy,
10613         .mac_disconnect_phy = hclge_mac_disconnect_phy,
10614         .restore_vlan_table = hclge_restore_vlan_table,
10615         .get_vf_config = hclge_get_vf_config,
10616         .set_vf_link_state = hclge_set_vf_link_state,
10617         .set_vf_spoofchk = hclge_set_vf_spoofchk,
10618         .set_vf_trust = hclge_set_vf_trust,
10619         .set_vf_rate = hclge_set_vf_rate,
10620         .set_vf_mac = hclge_set_vf_mac,
10621 };
10622
10623 static struct hnae3_ae_algo ae_algo = {
10624         .ops = &hclge_ops,
10625         .pdev_id_table = ae_algo_pci_tbl,
10626 };
10627
10628 static int hclge_init(void)
10629 {
10630         pr_info("%s is initializing\n", HCLGE_NAME);
10631
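              /* WQ_MEM_RECLAIM provides a rescuer thread, so work queued here can
               * still make forward progress under memory pressure.
               */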
10632         hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
10633         if (!hclge_wq) {
10634                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10635                 return -ENOMEM;
10636         }
10637
10638         hnae3_register_ae_algo(&ae_algo);
10639
10640         return 0;
10641 }
10642
10643 static void hclge_exit(void)
10644 {
10645         hnae3_unregister_ae_algo(&ae_algo);
10646         destroy_workqueue(hclge_wq);
10647 }
10648 module_init(hclge_init);
10649 module_exit(hclge_exit);
10650
10651 MODULE_LICENSE("GPL");
10652 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10653 MODULE_DESCRIPTION("HCLGE Driver");
10654 MODULE_VERSION(HCLGE_MOD_VERSION);