drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37                                u16 *allocated_size, bool is_alloc);
38
39 static struct hnae3_ae_algo ae_algo;
40
41 static const struct pci_device_id ae_algo_pci_tbl[] = {
42         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49         /* required last entry */
50         {0, }
51 };
52
53 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
54
55 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
56                                          HCLGE_CMDQ_TX_ADDR_H_REG,
57                                          HCLGE_CMDQ_TX_DEPTH_REG,
58                                          HCLGE_CMDQ_TX_TAIL_REG,
59                                          HCLGE_CMDQ_TX_HEAD_REG,
60                                          HCLGE_CMDQ_RX_ADDR_L_REG,
61                                          HCLGE_CMDQ_RX_ADDR_H_REG,
62                                          HCLGE_CMDQ_RX_DEPTH_REG,
63                                          HCLGE_CMDQ_RX_TAIL_REG,
64                                          HCLGE_CMDQ_RX_HEAD_REG,
65                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
66                                          HCLGE_CMDQ_INTR_STS_REG,
67                                          HCLGE_CMDQ_INTR_EN_REG,
68                                          HCLGE_CMDQ_INTR_GEN_REG};
69
70 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
71                                            HCLGE_VECTOR0_OTER_EN_REG,
72                                            HCLGE_MISC_RESET_STS_REG,
73                                            HCLGE_MISC_VECTOR_INT_STS,
74                                            HCLGE_GLOBAL_RESET_REG,
75                                            HCLGE_FUN_RST_ING,
76                                            HCLGE_GRO_EN_REG};
77
78 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
79                                          HCLGE_RING_RX_ADDR_H_REG,
80                                          HCLGE_RING_RX_BD_NUM_REG,
81                                          HCLGE_RING_RX_BD_LENGTH_REG,
82                                          HCLGE_RING_RX_MERGE_EN_REG,
83                                          HCLGE_RING_RX_TAIL_REG,
84                                          HCLGE_RING_RX_HEAD_REG,
85                                          HCLGE_RING_RX_FBD_NUM_REG,
86                                          HCLGE_RING_RX_OFFSET_REG,
87                                          HCLGE_RING_RX_FBD_OFFSET_REG,
88                                          HCLGE_RING_RX_STASH_REG,
89                                          HCLGE_RING_RX_BD_ERR_REG,
90                                          HCLGE_RING_TX_ADDR_L_REG,
91                                          HCLGE_RING_TX_ADDR_H_REG,
92                                          HCLGE_RING_TX_BD_NUM_REG,
93                                          HCLGE_RING_TX_PRIORITY_REG,
94                                          HCLGE_RING_TX_TC_REG,
95                                          HCLGE_RING_TX_MERGE_EN_REG,
96                                          HCLGE_RING_TX_TAIL_REG,
97                                          HCLGE_RING_TX_HEAD_REG,
98                                          HCLGE_RING_TX_FBD_NUM_REG,
99                                          HCLGE_RING_TX_OFFSET_REG,
100                                          HCLGE_RING_TX_EBD_NUM_REG,
101                                          HCLGE_RING_TX_EBD_OFFSET_REG,
102                                          HCLGE_RING_TX_BD_ERR_REG,
103                                          HCLGE_RING_EN_REG};
104
105 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
106                                              HCLGE_TQP_INTR_GL0_REG,
107                                              HCLGE_TQP_INTR_GL1_REG,
108                                              HCLGE_TQP_INTR_GL2_REG,
109                                              HCLGE_TQP_INTR_RL_REG};
110
111 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
112         "App    Loopback test",
113         "Serdes serial Loopback test",
114         "Serdes parallel Loopback test",
115         "Phy    Loopback test"
116 };
117
118 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
119         {"mac_tx_mac_pause_num",
120                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
121         {"mac_rx_mac_pause_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
123         {"mac_tx_control_pkt_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
125         {"mac_rx_control_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
127         {"mac_tx_pfc_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
129         {"mac_tx_pfc_pri0_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
131         {"mac_tx_pfc_pri1_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
133         {"mac_tx_pfc_pri2_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
135         {"mac_tx_pfc_pri3_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
137         {"mac_tx_pfc_pri4_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
139         {"mac_tx_pfc_pri5_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
141         {"mac_tx_pfc_pri6_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
143         {"mac_tx_pfc_pri7_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
145         {"mac_rx_pfc_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
147         {"mac_rx_pfc_pri0_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
149         {"mac_rx_pfc_pri1_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
151         {"mac_rx_pfc_pri2_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
153         {"mac_rx_pfc_pri3_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
155         {"mac_rx_pfc_pri4_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
157         {"mac_rx_pfc_pri5_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
159         {"mac_rx_pfc_pri6_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
161         {"mac_rx_pfc_pri7_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
163         {"mac_tx_total_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
165         {"mac_tx_total_oct_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
167         {"mac_tx_good_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
169         {"mac_tx_bad_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
171         {"mac_tx_good_oct_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
173         {"mac_tx_bad_oct_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
175         {"mac_tx_uni_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
177         {"mac_tx_multi_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
179         {"mac_tx_broad_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
181         {"mac_tx_undersize_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
183         {"mac_tx_oversize_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
185         {"mac_tx_64_oct_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
187         {"mac_tx_65_127_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
189         {"mac_tx_128_255_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
191         {"mac_tx_256_511_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
193         {"mac_tx_512_1023_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
195         {"mac_tx_1024_1518_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
197         {"mac_tx_1519_2047_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
199         {"mac_tx_2048_4095_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
201         {"mac_tx_4096_8191_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
203         {"mac_tx_8192_9216_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
205         {"mac_tx_9217_12287_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
207         {"mac_tx_12288_16383_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
209         {"mac_tx_1519_max_good_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
211         {"mac_tx_1519_max_bad_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
213         {"mac_rx_total_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
215         {"mac_rx_total_oct_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
217         {"mac_rx_good_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
219         {"mac_rx_bad_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
221         {"mac_rx_good_oct_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
223         {"mac_rx_bad_oct_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
225         {"mac_rx_uni_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
227         {"mac_rx_multi_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
229         {"mac_rx_broad_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
231         {"mac_rx_undersize_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
233         {"mac_rx_oversize_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
235         {"mac_rx_64_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
237         {"mac_rx_65_127_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
239         {"mac_rx_128_255_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
241         {"mac_rx_256_511_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
243         {"mac_rx_512_1023_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
245         {"mac_rx_1024_1518_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
247         {"mac_rx_1519_2047_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
249         {"mac_rx_2048_4095_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
251         {"mac_rx_4096_8191_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
253         {"mac_rx_8192_9216_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
255         {"mac_rx_9217_12287_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
257         {"mac_rx_12288_16383_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
259         {"mac_rx_1519_max_good_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
261         {"mac_rx_1519_max_bad_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
263
264         {"mac_tx_fragment_pkt_num",
265                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
266         {"mac_tx_undermin_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
268         {"mac_tx_jabber_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
270         {"mac_tx_err_all_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
272         {"mac_tx_from_app_good_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
274         {"mac_tx_from_app_bad_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
276         {"mac_rx_fragment_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
278         {"mac_rx_undermin_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
280         {"mac_rx_jabber_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
282         {"mac_rx_fcs_err_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
284         {"mac_rx_send_app_good_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
286         {"mac_rx_send_app_bad_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
288 };
289
290 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
291         {
292                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
293                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
294                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
295                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
296                 .i_port_bitmap = 0x1,
297         },
298 };
299
300 static const u8 hclge_hash_key[] = {
301         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
302         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
303         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
304         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
305         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
306 };
307
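/* Query all MAC statistics with the fixed 21-descriptor command
 * (HCLGE_OPC_STATS_MAC) and accumulate the returned counters into
 * hdev->hw_stats.mac_stats. Used when the firmware does not support
 * reporting the number of statistics registers first.
 */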
308 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
309 {
310 #define HCLGE_MAC_CMD_NUM 21
311
312         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
313         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
314         __le64 *desc_data;
315         int i, k, n;
316         int ret;
317
318         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
319         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
320         if (ret) {
321                 dev_err(&hdev->pdev->dev,
322                         "Get MAC pkt stats fail, status = %d.\n", ret);
323
324                 return ret;
325         }
326
327         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
328                 /* for special opcode 0032, only the first desc has the head */
329                 if (unlikely(i == 0)) {
330                         desc_data = (__le64 *)(&desc[i].data[0]);
331                         n = HCLGE_RD_FIRST_STATS_NUM;
332                 } else {
333                         desc_data = (__le64 *)(&desc[i]);
334                         n = HCLGE_RD_OTHER_STATS_NUM;
335                 }
336
337                 for (k = 0; k < n; k++) {
338                         *data += le64_to_cpu(*desc_data);
339                         data++;
340                         desc_data++;
341                 }
342         }
343
344         return 0;
345 }
346
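/* Query all MAC statistics with HCLGE_OPC_STATS_MAC_ALL, using the
 * descriptor count reported by the firmware, and accumulate the
 * returned counters into hdev->hw_stats.mac_stats.
 */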
347 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
348 {
349         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
350         struct hclge_desc *desc;
351         __le64 *desc_data;
352         u16 i, k, n;
353         int ret;
354
355         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
356         if (!desc)
357                 return -ENOMEM;
358         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
359         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
360         if (ret) {
361                 kfree(desc);
362                 return ret;
363         }
364
365         for (i = 0; i < desc_num; i++) {
366                 /* for special opcode 0034, only the first desc has the head */
367                 if (i == 0) {
368                         desc_data = (__le64 *)(&desc[i].data[0]);
369                         n = HCLGE_RD_FIRST_STATS_NUM;
370                 } else {
371                         desc_data = (__le64 *)(&desc[i]);
372                         n = HCLGE_RD_OTHER_STATS_NUM;
373                 }
374
375                 for (k = 0; k < n; k++) {
376                         *data += le64_to_cpu(*desc_data);
377                         data++;
378                         desc_data++;
379                 }
380         }
381
382         kfree(desc);
383
384         return 0;
385 }
386
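/* Ask the firmware how many MAC statistics registers it exposes and
 * convert that into the number of descriptors needed to read them all:
 * the first descriptor carries the command head plus three statistics,
 * and each following descriptor carries four.
 */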
387 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
388 {
389         struct hclge_desc desc;
390         __le32 *desc_data;
391         u32 reg_num;
392         int ret;
393
394         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
395         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
396         if (ret)
397                 return ret;
398
399         desc_data = (__le32 *)(&desc.data[0]);
400         reg_num = le32_to_cpu(*desc_data);
401
402         *desc_num = 1 + ((reg_num - 3) >> 2) +
403                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
404
405         return 0;
406 }
407
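/* Update the MAC statistics, preferring the method that uses the
 * firmware-reported descriptor count and falling back to the fixed-size
 * query when the firmware returns -EOPNOTSUPP.
 */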
408 static int hclge_mac_update_stats(struct hclge_dev *hdev)
409 {
410         u32 desc_num;
411         int ret;
412
413         ret = hclge_mac_query_reg_num(hdev, &desc_num);
414
415         /* The firmware supports the new statistics acquisition method */
416         if (!ret)
417                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
418         else if (ret == -EOPNOTSUPP)
419                 ret = hclge_mac_update_stats_defective(hdev);
420         else
421                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
422
423         return ret;
424 }
425
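/* Accumulate the per-queue RX and TX packet counters for every TQP of
 * this handle by issuing HCLGE_OPC_QUERY_RX_STATUS and
 * HCLGE_OPC_QUERY_TX_STATUS one queue at a time.
 */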
426 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
427 {
428         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
429         struct hclge_vport *vport = hclge_get_vport(handle);
430         struct hclge_dev *hdev = vport->back;
431         struct hnae3_queue *queue;
432         struct hclge_desc desc[1];
433         struct hclge_tqp *tqp;
434         int ret, i;
435
436         for (i = 0; i < kinfo->num_tqps; i++) {
437                 queue = handle->kinfo.tqp[i];
438                 tqp = container_of(queue, struct hclge_tqp, q);
439                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
440                 hclge_cmd_setup_basic_desc(&desc[0],
441                                            HCLGE_OPC_QUERY_RX_STATUS,
442                                            true);
443
444                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
445                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
446                 if (ret) {
447                         dev_err(&hdev->pdev->dev,
448                                 "Query tqp stat fail, status = %d, queue = %d\n",
449                                 ret, i);
450                         return ret;
451                 }
452                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
453                         le32_to_cpu(desc[0].data[1]);
454         }
455
456         for (i = 0; i < kinfo->num_tqps; i++) {
457                 queue = handle->kinfo.tqp[i];
458                 tqp = container_of(queue, struct hclge_tqp, q);
459                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
460                 hclge_cmd_setup_basic_desc(&desc[0],
461                                            HCLGE_OPC_QUERY_TX_STATUS,
462                                            true);
463
464                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
465                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
466                 if (ret) {
467                         dev_err(&hdev->pdev->dev,
468                                 "Query tqp stat fail, status = %d, queue = %d\n",
469                                 ret, i);
470                         return ret;
471                 }
472                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
473                         le32_to_cpu(desc[0].data[1]);
474         }
475
476         return 0;
477 }
478
479 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
480 {
481         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
482         struct hclge_tqp *tqp;
483         u64 *buff = data;
484         int i;
485
486         for (i = 0; i < kinfo->num_tqps; i++) {
487                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
488                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
489         }
490
491         for (i = 0; i < kinfo->num_tqps; i++) {
492                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
493                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
494         }
495
496         return buff;
497 }
498
499 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
500 {
501         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
502
503         return kinfo->num_tqps * 2;
504 }
505
506 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
507 {
508         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
509         u8 *buff = data;
510         int i = 0;
511
512         for (i = 0; i < kinfo->num_tqps; i++) {
513                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
514                         struct hclge_tqp, q);
515                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
516                          tqp->index);
517                 buff = buff + ETH_GSTRING_LEN;
518         }
519
520         for (i = 0; i < kinfo->num_tqps; i++) {
521                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
522                         struct hclge_tqp, q);
523                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
524                          tqp->index);
525                 buff = buff + ETH_GSTRING_LEN;
526         }
527
528         return buff;
529 }
530
531 static u64 *hclge_comm_get_stats(void *comm_stats,
532                                  const struct hclge_comm_stats_str strs[],
533                                  int size, u64 *data)
534 {
535         u64 *buf = data;
536         u32 i;
537
538         for (i = 0; i < size; i++)
539                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
540
541         return buf + size;
542 }
543
544 static u8 *hclge_comm_get_strings(u32 stringset,
545                                   const struct hclge_comm_stats_str strs[],
546                                   int size, u8 *data)
547 {
548         char *buff = (char *)data;
549         u32 i;
550
551         if (stringset != ETH_SS_STATS)
552                 return buff;
553
554         for (i = 0; i < size; i++) {
555                 snprintf(buff, ETH_GSTRING_LEN, "%s",
556                          strs[i].desc);
557                 buff = buff + ETH_GSTRING_LEN;
558         }
559
560         return (u8 *)buff;
561 }
562
563 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
564 {
565         struct hnae3_handle *handle;
566         int status;
567
568         handle = &hdev->vport[0].nic;
569         if (handle->client) {
570                 status = hclge_tqps_update_stats(handle);
571                 if (status) {
572                         dev_err(&hdev->pdev->dev,
573                                 "Update TQPS stats fail, status = %d.\n",
574                                 status);
575                 }
576         }
577
578         status = hclge_mac_update_stats(hdev);
579         if (status)
580                 dev_err(&hdev->pdev->dev,
581                         "Update MAC stats fail, status = %d.\n", status);
582 }
583
584 static void hclge_update_stats(struct hnae3_handle *handle,
585                                struct net_device_stats *net_stats)
586 {
587         struct hclge_vport *vport = hclge_get_vport(handle);
588         struct hclge_dev *hdev = vport->back;
589         int status;
590
591         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
592                 return;
593
594         status = hclge_mac_update_stats(hdev);
595         if (status)
596                 dev_err(&hdev->pdev->dev,
597                         "Update MAC stats fail, status = %d.\n",
598                         status);
599
600         status = hclge_tqps_update_stats(handle);
601         if (status)
602                 dev_err(&hdev->pdev->dev,
603                         "Update TQPS stats fail, status = %d.\n",
604                         status);
605
606         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
607 }
608
609 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
610 {
611 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
612                 HNAE3_SUPPORT_PHY_LOOPBACK |\
613                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
614                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
615
616         struct hclge_vport *vport = hclge_get_vport(handle);
617         struct hclge_dev *hdev = vport->back;
618         int count = 0;
619
620         /* Loopback test support rules:
621          * mac: only supported in GE mode
622          * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
623          * phy: only supported when a phy device exists on the board
624          */
625         if (stringset == ETH_SS_TEST) {
626                 /* clear loopback bit flags at first */
627                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
628                 if (hdev->pdev->revision >= 0x21 ||
629                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
630                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
631                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
632                         count += 1;
633                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
634                 }
635
636                 count += 2;
637                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
638                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
639         } else if (stringset == ETH_SS_STATS) {
640                 count = ARRAY_SIZE(g_mac_stats_string) +
641                         hclge_tqps_get_sset_count(handle, stringset);
642         }
643
644         return count;
645 }
646
647 static void hclge_get_strings(struct hnae3_handle *handle,
648                               u32 stringset,
649                               u8 *data)
650 {
651         u8 *p = data;
652         int size;
653
654         if (stringset == ETH_SS_STATS) {
655                 size = ARRAY_SIZE(g_mac_stats_string);
656                 p = hclge_comm_get_strings(stringset,
657                                            g_mac_stats_string,
658                                            size,
659                                            p);
660                 p = hclge_tqps_get_strings(handle, p);
661         } else if (stringset == ETH_SS_TEST) {
662                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
663                         memcpy(p,
664                                hns3_nic_test_strs[HNAE3_LOOP_APP],
665                                ETH_GSTRING_LEN);
666                         p += ETH_GSTRING_LEN;
667                 }
668                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
669                         memcpy(p,
670                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
671                                ETH_GSTRING_LEN);
672                         p += ETH_GSTRING_LEN;
673                 }
674                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
675                         memcpy(p,
676                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
677                                ETH_GSTRING_LEN);
678                         p += ETH_GSTRING_LEN;
679                 }
680                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
681                         memcpy(p,
682                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
683                                ETH_GSTRING_LEN);
684                         p += ETH_GSTRING_LEN;
685                 }
686         }
687 }
688
689 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
690 {
691         struct hclge_vport *vport = hclge_get_vport(handle);
692         struct hclge_dev *hdev = vport->back;
693         u64 *p;
694
695         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
696                                  g_mac_stats_string,
697                                  ARRAY_SIZE(g_mac_stats_string),
698                                  data);
699         p = hclge_tqps_get_stats(handle, p);
700 }
701
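/* Parse the function status reply: fail if the PF state is not yet
 * done, otherwise record whether this PF is the main PF.
 */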
702 static int hclge_parse_func_status(struct hclge_dev *hdev,
703                                    struct hclge_func_status_cmd *status)
704 {
705         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
706                 return -EINVAL;
707
708         /* Record whether this PF is the main PF */
709         if (status->pf_state & HCLGE_PF_STATE_MAIN)
710                 hdev->flag |= HCLGE_FLAG_MAIN;
711         else
712                 hdev->flag &= ~HCLGE_FLAG_MAIN;
713
714         return 0;
715 }
716
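/* Query the function status from the firmware, retrying briefly until
 * the PF reset is reported as done, then parse the result.
 */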
717 static int hclge_query_function_status(struct hclge_dev *hdev)
718 {
719         struct hclge_func_status_cmd *req;
720         struct hclge_desc desc;
721         int timeout = 0;
722         int ret;
723
724         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
725         req = (struct hclge_func_status_cmd *)desc.data;
726
727         do {
728                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
729                 if (ret) {
730                         dev_err(&hdev->pdev->dev,
731                                 "query function status failed %d.\n",
732                                 ret);
733
734                         return ret;
735                 }
736
737                 /* Check whether PF reset is done */
738                 if (req->pf_state)
739                         break;
740                 usleep_range(1000, 2000);
741         } while (timeout++ < 5);
742
743         ret = hclge_parse_func_status(hdev, req);
744
745         return ret;
746 }
747
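/* Query the resources assigned to this PF (TQP number, packet/TX/DV
 * buffer sizes and MSI-X vector numbers) and store them in hdev.
 */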
748 static int hclge_query_pf_resource(struct hclge_dev *hdev)
749 {
750         struct hclge_pf_res_cmd *req;
751         struct hclge_desc desc;
752         int ret;
753
754         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
755         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
756         if (ret) {
757                 dev_err(&hdev->pdev->dev,
758                         "query pf resource failed %d.\n", ret);
759                 return ret;
760         }
761
762         req = (struct hclge_pf_res_cmd *)desc.data;
763         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
764         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
765
766         if (req->tx_buf_size)
767                 hdev->tx_buf_size =
768                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
769         else
770                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
771
772         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
773
774         if (req->dv_buf_size)
775                 hdev->dv_buf_size =
776                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
777         else
778                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
779
780         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
781
782         if (hnae3_dev_roce_supported(hdev)) {
783                 hdev->roce_base_msix_offset =
784                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
785                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
786                 hdev->num_roce_msi =
787                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
788                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
789
790                 /* PF should have NIC vectors and RoCE vectors;
791                  * NIC vectors are queued before RoCE vectors.
792                  */
793                 hdev->num_msi = hdev->num_roce_msi  +
794                                 hdev->roce_base_msix_offset;
795         } else {
796                 hdev->num_msi =
797                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
798                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
799         }
800
801         return 0;
802 }
803
804 static int hclge_parse_speed(int speed_cmd, int *speed)
805 {
806         switch (speed_cmd) {
807         case 6:
808                 *speed = HCLGE_MAC_SPEED_10M;
809                 break;
810         case 7:
811                 *speed = HCLGE_MAC_SPEED_100M;
812                 break;
813         case 0:
814                 *speed = HCLGE_MAC_SPEED_1G;
815                 break;
816         case 1:
817                 *speed = HCLGE_MAC_SPEED_10G;
818                 break;
819         case 2:
820                 *speed = HCLGE_MAC_SPEED_25G;
821                 break;
822         case 3:
823                 *speed = HCLGE_MAC_SPEED_40G;
824                 break;
825         case 4:
826                 *speed = HCLGE_MAC_SPEED_50G;
827                 break;
828         case 5:
829                 *speed = HCLGE_MAC_SPEED_100G;
830                 break;
831         default:
832                 return -EINVAL;
833         }
834
835         return 0;
836 }
837
838 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
839                                         u8 speed_ability)
840 {
841         unsigned long *supported = hdev->hw.mac.supported;
842
843         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
844                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
845                                  supported);
846
847         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
848                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
849                                  supported);
850
851         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
852                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
853                                  supported);
854
855         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
856                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
857                                  supported);
858
859         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
860                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
861                                  supported);
862
863         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
864         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
865 }
866
867 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
868                                          u8 speed_ability)
869 {
870         unsigned long *supported = hdev->hw.mac.supported;
871
872         /* default to supporting all speeds for a GE port */
873         if (!speed_ability)
874                 speed_ability = HCLGE_SUPPORT_GE;
875
876         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
877                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
878                                  supported);
879
880         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
881                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
882                                  supported);
883                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
884                                  supported);
885         }
886
887         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
888                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
889                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
890         }
891
892         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
893         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
894         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
895 }
896
897 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
898 {
899         u8 media_type = hdev->hw.mac.media_type;
900
901         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
902                 hclge_parse_fiber_link_mode(hdev, speed_ability);
903         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
904                 hclge_parse_copper_link_mode(hdev, speed_ability);
905 }
906
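/* Unpack the configuration parameters returned by the firmware into
 * struct hclge_cfg: VMDq vport number, TC number, descriptor number,
 * PHY address, media type, RX buffer length, MAC address, default
 * speed, RSS size, NUMA node map, speed ability and UMV table space.
 */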
907 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
908 {
909         struct hclge_cfg_param_cmd *req;
910         u64 mac_addr_tmp_high;
911         u64 mac_addr_tmp;
912         int i;
913
914         req = (struct hclge_cfg_param_cmd *)desc[0].data;
915
916         /* get the configuration */
917         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
918                                               HCLGE_CFG_VMDQ_M,
919                                               HCLGE_CFG_VMDQ_S);
920         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
921                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
922         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
923                                             HCLGE_CFG_TQP_DESC_N_M,
924                                             HCLGE_CFG_TQP_DESC_N_S);
925
926         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
927                                         HCLGE_CFG_PHY_ADDR_M,
928                                         HCLGE_CFG_PHY_ADDR_S);
929         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
930                                           HCLGE_CFG_MEDIA_TP_M,
931                                           HCLGE_CFG_MEDIA_TP_S);
932         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
933                                           HCLGE_CFG_RX_BUF_LEN_M,
934                                           HCLGE_CFG_RX_BUF_LEN_S);
935         /* get mac_address */
936         mac_addr_tmp = __le32_to_cpu(req->param[2]);
937         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
938                                             HCLGE_CFG_MAC_ADDR_H_M,
939                                             HCLGE_CFG_MAC_ADDR_H_S);
940
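        /* merge the high bits of the MAC address above the low 32 bits;
         * the two shifts add up to a single 32-bit shift
         */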
941         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
942
943         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
944                                              HCLGE_CFG_DEFAULT_SPEED_M,
945                                              HCLGE_CFG_DEFAULT_SPEED_S);
946         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
947                                             HCLGE_CFG_RSS_SIZE_M,
948                                             HCLGE_CFG_RSS_SIZE_S);
949
950         for (i = 0; i < ETH_ALEN; i++)
951                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
952
953         req = (struct hclge_cfg_param_cmd *)desc[1].data;
954         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
955
956         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
957                                              HCLGE_CFG_SPEED_ABILITY_M,
958                                              HCLGE_CFG_SPEED_ABILITY_S);
959         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
960                                          HCLGE_CFG_UMV_TBL_SPACE_M,
961                                          HCLGE_CFG_UMV_TBL_SPACE_S);
962         if (!cfg->umv_space)
963                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
964 }
965
966 /* hclge_get_cfg: query the static configuration parameters from flash
967  * @hdev: pointer to struct hclge_dev
968  * @hcfg: the config structure to be filled out
969  */
970 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
971 {
972         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
973         struct hclge_cfg_param_cmd *req;
974         int i, ret;
975
976         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
977                 u32 offset = 0;
978
979                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
980                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
981                                            true);
982                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
983                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
984                 /* Len should be in units of 4 bytes when sent to hardware */
985                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
986                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
987                 req->offset = cpu_to_le32(offset);
988         }
989
990         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
991         if (ret) {
992                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
993                 return ret;
994         }
995
996         hclge_parse_cfg(hcfg, desc);
997
998         return 0;
999 }
1000
1001 static int hclge_get_cap(struct hclge_dev *hdev)
1002 {
1003         int ret;
1004
1005         ret = hclge_query_function_status(hdev);
1006         if (ret) {
1007                 dev_err(&hdev->pdev->dev,
1008                         "query function status error %d.\n", ret);
1009                 return ret;
1010         }
1011
1012         /* get pf resource */
1013         ret = hclge_query_pf_resource(hdev);
1014         if (ret)
1015                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1016
1017         return ret;
1018 }
1019
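/* When running in a kdump (crash capture) kernel, shrink the queue pair
 * and descriptor numbers to the minimum so the dump kernel uses as few
 * resources as possible.
 */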
1020 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1021 {
1022 #define HCLGE_MIN_TX_DESC       64
1023 #define HCLGE_MIN_RX_DESC       64
1024
1025         if (!is_kdump_kernel())
1026                 return;
1027
1028         dev_info(&hdev->pdev->dev,
1029                  "Running kdump kernel. Using minimal resources\n");
1030
1031         /* the minimum number of queue pairs equals the number of vports */
1032         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1033         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1034         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1035 }
1036
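/* Read the static configuration from the firmware and use it to fill in
 * the basic device parameters: vport/TQP layout, RX buffer length, MAC
 * address, media type, default speed, TC and PFC setup, and UMV space.
 */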
1037 static int hclge_configure(struct hclge_dev *hdev)
1038 {
1039         struct hclge_cfg cfg;
1040         int ret, i;
1041
1042         ret = hclge_get_cfg(hdev, &cfg);
1043         if (ret) {
1044                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1045                 return ret;
1046         }
1047
1048         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1049         hdev->base_tqp_pid = 0;
1050         hdev->rss_size_max = cfg.rss_size_max;
1051         hdev->rx_buf_len = cfg.rx_buf_len;
1052         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1053         hdev->hw.mac.media_type = cfg.media_type;
1054         hdev->hw.mac.phy_addr = cfg.phy_addr;
1055         hdev->num_tx_desc = cfg.tqp_desc_num;
1056         hdev->num_rx_desc = cfg.tqp_desc_num;
1057         hdev->tm_info.num_pg = 1;
1058         hdev->tc_max = cfg.tc_num;
1059         hdev->tm_info.hw_pfc_map = 0;
1060         hdev->wanted_umv_size = cfg.umv_space;
1061
1062         if (hnae3_dev_fd_supported(hdev))
1063                 hdev->fd_en = true;
1064
1065         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1066         if (ret) {
1067                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1068                 return ret;
1069         }
1070
1071         hclge_parse_link_mode(hdev, cfg.speed_ability);
1072
1073         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1074             (hdev->tc_max < 1)) {
1075                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1076                          hdev->tc_max);
1077                 hdev->tc_max = 1;
1078         }
1079
1080         /* Dev does not support DCB */
1081         if (!hnae3_dev_dcb_supported(hdev)) {
1082                 hdev->tc_max = 1;
1083                 hdev->pfc_max = 0;
1084         } else {
1085                 hdev->pfc_max = hdev->tc_max;
1086         }
1087
1088         hdev->tm_info.num_tc = 1;
1089
1090         /* Currently, non-contiguous TCs are not supported */
1091         for (i = 0; i < hdev->tm_info.num_tc; i++)
1092                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1093
1094         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1095
1096         hclge_init_kdump_kernel_config(hdev);
1097
1098         return ret;
1099 }
1100
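/* Program the minimum and maximum TSO MSS values into the hardware via
 * the generic TSO configuration command.
 */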
1101 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1102                             int tso_mss_max)
1103 {
1104         struct hclge_cfg_tso_status_cmd *req;
1105         struct hclge_desc desc;
1106         u16 tso_mss;
1107
1108         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1109
1110         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1111
1112         tso_mss = 0;
1113         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1114                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1115         req->tso_mss_min = cpu_to_le16(tso_mss);
1116
1117         tso_mss = 0;
1118         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1119                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1120         req->tso_mss_max = cpu_to_le16(tso_mss);
1121
1122         return hclge_cmd_send(&hdev->hw, &desc, 1);
1123 }
1124
1125 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1126 {
1127         struct hclge_cfg_gro_status_cmd *req;
1128         struct hclge_desc desc;
1129         int ret;
1130
1131         if (!hnae3_dev_gro_supported(hdev))
1132                 return 0;
1133
1134         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1135         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1136
1137         req->gro_en = cpu_to_le16(en ? 1 : 0);
1138
1139         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1140         if (ret)
1141                 dev_err(&hdev->pdev->dev,
1142                         "GRO hardware config cmd failed, ret = %d\n", ret);
1143
1144         return ret;
1145 }
1146
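/* Allocate the software TQP array for this PF and initialize each entry
 * with its index, buffer size, descriptor numbers and register base.
 */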
1147 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1148 {
1149         struct hclge_tqp *tqp;
1150         int i;
1151
1152         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1153                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1154         if (!hdev->htqp)
1155                 return -ENOMEM;
1156
1157         tqp = hdev->htqp;
1158
1159         for (i = 0; i < hdev->num_tqps; i++) {
1160                 tqp->dev = &hdev->pdev->dev;
1161                 tqp->index = i;
1162
1163                 tqp->q.ae_algo = &ae_algo;
1164                 tqp->q.buf_size = hdev->rx_buf_len;
1165                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1166                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1167                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1168                         i * HCLGE_TQP_REG_SIZE;
1169
1170                 tqp++;
1171         }
1172
1173         return 0;
1174 }
1175
1176 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1177                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1178 {
1179         struct hclge_tqp_map_cmd *req;
1180         struct hclge_desc desc;
1181         int ret;
1182
1183         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1184
1185         req = (struct hclge_tqp_map_cmd *)desc.data;
1186         req->tqp_id = cpu_to_le16(tqp_pid);
1187         req->tqp_vf = func_id;
1188         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1189                         1 << HCLGE_TQP_MAP_EN_B;
1190         req->tqp_vid = cpu_to_le16(tqp_vid);
1191
1192         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1193         if (ret)
1194                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1195
1196         return ret;
1197 }
1198
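/* Hand out up to num_tqps unused hardware TQPs to the given vport and
 * derive the vport's RSS size from what was actually allocated.
 */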
1199 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1200 {
1201         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1202         struct hclge_dev *hdev = vport->back;
1203         int i, alloced;
1204
1205         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1206              alloced < num_tqps; i++) {
1207                 if (!hdev->htqp[i].alloced) {
1208                         hdev->htqp[i].q.handle = &vport->nic;
1209                         hdev->htqp[i].q.tqp_index = alloced;
1210                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1211                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1212                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1213                         hdev->htqp[i].alloced = true;
1214                         alloced++;
1215                 }
1216         }
1217         vport->alloc_tqps = alloced;
1218         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1219                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1220
1221         return 0;
1222 }
1223
1224 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1225                             u16 num_tx_desc, u16 num_rx_desc)
1226
1227 {
1228         struct hnae3_handle *nic = &vport->nic;
1229         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1230         struct hclge_dev *hdev = vport->back;
1231         int ret;
1232
1233         kinfo->num_tx_desc = num_tx_desc;
1234         kinfo->num_rx_desc = num_rx_desc;
1235
1236         kinfo->rx_buf_len = hdev->rx_buf_len;
1237
1238         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1239                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1240         if (!kinfo->tqp)
1241                 return -ENOMEM;
1242
1243         ret = hclge_assign_tqp(vport, num_tqps);
1244         if (ret)
1245                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1246
1247         return ret;
1248 }
1249
1250 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1251                                   struct hclge_vport *vport)
1252 {
1253         struct hnae3_handle *nic = &vport->nic;
1254         struct hnae3_knic_private_info *kinfo;
1255         u16 i;
1256
1257         kinfo = &nic->kinfo;
1258         for (i = 0; i < vport->alloc_tqps; i++) {
1259                 struct hclge_tqp *q =
1260                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1261                 bool is_pf;
1262                 int ret;
1263
1264                 is_pf = !(vport->vport_id);
1265                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1266                                              i, is_pf);
1267                 if (ret)
1268                         return ret;
1269         }
1270
1271         return 0;
1272 }
1273
1274 static int hclge_map_tqp(struct hclge_dev *hdev)
1275 {
1276         struct hclge_vport *vport = hdev->vport;
1277         u16 i, num_vport;
1278
1279         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1280         for (i = 0; i < num_vport; i++) {
1281                 int ret;
1282
1283                 ret = hclge_map_tqp_to_vport(hdev, vport);
1284                 if (ret)
1285                         return ret;
1286
1287                 vport++;
1288         }
1289
1290         return 0;
1291 }
1292
1293 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1294 {
1295         /* this would be initialized later */
1296 }
1297
1298 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1299 {
1300         struct hnae3_handle *nic = &vport->nic;
1301         struct hclge_dev *hdev = vport->back;
1302         int ret;
1303
1304         nic->pdev = hdev->pdev;
1305         nic->ae_algo = &ae_algo;
1306         nic->numa_node_mask = hdev->numa_node_mask;
1307
1308         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1309                 ret = hclge_knic_setup(vport, num_tqps,
1310                                        hdev->num_tx_desc, hdev->num_rx_desc);
1311
1312                 if (ret) {
1313                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1314                                 ret);
1315                         return ret;
1316                 }
1317         } else {
1318                 hclge_unic_setup(vport, num_tqps);
1319         }
1320
1321         return 0;
1322 }
1323
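/* Allocate one vport for the PF itself plus one per requested VF and
 * VMDq instance, split the TQPs evenly among them (the main vport gets
 * the remainder), and set up each vport.
 */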
1324 static int hclge_alloc_vport(struct hclge_dev *hdev)
1325 {
1326         struct pci_dev *pdev = hdev->pdev;
1327         struct hclge_vport *vport;
1328         u32 tqp_main_vport;
1329         u32 tqp_per_vport;
1330         int num_vport, i;
1331         int ret;
1332
1333         /* We need to alloc a vport for the main NIC of the PF */
1334         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1335
1336         if (hdev->num_tqps < num_vport) {
1337                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1338                         hdev->num_tqps, num_vport);
1339                 return -EINVAL;
1340         }
1341
1342         /* Alloc the same number of TQPs for every vport */
1343         tqp_per_vport = hdev->num_tqps / num_vport;
1344         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1345
1346         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1347                              GFP_KERNEL);
1348         if (!vport)
1349                 return -ENOMEM;
1350
1351         hdev->vport = vport;
1352         hdev->num_alloc_vport = num_vport;
1353
1354         if (IS_ENABLED(CONFIG_PCI_IOV))
1355                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1356
1357         for (i = 0; i < num_vport; i++) {
1358                 vport->back = hdev;
1359                 vport->vport_id = i;
1360                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1361                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1362                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1363                 INIT_LIST_HEAD(&vport->vlan_list);
1364                 INIT_LIST_HEAD(&vport->uc_mac_list);
1365                 INIT_LIST_HEAD(&vport->mc_mac_list);
1366
1367                 if (i == 0)
1368                         ret = hclge_vport_setup(vport, tqp_main_vport);
1369                 else
1370                         ret = hclge_vport_setup(vport, tqp_per_vport);
1371                 if (ret) {
1372                         dev_err(&pdev->dev,
1373                                 "vport setup failed for vport %d, %d\n",
1374                                 i, ret);
1375                         return ret;
1376                 }
1377
1378                 vport++;
1379         }
1380
1381         return 0;
1382 }
1383
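/* Program the per-TC TX packet buffer sizes via the TX_BUFF_ALLOC command;
 * sizes are written in units of 128 bytes with the update-enable bit set.
 */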
1384 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1385                                     struct hclge_pkt_buf_alloc *buf_alloc)
1386 {
1387 /* TX buffer size is configured in units of 128 bytes */
1388 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1389 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1390         struct hclge_tx_buff_alloc_cmd *req;
1391         struct hclge_desc desc;
1392         int ret;
1393         u8 i;
1394
1395         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1396
1397         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1398         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1399                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1400
1401                 req->tx_pkt_buff[i] =
1402                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1403                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1404         }
1405
1406         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1407         if (ret)
1408                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1409                         ret);
1410
1411         return ret;
1412 }
1413
1414 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1415                                  struct hclge_pkt_buf_alloc *buf_alloc)
1416 {
1417         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1418
1419         if (ret)
1420                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1421
1422         return ret;
1423 }
1424
1425 static int hclge_get_tc_num(struct hclge_dev *hdev)
1426 {
1427         int i, cnt = 0;
1428
1429         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1430                 if (hdev->hw_tc_map & BIT(i))
1431                         cnt++;
1432         return cnt;
1433 }
1434
1435 static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
1436 {
1437         int i, cnt = 0;
1438
1439         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1440                 if (hdev->hw_tc_map & BIT(i) &&
1441                     hdev->tm_info.hw_pfc_map & BIT(i))
1442                         cnt++;
1443         return cnt;
1444 }
1445
1446 /* Get the number of pfc enabled TCs, which have private buffer */
1447 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1448                                   struct hclge_pkt_buf_alloc *buf_alloc)
1449 {
1450         struct hclge_priv_buf *priv;
1451         int i, cnt = 0;
1452
1453         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1454                 priv = &buf_alloc->priv_buf[i];
1455                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1456                     priv->enable)
1457                         cnt++;
1458         }
1459
1460         return cnt;
1461 }
1462
1463 /* Get the number of pfc disabled TCs, which have private buffer */
1464 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1465                                      struct hclge_pkt_buf_alloc *buf_alloc)
1466 {
1467         struct hclge_priv_buf *priv;
1468         int i, cnt = 0;
1469
1470         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1471                 priv = &buf_alloc->priv_buf[i];
1472                 if (hdev->hw_tc_map & BIT(i) &&
1473                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1474                     priv->enable)
1475                         cnt++;
1476         }
1477
1478         return cnt;
1479 }
1480
1481 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1482 {
1483         struct hclge_priv_buf *priv;
1484         u32 rx_priv = 0;
1485         int i;
1486
1487         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1488                 priv = &buf_alloc->priv_buf[i];
1489                 if (priv->enable)
1490                         rx_priv += priv->buf_size;
1491         }
1492         return rx_priv;
1493 }
1494
1495 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1496 {
1497         u32 i, total_tx_size = 0;
1498
1499         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1500                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1501
1502         return total_tx_size;
1503 }
1504
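/* Check whether the remaining rx packet buffer (rx_all) can hold both the
 * private buffers already assigned and the required shared buffer; if so,
 * fill in the shared buffer size, its waterlines and the per-TC thresholds.
 */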
1505 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1506                                 struct hclge_pkt_buf_alloc *buf_alloc,
1507                                 u32 rx_all)
1508 {
1509         u32 shared_buf_min, shared_buf_tc, shared_std;
1510         int tc_num, pfc_enable_num;
1511         u32 shared_buf, aligned_mps;
1512         u32 rx_priv;
1513         int i;
1514
1515         tc_num = hclge_get_tc_num(hdev);
1516         pfc_enable_num = hclge_get_pfc_enable_num(hdev);
1517         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1518
1519         if (hnae3_dev_dcb_supported(hdev))
1520                 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1521         else
1522                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1523                                         + hdev->dv_buf_size;
1524
1525         shared_buf_tc = pfc_enable_num * aligned_mps +
1526                         (tc_num - pfc_enable_num) * aligned_mps / 2 +
1527                         aligned_mps;
1528         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1529                              HCLGE_BUF_SIZE_UNIT);
1530
1531         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1532         if (rx_all < rx_priv + shared_std)
1533                 return false;
1534
1535         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1536         buf_alloc->s_buf.buf_size = shared_buf;
1537         if (hnae3_dev_dcb_supported(hdev)) {
1538                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1539                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1540                         - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1541         } else {
1542                 buf_alloc->s_buf.self.high = aligned_mps +
1543                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1544                 buf_alloc->s_buf.self.low =
1545                         roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1546         }
1547
1548         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1549                 if ((hdev->hw_tc_map & BIT(i)) &&
1550                     (hdev->tm_info.hw_pfc_map & BIT(i))) {
1551                         buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
1552                         buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
1553                 } else {
1554                         buf_alloc->s_buf.tc_thrd[i].low = 0;
1555                         buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
1556                 }
1557         }
1558
1559         return true;
1560 }
1561
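/* Assign hdev->tx_buf_size to every enabled TC out of the total packet
 * buffer, failing with -ENOMEM if the buffer runs out.
 */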
1562 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1563                                 struct hclge_pkt_buf_alloc *buf_alloc)
1564 {
1565         u32 i, total_size;
1566
1567         total_size = hdev->pkt_buf_size;
1568
1569         /* alloc tx buffer for all enabled tc */
1570         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1571                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1572
1573                 if (hdev->hw_tc_map & BIT(i)) {
1574                         if (total_size < hdev->tx_buf_size)
1575                                 return -ENOMEM;
1576
1577                         priv->tx_buf_size = hdev->tx_buf_size;
1578                 } else {
1579                         priv->tx_buf_size = 0;
1580                 }
1581
1582                 total_size -= priv->tx_buf_size;
1583         }
1584
1585         return 0;
1586 }
1587
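/* Calculate the rx private buffer and waterlines for every enabled TC, using
 * larger waterlines when @max is true, then check whether the result still
 * fits into the remaining rx buffer.
 */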
1588 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1589                                   struct hclge_pkt_buf_alloc *buf_alloc)
1590 {
1591         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1592         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1593         int i;
1594
1595         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1596                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1597
1598                 priv->enable = 0;
1599                 priv->wl.low = 0;
1600                 priv->wl.high = 0;
1601                 priv->buf_size = 0;
1602
1603                 if (!(hdev->hw_tc_map & BIT(i)))
1604                         continue;
1605
1606                 priv->enable = 1;
1607
1608                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1609                         priv->wl.low = max ? aligned_mps : 256;
1610                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1611                                                 HCLGE_BUF_SIZE_UNIT);
1612                 } else {
1613                         priv->wl.low = 0;
1614                         priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1615                 }
1616
1617                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1618         }
1619
1620         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1621 }
1622
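/* Drop the private buffer of non-PFC TCs, starting from the last TC, until
 * the rx buffer fits or no such TC is left.
 */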
1623 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1624                                           struct hclge_pkt_buf_alloc *buf_alloc)
1625 {
1626         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1627         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1628         int i;
1629
1630         /* clear TCs starting from the last one */
1631         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1632                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1633
1634                 if (hdev->hw_tc_map & BIT(i) &&
1635                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1636                         /* Clear the private buffer of a TC without PFC */
1637                         priv->wl.low = 0;
1638                         priv->wl.high = 0;
1639                         priv->buf_size = 0;
1640                         priv->enable = 0;
1641                         no_pfc_priv_num--;
1642                 }
1643
1644                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1645                     no_pfc_priv_num == 0)
1646                         break;
1647         }
1648
1649         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1650 }
1651
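/* Drop the private buffer of PFC-enabled TCs, starting from the last TC,
 * until the rx buffer fits or no such TC is left.
 */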
1652 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1653                                         struct hclge_pkt_buf_alloc *buf_alloc)
1654 {
1655         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1656         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1657         int i;
1658
1659         /* clear TCs starting from the last one */
1660         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1661                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1662
1663                 if (hdev->hw_tc_map & BIT(i) &&
1664                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1665                         /* Reduce the number of PFC TCs with a private buffer */
1666                         priv->wl.low = 0;
1667                         priv->enable = 0;
1668                         priv->wl.high = 0;
1669                         priv->buf_size = 0;
1670                         pfc_priv_num--;
1671                 }
1672
1673                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1674                     pfc_priv_num == 0)
1675                         break;
1676         }
1677
1678         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1679 }
1680
1681 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1682  * @hdev: pointer to struct hclge_dev
1683  * @buf_alloc: pointer to buffer calculation data
1684  * @return: 0: calculation successful, negative: fail
1685  */
1686 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1687                                 struct hclge_pkt_buf_alloc *buf_alloc)
1688 {
1689         /* When DCB is not supported, rx private buffer is not allocated. */
1690         if (!hnae3_dev_dcb_supported(hdev)) {
1691                 u32 rx_all = hdev->pkt_buf_size;
1692
1693                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1694                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1695                         return -ENOMEM;
1696
1697                 return 0;
1698         }
1699
1700         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1701                 return 0;
1702
1703         /* try to decrease the buffer size */
1704         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1705                 return 0;
1706
1707         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1708                 return 0;
1709
1710         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1711                 return 0;
1712
1713         return -ENOMEM;
1714 }
1715
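/* Program the per-TC private rx buffer sizes and the shared buffer size to
 * hardware via the RX_PRIV_BUFF_ALLOC command.
 */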
1716 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1717                                    struct hclge_pkt_buf_alloc *buf_alloc)
1718 {
1719         struct hclge_rx_priv_buff_cmd *req;
1720         struct hclge_desc desc;
1721         int ret;
1722         int i;
1723
1724         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1725         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1726
1727         /* Alloc private buffer TCs */
1728         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1729                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1730
1731                 req->buf_num[i] =
1732                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1733                 req->buf_num[i] |=
1734                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1735         }
1736
1737         req->shared_buf =
1738                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1739                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1740
1741         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1742         if (ret)
1743                 dev_err(&hdev->pdev->dev,
1744                         "rx private buffer alloc cmd failed %d\n", ret);
1745
1746         return ret;
1747 }
1748
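/* Configure the high/low waterlines of each TC's private rx buffer; the TCs
 * are split across two command descriptors sent together.
 */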
1749 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1750                                    struct hclge_pkt_buf_alloc *buf_alloc)
1751 {
1752         struct hclge_rx_priv_wl_buf *req;
1753         struct hclge_priv_buf *priv;
1754         struct hclge_desc desc[2];
1755         int i, j;
1756         int ret;
1757
1758         for (i = 0; i < 2; i++) {
1759                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1760                                            false);
1761                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1762
1763                 /* The first descriptor sets the NEXT bit to 1 */
1764                 if (i == 0)
1765                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1766                 else
1767                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1768
1769                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1770                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1771
1772                         priv = &buf_alloc->priv_buf[idx];
1773                         req->tc_wl[j].high =
1774                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1775                         req->tc_wl[j].high |=
1776                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1777                         req->tc_wl[j].low =
1778                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1779                         req->tc_wl[j].low |=
1780                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1781                 }
1782         }
1783
1784         /* Send 2 descriptors at one time */
1785         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1786         if (ret)
1787                 dev_err(&hdev->pdev->dev,
1788                         "rx private waterline config cmd failed %d\n",
1789                         ret);
1790         return ret;
1791 }
1792
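/* Configure the per-TC high/low thresholds of the shared rx buffer; the TCs
 * are split across two command descriptors sent together.
 */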
1793 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1794                                     struct hclge_pkt_buf_alloc *buf_alloc)
1795 {
1796         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1797         struct hclge_rx_com_thrd *req;
1798         struct hclge_desc desc[2];
1799         struct hclge_tc_thrd *tc;
1800         int i, j;
1801         int ret;
1802
1803         for (i = 0; i < 2; i++) {
1804                 hclge_cmd_setup_basic_desc(&desc[i],
1805                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1806                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1807
1808                 /* The first descriptor sets the NEXT bit to 1 */
1809                 if (i == 0)
1810                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1811                 else
1812                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1813
1814                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1815                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1816
1817                         req->com_thrd[j].high =
1818                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1819                         req->com_thrd[j].high |=
1820                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1821                         req->com_thrd[j].low =
1822                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1823                         req->com_thrd[j].low |=
1824                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1825                 }
1826         }
1827
1828         /* Send 2 descriptors at one time */
1829         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1830         if (ret)
1831                 dev_err(&hdev->pdev->dev,
1832                         "common threshold config cmd failed %d\n", ret);
1833         return ret;
1834 }
1835
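/* Configure the high/low waterlines of the shared rx buffer itself */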
1836 static int hclge_common_wl_config(struct hclge_dev *hdev,
1837                                   struct hclge_pkt_buf_alloc *buf_alloc)
1838 {
1839         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1840         struct hclge_rx_com_wl *req;
1841         struct hclge_desc desc;
1842         int ret;
1843
1844         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1845
1846         req = (struct hclge_rx_com_wl *)desc.data;
1847         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1848         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1849
1850         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1851         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1852
1853         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1854         if (ret)
1855                 dev_err(&hdev->pdev->dev,
1856                         "common waterline config cmd failed %d\n", ret);
1857
1858         return ret;
1859 }
1860
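/* Calculate and program the whole tx/rx packet buffer layout: tx buffers,
 * rx private buffers, and (when DCB is supported) the private waterlines and
 * shared buffer thresholds, followed by the shared buffer waterlines.
 */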
1861 int hclge_buffer_alloc(struct hclge_dev *hdev)
1862 {
1863         struct hclge_pkt_buf_alloc *pkt_buf;
1864         int ret;
1865
1866         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1867         if (!pkt_buf)
1868                 return -ENOMEM;
1869
1870         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1871         if (ret) {
1872                 dev_err(&hdev->pdev->dev,
1873                         "could not calc tx buffer size for all TCs %d\n", ret);
1874                 goto out;
1875         }
1876
1877         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1878         if (ret) {
1879                 dev_err(&hdev->pdev->dev,
1880                         "could not alloc tx buffers %d\n", ret);
1881                 goto out;
1882         }
1883
1884         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1885         if (ret) {
1886                 dev_err(&hdev->pdev->dev,
1887                         "could not calc rx priv buffer size for all TCs %d\n",
1888                         ret);
1889                 goto out;
1890         }
1891
1892         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1893         if (ret) {
1894                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1895                         ret);
1896                 goto out;
1897         }
1898
1899         if (hnae3_dev_dcb_supported(hdev)) {
1900                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1901                 if (ret) {
1902                         dev_err(&hdev->pdev->dev,
1903                                 "could not configure rx private waterline %d\n",
1904                                 ret);
1905                         goto out;
1906                 }
1907
1908                 ret = hclge_common_thrd_config(hdev, pkt_buf);
1909                 if (ret) {
1910                         dev_err(&hdev->pdev->dev,
1911                                 "could not configure common threshold %d\n",
1912                                 ret);
1913                         goto out;
1914                 }
1915         }
1916
1917         ret = hclge_common_wl_config(hdev, pkt_buf);
1918         if (ret)
1919                 dev_err(&hdev->pdev->dev,
1920                         "could not configure common waterline %d\n", ret);
1921
1922 out:
1923         kfree(pkt_buf);
1924         return ret;
1925 }
1926
1927 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1928 {
1929         struct hnae3_handle *roce = &vport->roce;
1930         struct hnae3_handle *nic = &vport->nic;
1931
1932         roce->rinfo.num_vectors = vport->back->num_roce_msi;
1933
1934         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1935             vport->back->num_msi_left == 0)
1936                 return -EINVAL;
1937
1938         roce->rinfo.base_vector = vport->back->roce_base_vector;
1939
1940         roce->rinfo.netdev = nic->kinfo.netdev;
1941         roce->rinfo.roce_io_base = vport->back->hw.io_base;
1942
1943         roce->pdev = nic->pdev;
1944         roce->ae_algo = nic->ae_algo;
1945         roce->numa_node_mask = nic->numa_node_mask;
1946
1947         return 0;
1948 }
1949
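/* Allocate MSI/MSI-X vectors for the PF and set up the bookkeeping arrays
 * used to track which vport owns each vector and its irq number.
 */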
1950 static int hclge_init_msi(struct hclge_dev *hdev)
1951 {
1952         struct pci_dev *pdev = hdev->pdev;
1953         int vectors;
1954         int i;
1955
1956         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1957                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
1958         if (vectors < 0) {
1959                 dev_err(&pdev->dev,
1960                         "failed(%d) to allocate MSI/MSI-X vectors\n",
1961                         vectors);
1962                 return vectors;
1963         }
1964         if (vectors < hdev->num_msi)
1965                 dev_warn(&hdev->pdev->dev,
1966                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1967                          hdev->num_msi, vectors);
1968
1969         hdev->num_msi = vectors;
1970         hdev->num_msi_left = vectors;
1971         hdev->base_msi_vector = pdev->irq;
1972         hdev->roce_base_vector = hdev->base_msi_vector +
1973                                 hdev->roce_base_msix_offset;
1974
1975         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1976                                            sizeof(u16), GFP_KERNEL);
1977         if (!hdev->vector_status) {
1978                 pci_free_irq_vectors(pdev);
1979                 return -ENOMEM;
1980         }
1981
1982         for (i = 0; i < hdev->num_msi; i++)
1983                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1984
1985         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1986                                         sizeof(int), GFP_KERNEL);
1987         if (!hdev->vector_irq) {
1988                 pci_free_irq_vectors(pdev);
1989                 return -ENOMEM;
1990         }
1991
1992         return 0;
1993 }
1994
1995 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1996 {
1997
1998         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
1999                 duplex = HCLGE_MAC_FULL;
2000
2001         return duplex;
2002 }
2003
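/* Program the MAC speed and duplex into hardware via the CONFIG_SPEED_DUP
 * command, mapping each supported speed to its command field value.
 */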
2004 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2005                                       u8 duplex)
2006 {
2007         struct hclge_config_mac_speed_dup_cmd *req;
2008         struct hclge_desc desc;
2009         int ret;
2010
2011         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2012
2013         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2014
2015         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2016
2017         switch (speed) {
2018         case HCLGE_MAC_SPEED_10M:
2019                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2020                                 HCLGE_CFG_SPEED_S, 6);
2021                 break;
2022         case HCLGE_MAC_SPEED_100M:
2023                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2024                                 HCLGE_CFG_SPEED_S, 7);
2025                 break;
2026         case HCLGE_MAC_SPEED_1G:
2027                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2028                                 HCLGE_CFG_SPEED_S, 0);
2029                 break;
2030         case HCLGE_MAC_SPEED_10G:
2031                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2032                                 HCLGE_CFG_SPEED_S, 1);
2033                 break;
2034         case HCLGE_MAC_SPEED_25G:
2035                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2036                                 HCLGE_CFG_SPEED_S, 2);
2037                 break;
2038         case HCLGE_MAC_SPEED_40G:
2039                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2040                                 HCLGE_CFG_SPEED_S, 3);
2041                 break;
2042         case HCLGE_MAC_SPEED_50G:
2043                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2044                                 HCLGE_CFG_SPEED_S, 4);
2045                 break;
2046         case HCLGE_MAC_SPEED_100G:
2047                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2048                                 HCLGE_CFG_SPEED_S, 5);
2049                 break;
2050         default:
2051                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2052                 return -EINVAL;
2053         }
2054
2055         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2056                       1);
2057
2058         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2059         if (ret) {
2060                 dev_err(&hdev->pdev->dev,
2061                         "mac speed/duplex config cmd failed %d.\n", ret);
2062                 return ret;
2063         }
2064
2065         return 0;
2066 }
2067
2068 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2069 {
2070         int ret;
2071
2072         duplex = hclge_check_speed_dup(duplex, speed);
2073         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2074                 return 0;
2075
2076         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2077         if (ret)
2078                 return ret;
2079
2080         hdev->hw.mac.speed = speed;
2081         hdev->hw.mac.duplex = duplex;
2082
2083         return 0;
2084 }
2085
2086 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2087                                      u8 duplex)
2088 {
2089         struct hclge_vport *vport = hclge_get_vport(handle);
2090         struct hclge_dev *hdev = vport->back;
2091
2092         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2093 }
2094
2095 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2096 {
2097         struct hclge_config_auto_neg_cmd *req;
2098         struct hclge_desc desc;
2099         u32 flag = 0;
2100         int ret;
2101
2102         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2103
2104         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2105         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2106         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2107
2108         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2109         if (ret)
2110                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2111                         ret);
2112
2113         return ret;
2114 }
2115
2116 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2117 {
2118         struct hclge_vport *vport = hclge_get_vport(handle);
2119         struct hclge_dev *hdev = vport->back;
2120
2121         return hclge_set_autoneg_en(hdev, enable);
2122 }
2123
2124 static int hclge_get_autoneg(struct hnae3_handle *handle)
2125 {
2126         struct hclge_vport *vport = hclge_get_vport(handle);
2127         struct hclge_dev *hdev = vport->back;
2128         struct phy_device *phydev = hdev->hw.mac.phydev;
2129
2130         if (phydev)
2131                 return phydev->autoneg;
2132
2133         return hdev->hw.mac.autoneg;
2134 }
2135
2136 static int hclge_mac_init(struct hclge_dev *hdev)
2137 {
2138         struct hclge_mac *mac = &hdev->hw.mac;
2139         int ret;
2140
2141         hdev->support_sfp_query = true;
2142         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2143         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2144                                          hdev->hw.mac.duplex);
2145         if (ret) {
2146                 dev_err(&hdev->pdev->dev,
2147                         "Config mac speed dup fail ret=%d\n", ret);
2148                 return ret;
2149         }
2150
2151         mac->link = 0;
2152
2153         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2154         if (ret) {
2155                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2156                 return ret;
2157         }
2158
2159         ret = hclge_buffer_alloc(hdev);
2160         if (ret)
2161                 dev_err(&hdev->pdev->dev,
2162                         "allocate buffer fail, ret=%d\n", ret);
2163
2164         return ret;
2165 }
2166
2167 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2168 {
2169         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2170             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2171                 schedule_work(&hdev->mbx_service_task);
2172 }
2173
2174 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2175 {
2176         if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2177                 schedule_work(&hdev->rst_service_task);
2178 }
2179
2180 static void hclge_task_schedule(struct hclge_dev *hdev)
2181 {
2182         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2183             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2184             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2185                 (void)schedule_work(&hdev->service_task);
2186 }
2187
2188 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2189 {
2190         struct hclge_link_status_cmd *req;
2191         struct hclge_desc desc;
2192         int link_status;
2193         int ret;
2194
2195         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2196         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2197         if (ret) {
2198                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2199                         ret);
2200                 return ret;
2201         }
2202
2203         req = (struct hclge_link_status_cmd *)desc.data;
2204         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2205
2206         return !!link_status;
2207 }
2208
2209 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2210 {
2211         int mac_state;
2212         int link_stat;
2213
2214         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2215                 return 0;
2216
2217         mac_state = hclge_get_mac_link_status(hdev);
2218
2219         if (hdev->hw.mac.phydev) {
2220                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2221                         link_stat = mac_state &
2222                                 hdev->hw.mac.phydev->link;
2223                 else
2224                         link_stat = 0;
2225
2226         } else {
2227                 link_stat = mac_state;
2228         }
2229
2230         return !!link_stat;
2231 }
2232
2233 static void hclge_update_link_status(struct hclge_dev *hdev)
2234 {
2235         struct hnae3_client *rclient = hdev->roce_client;
2236         struct hnae3_client *client = hdev->nic_client;
2237         struct hnae3_handle *rhandle;
2238         struct hnae3_handle *handle;
2239         int state;
2240         int i;
2241
2242         if (!client)
2243                 return;
2244         state = hclge_get_mac_phy_link(hdev);
2245         if (state != hdev->hw.mac.link) {
2246                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2247                         handle = &hdev->vport[i].nic;
2248                         client->ops->link_status_change(handle, state);
2249                         rhandle = &hdev->vport[i].roce;
2250                         if (rclient && rclient->ops->link_status_change)
2251                                 rclient->ops->link_status_change(rhandle,
2252                                                                  state);
2253                 }
2254                 hdev->hw.mac.link = state;
2255         }
2256 }
2257
2258 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2259 {
2260         struct hclge_sfp_speed_cmd *resp = NULL;
2261         struct hclge_desc desc;
2262         int ret;
2263
2264         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2265         resp = (struct hclge_sfp_speed_cmd *)desc.data;
2266         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2267         if (ret == -EOPNOTSUPP) {
2268                 dev_warn(&hdev->pdev->dev,
2269                          "IMP does not support getting SFP speed %d\n", ret);
2270                 return ret;
2271         } else if (ret) {
2272                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2273                 return ret;
2274         }
2275
2276         *speed = resp->sfp_speed;
2277
2278         return 0;
2279 }
2280
2281 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2282 {
2283         struct hclge_mac mac = hdev->hw.mac;
2284         int speed;
2285         int ret;
2286
2287         /* get the speed from SFP cmd when phy
2288          * doesn't exist.
2289          */
2290         if (mac.phydev)
2291                 return 0;
2292
2293         /* if IMP does not support getting SFP/qSFP speed, return directly */
2294         if (!hdev->support_sfp_query)
2295                 return 0;
2296
2297         ret = hclge_get_sfp_speed(hdev, &speed);
2298         if (ret == -EOPNOTSUPP) {
2299                 hdev->support_sfp_query = false;
2300                 return ret;
2301         } else if (ret) {
2302                 return ret;
2303         }
2304
2305         if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2306                 return 0; /* do nothing if no SFP */
2307
2308         /* must config full duplex for SFP */
2309         return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2310 }
2311
2312 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2313 {
2314         struct hclge_vport *vport = hclge_get_vport(handle);
2315         struct hclge_dev *hdev = vport->back;
2316
2317         return hclge_update_speed_duplex(hdev);
2318 }
2319
2320 static int hclge_get_status(struct hnae3_handle *handle)
2321 {
2322         struct hclge_vport *vport = hclge_get_vport(handle);
2323         struct hclge_dev *hdev = vport->back;
2324
2325         hclge_update_link_status(hdev);
2326
2327         return hdev->hw.mac.link;
2328 }
2329
2330 static void hclge_service_timer(struct timer_list *t)
2331 {
2332         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2333
2334         mod_timer(&hdev->service_timer, jiffies + HZ);
2335         hdev->hw_stats.stats_timer++;
2336         hclge_task_schedule(hdev);
2337 }
2338
2339 static void hclge_service_complete(struct hclge_dev *hdev)
2340 {
2341         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2342
2343         /* Flush memory before next watchdog */
2344         smp_mb__before_atomic();
2345         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2346 }
2347
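/* Read the vector0 status registers and decide which event (reset, MSI-X
 * error or mailbox) should be handled now, returning the event type and,
 * where applicable, the bits to clear for it.
 */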
2348 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2349 {
2350         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2351
2352         /* fetch the events from their corresponding regs */
2353         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2354         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2355         msix_src_reg = hclge_read_dev(&hdev->hw,
2356                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2357
2358         /* Assumption: If by any chance reset and mailbox events are reported
2359          * together then we will only process reset event in this go and will
2360          * defer the processing of the mailbox events. Since we would not have
2361          * cleared the RX CMDQ event this time, we would receive another
2362          * interrupt from H/W just for the mailbox.
2363          */
2364
2365         /* check for vector0 reset event sources */
2366         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2367                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2368                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2369                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2370                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2371                 return HCLGE_VECTOR0_EVENT_RST;
2372         }
2373
2374         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2375                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2376                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2377                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2378                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2379                 return HCLGE_VECTOR0_EVENT_RST;
2380         }
2381
2382         if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2383                 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2384                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2385                 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2386                 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2387                 return HCLGE_VECTOR0_EVENT_RST;
2388         }
2389
2390         /* check for vector0 msix event source */
2391         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
2392                 return HCLGE_VECTOR0_EVENT_ERR;
2393
2394         /* check for vector0 mailbox(=CMDQ RX) event source */
2395         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2396                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2397                 *clearval = cmdq_src_reg;
2398                 return HCLGE_VECTOR0_EVENT_MBX;
2399         }
2400
2401         return HCLGE_VECTOR0_EVENT_OTHER;
2402 }
2403
2404 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2405                                     u32 regclr)
2406 {
2407         switch (event_type) {
2408         case HCLGE_VECTOR0_EVENT_RST:
2409                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2410                 break;
2411         case HCLGE_VECTOR0_EVENT_MBX:
2412                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2413                 break;
2414         default:
2415                 break;
2416         }
2417 }
2418
2419 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2420 {
2421         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2422                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2423                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2424                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2425         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2426 }
2427
2428 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2429 {
2430         writel(enable ? 1 : 0, vector->addr);
2431 }
2432
2433 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2434 {
2435         struct hclge_dev *hdev = data;
2436         u32 event_cause;
2437         u32 clearval;
2438
2439         hclge_enable_vector(&hdev->misc_vector, false);
2440         event_cause = hclge_check_event_cause(hdev, &clearval);
2441
2442         /* vector 0 interrupt is shared with reset and mailbox source events. */
2443         switch (event_cause) {
2444         case HCLGE_VECTOR0_EVENT_ERR:
2445                 /* we do not know what type of reset is required now. This could
2446                  * only be decided after we fetch the type of errors which
2447                  * caused this event. Therefore, we will do below for now:
2448                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2449                  *    have deferred the type of reset to be used.
2450                  * 2. Schedule the reset service task.
2451                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2452                  *    will fetch the correct type of reset. This would be done
2453                  *    by first decoding the types of errors.
2454                  */
2455                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2456                 /* fall through */
2457         case HCLGE_VECTOR0_EVENT_RST:
2458                 hclge_reset_task_schedule(hdev);
2459                 break;
2460         case HCLGE_VECTOR0_EVENT_MBX:
2461                 /* If we are here then,
2462                  * 1. Either we are not handling any mbx task and we are not
2463                  *    scheduled as well
2464                  *                        OR
2465                  * 2. We could be handling a mbx task but nothing more is
2466                  *    scheduled.
2467                  * In both cases, we should schedule mbx task as there are more
2468                  * mbx messages reported by this interrupt.
2469                  */
2470                 hclge_mbx_task_schedule(hdev);
2471                 break;
2472         default:
2473                 dev_warn(&hdev->pdev->dev,
2474                          "received unknown or unhandled event of vector0\n");
2475                 break;
2476         }
2477
2478         /* clear the source of interrupt if it is not caused by reset */
2479         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2480                 hclge_clear_event_cause(hdev, event_cause, clearval);
2481                 hclge_enable_vector(&hdev->misc_vector, true);
2482         }
2483
2484         return IRQ_HANDLED;
2485 }
2486
2487 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2488 {
2489         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2490                 dev_warn(&hdev->pdev->dev,
2491                          "vector(vector_id %d) has been freed.\n", vector_id);
2492                 return;
2493         }
2494
2495         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2496         hdev->num_msi_left += 1;
2497         hdev->num_msi_used -= 1;
2498 }
2499
2500 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2501 {
2502         struct hclge_misc_vector *vector = &hdev->misc_vector;
2503
2504         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2505
2506         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2507         hdev->vector_status[0] = 0;
2508
2509         hdev->num_msi_left -= 1;
2510         hdev->num_msi_used += 1;
2511 }
2512
2513 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2514 {
2515         int ret;
2516
2517         hclge_get_misc_vector(hdev);
2518
2519         /* this would be explicitly freed in the end */
2520         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2521                           0, "hclge_misc", hdev);
2522         if (ret) {
2523                 hclge_free_vector(hdev, 0);
2524                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2525                         hdev->misc_vector.vector_irq);
2526         }
2527
2528         return ret;
2529 }
2530
2531 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2532 {
2533         free_irq(hdev->misc_vector.vector_irq, hdev);
2534         hclge_free_vector(hdev, 0);
2535 }
2536
2537 int hclge_notify_client(struct hclge_dev *hdev,
2538                         enum hnae3_reset_notify_type type)
2539 {
2540         struct hnae3_client *client = hdev->nic_client;
2541         u16 i;
2542
2543         if (!client->ops->reset_notify)
2544                 return -EOPNOTSUPP;
2545
2546         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2547                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2548                 int ret;
2549
2550                 ret = client->ops->reset_notify(handle, type);
2551                 if (ret) {
2552                         dev_err(&hdev->pdev->dev,
2553                                 "notify nic client failed %d(%d)\n", type, ret);
2554                         return ret;
2555                 }
2556         }
2557
2558         return 0;
2559 }
2560
2561 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2562                                     enum hnae3_reset_notify_type type)
2563 {
2564         struct hnae3_client *client = hdev->roce_client;
2565         int ret = 0;
2566         u16 i;
2567
2568         if (!client)
2569                 return 0;
2570
2571         if (!client->ops->reset_notify)
2572                 return -EOPNOTSUPP;
2573
2574         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2575                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2576
2577                 ret = client->ops->reset_notify(handle, type);
2578                 if (ret) {
2579                         dev_err(&hdev->pdev->dev,
2580                                 "notify roce client failed %d(%d)\n",
2581                                 type, ret);
2582                         return ret;
2583                 }
2584         }
2585
2586         return ret;
2587 }
2588
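/* Poll the hardware reset status (or the FLR done flag) until the pending
 * reset completes, giving up after HCLGE_RESET_WAIT_CNT iterations.
 */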
2589 static int hclge_reset_wait(struct hclge_dev *hdev)
2590 {
2591 #define HCLGE_RESET_WAIT_MS     100
2592 #define HCLGE_RESET_WAIT_CNT    200
2593         u32 val, reg, reg_bit;
2594         u32 cnt = 0;
2595
2596         switch (hdev->reset_type) {
2597         case HNAE3_IMP_RESET:
2598                 reg = HCLGE_GLOBAL_RESET_REG;
2599                 reg_bit = HCLGE_IMP_RESET_BIT;
2600                 break;
2601         case HNAE3_GLOBAL_RESET:
2602                 reg = HCLGE_GLOBAL_RESET_REG;
2603                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2604                 break;
2605         case HNAE3_CORE_RESET:
2606                 reg = HCLGE_GLOBAL_RESET_REG;
2607                 reg_bit = HCLGE_CORE_RESET_BIT;
2608                 break;
2609         case HNAE3_FUNC_RESET:
2610                 reg = HCLGE_FUN_RST_ING;
2611                 reg_bit = HCLGE_FUN_RST_ING_B;
2612                 break;
2613         case HNAE3_FLR_RESET:
2614                 break;
2615         default:
2616                 dev_err(&hdev->pdev->dev,
2617                         "Wait for unsupported reset type: %d\n",
2618                         hdev->reset_type);
2619                 return -EINVAL;
2620         }
2621
2622         if (hdev->reset_type == HNAE3_FLR_RESET) {
2623                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2624                        cnt++ < HCLGE_RESET_WAIT_CNT)
2625                         msleep(HCLGE_RESET_WAIT_MS);
2626
2627                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2628                         dev_err(&hdev->pdev->dev,
2629                                 "flr wait timeout: %d\n", cnt);
2630                         return -EBUSY;
2631                 }
2632
2633                 return 0;
2634         }
2635
2636         val = hclge_read_dev(&hdev->hw, reg);
2637         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2638                 msleep(HCLGE_RESET_WAIT_MS);
2639                 val = hclge_read_dev(&hdev->hw, reg);
2640                 cnt++;
2641         }
2642
2643         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2644                 dev_warn(&hdev->pdev->dev,
2645                          "Wait for reset timeout: %d\n", hdev->reset_type);
2646                 return -EBUSY;
2647         }
2648
2649         return 0;
2650 }
2651
2652 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2653 {
2654         struct hclge_vf_rst_cmd *req;
2655         struct hclge_desc desc;
2656
2657         req = (struct hclge_vf_rst_cmd *)desc.data;
2658         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2659         req->dest_vfid = func_id;
2660
2661         if (reset)
2662                 req->vf_rst = 0x1;
2663
2664         return hclge_cmd_send(&hdev->hw, &desc, 1);
2665 }
2666
2667 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2668 {
2669         int i;
2670
2671         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2672                 struct hclge_vport *vport = &hdev->vport[i];
2673                 int ret;
2674
2675                 /* Send cmd to set/clear VF's FUNC_RST_ING */
2676                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2677                 if (ret) {
2678                         dev_err(&hdev->pdev->dev,
2679                                 "set vf(%d) rst failed %d!\n",
2680                                 vport->vport_id, ret);
2681                         return ret;
2682                 }
2683
2684                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2685                         continue;
2686
2687                 /* Inform VF to process the reset.
2688                  * hclge_inform_reset_assert_to_vf may fail if VF
2689                  * driver is not loaded.
2690                  */
2691                 ret = hclge_inform_reset_assert_to_vf(vport);
2692                 if (ret)
2693                         dev_warn(&hdev->pdev->dev,
2694                                  "inform reset to vf(%d) failed %d!\n",
2695                                  vport->vport_id, ret);
2696         }
2697
2698         return 0;
2699 }
2700
2701 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2702 {
2703         struct hclge_desc desc;
2704         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2705         int ret;
2706
2707         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2708         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2709         req->fun_reset_vfid = func_id;
2710
2711         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2712         if (ret)
2713                 dev_err(&hdev->pdev->dev,
2714                         "send function reset cmd fail, status = %d\n", ret);
2715
2716         return ret;
2717 }
2718
2719 static void hclge_do_reset(struct hclge_dev *hdev)
2720 {
2721         struct hnae3_handle *handle = &hdev->vport[0].nic;
2722         struct pci_dev *pdev = hdev->pdev;
2723         u32 val;
2724
2725         if (hclge_get_hw_reset_stat(handle)) {
2726                 dev_info(&pdev->dev, "Hardware reset not finished\n");
2727                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
2728                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
2729                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
2730                 return;
2731         }
2732
2733         switch (hdev->reset_type) {
2734         case HNAE3_GLOBAL_RESET:
2735                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2736                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2737                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2738                 dev_info(&pdev->dev, "Global Reset requested\n");
2739                 break;
2740         case HNAE3_CORE_RESET:
2741                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2742                 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2743                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2744                 dev_info(&pdev->dev, "Core Reset requested\n");
2745                 break;
2746         case HNAE3_FUNC_RESET:
2747                 dev_info(&pdev->dev, "PF Reset requested\n");
2748                 /* schedule again to check later */
2749                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2750                 hclge_reset_task_schedule(hdev);
2751                 break;
2752         case HNAE3_FLR_RESET:
2753                 dev_info(&pdev->dev, "FLR requested\n");
2754                 /* schedule again to check later */
2755                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2756                 hclge_reset_task_schedule(hdev);
2757                 break;
2758         default:
2759                 dev_warn(&pdev->dev,
2760                          "Unsupported reset type: %d\n", hdev->reset_type);
2761                 break;
2762         }
2763 }
2764
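/* Resolve any unknown reset request and return the highest-priority reset
 * level pending in @addr, clearing its bit together with the lower-level
 * bits it supersedes.
 */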
2765 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2766                                                    unsigned long *addr)
2767 {
2768         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2769
2770         /* first, resolve any unknown reset type to the known type(s) */
2771         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2772                 /* we will intentionally ignore any errors from this function
2773                  *  as we will end up in *some* reset request in any case
2774                  */
2775                 hclge_handle_hw_msix_error(hdev, addr);
2776                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
2777                 /* We deferred the clearing of the error event which caused the
2778                  * interrupt since it was not possible to do that in
2779                  * interrupt context (and this is the reason we introduced
2780                  * new UNKNOWN reset type). Now, the errors have been
2781                  * handled and cleared in hardware we can safely enable
2782                  * interrupts. This is an exception to the norm.
2783                  */
2784                 hclge_enable_vector(&hdev->misc_vector, true);
2785         }
2786
2787         /* return the highest priority reset level amongst all */
2788         if (test_bit(HNAE3_IMP_RESET, addr)) {
2789                 rst_level = HNAE3_IMP_RESET;
2790                 clear_bit(HNAE3_IMP_RESET, addr);
2791                 clear_bit(HNAE3_GLOBAL_RESET, addr);
2792                 clear_bit(HNAE3_CORE_RESET, addr);
2793                 clear_bit(HNAE3_FUNC_RESET, addr);
2794         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2795                 rst_level = HNAE3_GLOBAL_RESET;
2796                 clear_bit(HNAE3_GLOBAL_RESET, addr);
2797                 clear_bit(HNAE3_CORE_RESET, addr);
2798                 clear_bit(HNAE3_FUNC_RESET, addr);
2799         } else if (test_bit(HNAE3_CORE_RESET, addr)) {
2800                 rst_level = HNAE3_CORE_RESET;
2801                 clear_bit(HNAE3_CORE_RESET, addr);
2802                 clear_bit(HNAE3_FUNC_RESET, addr);
2803         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2804                 rst_level = HNAE3_FUNC_RESET;
2805                 clear_bit(HNAE3_FUNC_RESET, addr);
2806         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
2807                 rst_level = HNAE3_FLR_RESET;
2808                 clear_bit(HNAE3_FLR_RESET, addr);
2809         }
2810
2811         if (hdev->reset_type != HNAE3_NONE_RESET &&
2812             rst_level < hdev->reset_type)
2813                 return HNAE3_NONE_RESET;
2814
2815         return rst_level;
2816 }
2817
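/* Acknowledge the reset interrupt status for the reset type that was just
 * handled and re-enable the misc vector so new reset/error events can be
 * delivered again.
 */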
2818 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2819 {
2820         u32 clearval = 0;
2821
2822         switch (hdev->reset_type) {
2823         case HNAE3_IMP_RESET:
2824                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2825                 break;
2826         case HNAE3_GLOBAL_RESET:
2827                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2828                 break;
2829         case HNAE3_CORE_RESET:
2830                 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2831                 break;
2832         default:
2833                 break;
2834         }
2835
2836         if (!clearval)
2837                 return;
2838
2839         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2840         hclge_enable_vector(&hdev->misc_vector, true);
2841 }
2842
2843 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2844 {
2845         int ret = 0;
2846
2847         switch (hdev->reset_type) {
2848         case HNAE3_FUNC_RESET:
2849                 /* fall through */
2850         case HNAE3_FLR_RESET:
2851                 ret = hclge_set_all_vf_rst(hdev, true);
2852                 break;
2853         default:
2854                 break;
2855         }
2856
2857         return ret;
2858 }
2859
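/* Assert the actual reset for the selected reset type: send the function
 * reset command, mark the FLR state, or trigger the IMP reset interrupt.
 * Command queue handling is disabled where the reset invalidates further
 * commands until hclge_cmd_init() runs again.
 */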
2860 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2861 {
2862         u32 reg_val;
2863         int ret = 0;
2864
2865         switch (hdev->reset_type) {
2866         case HNAE3_FUNC_RESET:
2867                 /* There is no mechanism for the PF to know if the VF has stopped
2868                  * IO yet; for now, just wait 100 ms for the VF to stop IO
2869                  */
2870                 msleep(100);
2871                 ret = hclge_func_reset_cmd(hdev, 0);
2872                 if (ret) {
2873                         dev_err(&hdev->pdev->dev,
2874                                 "asserting function reset failed %d!\n", ret);
2875                         return ret;
2876                 }
2877
2878                 /* After performing PF reset, it is not necessary to do the
2879                  * mailbox handling or send any command to firmware, because
2880                  * any mailbox handling or command to firmware is only valid
2881                  * after hclge_cmd_init is called.
2882                  */
2883                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2884                 break;
2885         case HNAE3_FLR_RESET:
2886                 /* There is no mechanism for the PF to know if the VF has stopped
2887                  * IO yet; for now, just wait 100 ms for the VF to stop IO
2888                  */
2889                 msleep(100);
2890                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2891                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2892                 break;
2893         case HNAE3_IMP_RESET:
2894                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2895                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2896                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2897                 break;
2898         default:
2899                 break;
2900         }
2901
2902         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2903
2904         return ret;
2905 }
2906
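/* Decide how to proceed after a failed reset attempt: return true when the
 * reset should simply be re-scheduled (another reset is pending or the
 * hardware is still resetting), false when the attempt is abandoned or the
 * reset level will be upgraded via the reset timer.
 */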
2907 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2908 {
2909 #define MAX_RESET_FAIL_CNT 5
2910 #define RESET_UPGRADE_DELAY_SEC 10
2911
2912         if (hdev->reset_pending) {
2913                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2914                          hdev->reset_pending);
2915                 return true;
2916         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2917                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2918                     BIT(HCLGE_IMP_RESET_BIT))) {
2919                 dev_info(&hdev->pdev->dev,
2920                          "reset failed because IMP Reset is pending\n");
2921                 hclge_clear_reset_cause(hdev);
2922                 return false;
2923         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2924                 hdev->reset_fail_cnt++;
2925                 if (is_timeout) {
2926                         set_bit(hdev->reset_type, &hdev->reset_pending);
2927                         dev_info(&hdev->pdev->dev,
2928                                  "re-schedule to wait for hw reset done\n");
2929                         return true;
2930                 }
2931
2932                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2933                 hclge_clear_reset_cause(hdev);
2934                 mod_timer(&hdev->reset_timer,
2935                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2936
2937                 return false;
2938         }
2939
2940         hclge_clear_reset_cause(hdev);
2941         dev_err(&hdev->pdev->dev, "Reset fail!\n");
2942         return false;
2943 }
2944
2945 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2946 {
2947         int ret = 0;
2948
2949         switch (hdev->reset_type) {
2950         case HNAE3_FUNC_RESET:
2951                 /* fall through */
2952         case HNAE3_FLR_RESET:
2953                 ret = hclge_set_all_vf_rst(hdev, false);
2954                 break;
2955         default:
2956                 break;
2957         }
2958
2959         return ret;
2960 }
2961
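/* Top level reset flow: notify the RoCE and NIC clients down, assert the
 * reset, wait for hardware completion, re-initialize the ae device and
 * finally bring the clients back up. On any failure the error handler
 * decides whether to re-schedule the reset task.
 */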
2962 static void hclge_reset(struct hclge_dev *hdev)
2963 {
2964         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2965         bool is_timeout = false;
2966         int ret;
2967
2968         /* Initialize ae_dev reset status as well, in case enet layer wants to
2969          * know if device is undergoing reset
2970          */
2971         ae_dev->reset_type = hdev->reset_type;
2972         hdev->reset_count++;
2973         /* perform reset of the stack & ae device for a client */
2974         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2975         if (ret)
2976                 goto err_reset;
2977
2978         ret = hclge_reset_prepare_down(hdev);
2979         if (ret)
2980                 goto err_reset;
2981
2982         rtnl_lock();
2983         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2984         if (ret)
2985                 goto err_reset_lock;
2986
2987         rtnl_unlock();
2988
2989         ret = hclge_reset_prepare_wait(hdev);
2990         if (ret)
2991                 goto err_reset;
2992
2993         if (hclge_reset_wait(hdev)) {
2994                 is_timeout = true;
2995                 goto err_reset;
2996         }
2997
2998         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
2999         if (ret)
3000                 goto err_reset;
3001
3002         rtnl_lock();
3003         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3004         if (ret)
3005                 goto err_reset_lock;
3006
3007         ret = hclge_reset_ae_dev(hdev->ae_dev);
3008         if (ret)
3009                 goto err_reset_lock;
3010
3011         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3012         if (ret)
3013                 goto err_reset_lock;
3014
3015         ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3016         if (ret)
3017                 goto err_reset_lock;
3018
3019         hclge_clear_reset_cause(hdev);
3020
3021         ret = hclge_reset_prepare_up(hdev);
3022         if (ret)
3023                 goto err_reset_lock;
3024
3025         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3026         if (ret)
3027                 goto err_reset_lock;
3028
3029         rtnl_unlock();
3030
3031         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3032         if (ret)
3033                 goto err_reset;
3034
3035         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3036         if (ret)
3037                 goto err_reset;
3038
3039         hdev->last_reset_time = jiffies;
3040         hdev->reset_fail_cnt = 0;
3041         ae_dev->reset_type = HNAE3_NONE_RESET;
3042         del_timer(&hdev->reset_timer);
3043
3044         return;
3045
3046 err_reset_lock:
3047         rtnl_unlock();
3048 err_reset:
3049         if (hclge_reset_err_handle(hdev, is_timeout))
3050                 hclge_reset_task_schedule(hdev);
3051 }
3052
3053 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3054 {
3055         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3056         struct hclge_dev *hdev = ae_dev->priv;
3057
3058         /* We might end up getting called broadly because of the 2 cases below:
3059          * 1. A recoverable error was conveyed through APEI and the only way
3060          *    to restore normalcy is to reset.
3061          * 2. A new reset request from the stack due to timeout
3062          *
3063          * For the first case, the error event might not have an ae handle
3064          * available. Check if this is a new reset request and we are not here
3065          * just because the last reset attempt did not succeed and the watchdog
3066          * hit us again. We know this if the last reset request did not occur
3067          * very recently (watchdog timer = 5*HZ, so check after a sufficiently
3068          * large time, say 4*5*HZ). In case of a new request we reset the
3069          * "reset level" to PF reset. And if it is a repeat reset request of
3070          * the most recent one then we want to make sure we throttle the reset
3071          * request. Therefore, we will not allow it again before 3*HZ.
3072          */
3073         if (!handle)
3074                 handle = &hdev->vport[0].nic;
3075
3076         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3077                 return;
3078         else if (hdev->default_reset_request)
3079                 hdev->reset_level =
3080                         hclge_get_reset_level(hdev,
3081                                               &hdev->default_reset_request);
3082         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3083                 hdev->reset_level = HNAE3_FUNC_RESET;
3084
3085         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3086                  hdev->reset_level);
3087
3088         /* request reset & schedule reset task */
3089         set_bit(hdev->reset_level, &hdev->reset_request);
3090         hclge_reset_task_schedule(hdev);
3091
3092         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3093                 hdev->reset_level++;
3094 }
3095
3096 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3097                                         enum hnae3_reset_type rst_type)
3098 {
3099         struct hclge_dev *hdev = ae_dev->priv;
3100
3101         set_bit(rst_type, &hdev->default_reset_request);
3102 }
3103
3104 static void hclge_reset_timer(struct timer_list *t)
3105 {
3106         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3107
3108         dev_info(&hdev->pdev->dev,
3109                  "triggering global reset in reset timer\n");
3110         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3111         hclge_reset_event(hdev->pdev, NULL);
3112 }
3113
3114 static void hclge_reset_subtask(struct hclge_dev *hdev)
3115 {
3116         /* check if there is any ongoing reset in the hardware. This status can
3117          * be checked from reset_pending. If there is, then we need to wait for
3118          * hardware to complete the reset.
3119          *    a. If we are able to figure out in reasonable time that hardware
3120          *       has fully reset, then we can proceed with the driver and client
3121          *       reset.
3122          *    b. else, we can come back later to check this status so re-sched
3123          *       now.
3124          */
3125         hdev->last_reset_time = jiffies;
3126         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3127         if (hdev->reset_type != HNAE3_NONE_RESET)
3128                 hclge_reset(hdev);
3129
3130         /* check if we got any *new* reset requests to be honored */
3131         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3132         if (hdev->reset_type != HNAE3_NONE_RESET)
3133                 hclge_do_reset(hdev);
3134
3135         hdev->reset_type = HNAE3_NONE_RESET;
3136 }
3137
3138 static void hclge_reset_service_task(struct work_struct *work)
3139 {
3140         struct hclge_dev *hdev =
3141                 container_of(work, struct hclge_dev, rst_service_task);
3142
3143         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3144                 return;
3145
3146         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3147
3148         hclge_reset_subtask(hdev);
3149
3150         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3151 }
3152
3153 static void hclge_mailbox_service_task(struct work_struct *work)
3154 {
3155         struct hclge_dev *hdev =
3156                 container_of(work, struct hclge_dev, mbx_service_task);
3157
3158         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3159                 return;
3160
3161         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3162
3163         hclge_mbx_handler(hdev);
3164
3165         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3166 }
3167
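/* Mark VF vports that have not refreshed their alive state within the last
 * 8 seconds as not alive and restore their MPS to the default frame size.
 */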
3168 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3169 {
3170         int i;
3171
3172         /* start from vport 1 since the PF (vport 0) is always alive */
3173         for (i = 1; i < hdev->num_alloc_vport; i++) {
3174                 struct hclge_vport *vport = &hdev->vport[i];
3175
3176                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3177                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3178
3179                 /* If the VF is not alive, restore its MPS to the default value */
3180                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3181                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3182         }
3183 }
3184
3185 static void hclge_service_task(struct work_struct *work)
3186 {
3187         struct hclge_dev *hdev =
3188                 container_of(work, struct hclge_dev, service_task);
3189
3190         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3191                 hclge_update_stats_for_all(hdev);
3192                 hdev->hw_stats.stats_timer = 0;
3193         }
3194
3195         hclge_update_speed_duplex(hdev);
3196         hclge_update_link_status(hdev);
3197         hclge_update_vport_alive(hdev);
3198         hclge_service_complete(hdev);
3199 }
3200
3201 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3202 {
3203         /* VF handle has no client */
3204         if (!handle->client)
3205                 return container_of(handle, struct hclge_vport, nic);
3206         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3207                 return container_of(handle, struct hclge_vport, roce);
3208         else
3209                 return container_of(handle, struct hclge_vport, nic);
3210 }
3211
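/* Allocate up to @vector_num unused MSI vectors for this vport. Vector 0 is
 * skipped since it is reserved for the misc interrupt; the return value is
 * the number of vectors actually allocated.
 */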
3212 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3213                             struct hnae3_vector_info *vector_info)
3214 {
3215         struct hclge_vport *vport = hclge_get_vport(handle);
3216         struct hnae3_vector_info *vector = vector_info;
3217         struct hclge_dev *hdev = vport->back;
3218         int alloc = 0;
3219         int i, j;
3220
3221         vector_num = min(hdev->num_msi_left, vector_num);
3222
3223         for (j = 0; j < vector_num; j++) {
3224                 for (i = 1; i < hdev->num_msi; i++) {
3225                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3226                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3227                                 vector->io_addr = hdev->hw.io_base +
3228                                         HCLGE_VECTOR_REG_BASE +
3229                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3230                                         vport->vport_id *
3231                                         HCLGE_VECTOR_VF_OFFSET;
3232                                 hdev->vector_status[i] = vport->vport_id;
3233                                 hdev->vector_irq[i] = vector->vector;
3234
3235                                 vector++;
3236                                 alloc++;
3237
3238                                 break;
3239                         }
3240                 }
3241         }
3242         hdev->num_msi_left -= alloc;
3243         hdev->num_msi_used += alloc;
3244
3245         return alloc;
3246 }
3247
3248 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3249 {
3250         int i;
3251
3252         for (i = 0; i < hdev->num_msi; i++)
3253                 if (vector == hdev->vector_irq[i])
3254                         return i;
3255
3256         return -EINVAL;
3257 }
3258
3259 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3260 {
3261         struct hclge_vport *vport = hclge_get_vport(handle);
3262         struct hclge_dev *hdev = vport->back;
3263         int vector_id;
3264
3265         vector_id = hclge_get_vector_index(hdev, vector);
3266         if (vector_id < 0) {
3267                 dev_err(&hdev->pdev->dev,
3268                         "Get vector index fail. vector_id =%d\n", vector_id);
3269                 return vector_id;
3270         }
3271
3272         hclge_free_vector(hdev, vector_id);
3273
3274         return 0;
3275 }
3276
3277 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3278 {
3279         return HCLGE_RSS_KEY_SIZE;
3280 }
3281
3282 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3283 {
3284         return HCLGE_RSS_IND_TBL_SIZE;
3285 }
3286
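/* Program the RSS hash algorithm and hash key. The key does not fit in a
 * single descriptor, so it is written in three chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes, with the last chunk carrying the remainder
 * of HCLGE_RSS_KEY_SIZE.
 */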
3287 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3288                                   const u8 hfunc, const u8 *key)
3289 {
3290         struct hclge_rss_config_cmd *req;
3291         struct hclge_desc desc;
3292         int key_offset;
3293         int key_size;
3294         int ret;
3295
3296         req = (struct hclge_rss_config_cmd *)desc.data;
3297
3298         for (key_offset = 0; key_offset < 3; key_offset++) {
3299                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3300                                            false);
3301
3302                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3303                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3304
3305                 if (key_offset == 2)
3306                         key_size =
3307                         HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3308                 else
3309                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3310
3311                 memcpy(req->hash_key,
3312                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3313
3314                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3315                 if (ret) {
3316                         dev_err(&hdev->pdev->dev,
3317                                 "Configure RSS config fail, status = %d\n",
3318                                 ret);
3319                         return ret;
3320                 }
3321         }
3322         return 0;
3323 }
3324
3325 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3326 {
3327         struct hclge_rss_indirection_table_cmd *req;
3328         struct hclge_desc desc;
3329         int i, j;
3330         int ret;
3331
3332         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3333
3334         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3335                 hclge_cmd_setup_basic_desc
3336                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3337
3338                 req->start_table_index =
3339                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3340                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3341
3342                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3343                         req->rss_result[j] =
3344                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3345
3346                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3347                 if (ret) {
3348                         dev_err(&hdev->pdev->dev,
3349                                 "Configure rss indir table fail,status = %d\n",
3350                                 ret);
3351                         return ret;
3352                 }
3353         }
3354         return 0;
3355 }
3356
3357 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3358                                  u16 *tc_size, u16 *tc_offset)
3359 {
3360         struct hclge_rss_tc_mode_cmd *req;
3361         struct hclge_desc desc;
3362         int ret;
3363         int i;
3364
3365         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3366         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3367
3368         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3369                 u16 mode = 0;
3370
3371                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3372                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3373                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3374                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3375                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3376
3377                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3378         }
3379
3380         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3381         if (ret)
3382                 dev_err(&hdev->pdev->dev,
3383                         "Configure rss tc mode fail, status = %d\n", ret);
3384
3385         return ret;
3386 }
3387
3388 static void hclge_get_rss_type(struct hclge_vport *vport)
3389 {
3390         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3391             vport->rss_tuple_sets.ipv4_udp_en ||
3392             vport->rss_tuple_sets.ipv4_sctp_en ||
3393             vport->rss_tuple_sets.ipv6_tcp_en ||
3394             vport->rss_tuple_sets.ipv6_udp_en ||
3395             vport->rss_tuple_sets.ipv6_sctp_en)
3396                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3397         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3398                  vport->rss_tuple_sets.ipv6_fragment_en)
3399                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3400         else
3401                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3402 }
3403
3404 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3405 {
3406         struct hclge_rss_input_tuple_cmd *req;
3407         struct hclge_desc desc;
3408         int ret;
3409
3410         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3411
3412         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3413
3414         /* Get the tuple cfg from pf */
3415         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3416         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3417         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3418         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3419         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3420         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3421         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3422         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3423         hclge_get_rss_type(&hdev->vport[0]);
3424         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3425         if (ret)
3426                 dev_err(&hdev->pdev->dev,
3427                         "Configure rss input fail, status = %d\n", ret);
3428         return ret;
3429 }
3430
3431 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3432                          u8 *key, u8 *hfunc)
3433 {
3434         struct hclge_vport *vport = hclge_get_vport(handle);
3435         int i;
3436
3437         /* Get hash algorithm */
3438         if (hfunc) {
3439                 switch (vport->rss_algo) {
3440                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3441                         *hfunc = ETH_RSS_HASH_TOP;
3442                         break;
3443                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3444                         *hfunc = ETH_RSS_HASH_XOR;
3445                         break;
3446                 default:
3447                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3448                         break;
3449                 }
3450         }
3451
3452         /* Get the RSS Key required by the user */
3453         if (key)
3454                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3455
3456         /* Get indirect table */
3457         if (indir)
3458                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3459                         indir[i] =  vport->rss_indirection_tbl[i];
3460
3461         return 0;
3462 }
3463
3464 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3465                          const  u8 *key, const  u8 hfunc)
3466 {
3467         struct hclge_vport *vport = hclge_get_vport(handle);
3468         struct hclge_dev *hdev = vport->back;
3469         u8 hash_algo;
3470         int ret, i;
3471
3472         /* Set the RSS Hash Key if specified by the user */
3473         if (key) {
3474                 switch (hfunc) {
3475                 case ETH_RSS_HASH_TOP:
3476                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3477                         break;
3478                 case ETH_RSS_HASH_XOR:
3479                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3480                         break;
3481                 case ETH_RSS_HASH_NO_CHANGE:
3482                         hash_algo = vport->rss_algo;
3483                         break;
3484                 default:
3485                         return -EINVAL;
3486                 }
3487
3488                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3489                 if (ret)
3490                         return ret;
3491
3492                 /* Update the shadow RSS key with the user specified key */
3493                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3494                 vport->rss_algo = hash_algo;
3495         }
3496
3497         /* Update the shadow RSS table with user specified qids */
3498         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3499                 vport->rss_indirection_tbl[i] = indir[i];
3500
3501         /* Update the hardware */
3502         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3503 }
3504
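/* Translate the ethtool RXH_* flags in @nfc into the driver's tuple bits:
 * source/destination IP, source/destination L4 port, and the verification
 * tag for SCTP flows.
 */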
3505 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3506 {
3507         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3508
3509         if (nfc->data & RXH_L4_B_2_3)
3510                 hash_sets |= HCLGE_D_PORT_BIT;
3511         else
3512                 hash_sets &= ~HCLGE_D_PORT_BIT;
3513
3514         if (nfc->data & RXH_IP_SRC)
3515                 hash_sets |= HCLGE_S_IP_BIT;
3516         else
3517                 hash_sets &= ~HCLGE_S_IP_BIT;
3518
3519         if (nfc->data & RXH_IP_DST)
3520                 hash_sets |= HCLGE_D_IP_BIT;
3521         else
3522                 hash_sets &= ~HCLGE_D_IP_BIT;
3523
3524         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3525                 hash_sets |= HCLGE_V_TAG_BIT;
3526
3527         return hash_sets;
3528 }
3529
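/* Update the RSS input tuple for a single flow type. The currently cached
 * configuration is re-sent for all other flow types, and the vport shadow
 * copy is refreshed only after the command succeeds.
 */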
3530 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3531                                struct ethtool_rxnfc *nfc)
3532 {
3533         struct hclge_vport *vport = hclge_get_vport(handle);
3534         struct hclge_dev *hdev = vport->back;
3535         struct hclge_rss_input_tuple_cmd *req;
3536         struct hclge_desc desc;
3537         u8 tuple_sets;
3538         int ret;
3539
3540         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3541                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3542                 return -EINVAL;
3543
3544         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3545         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3546
3547         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3548         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3549         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3550         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3551         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3552         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3553         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3554         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3555
3556         tuple_sets = hclge_get_rss_hash_bits(nfc);
3557         switch (nfc->flow_type) {
3558         case TCP_V4_FLOW:
3559                 req->ipv4_tcp_en = tuple_sets;
3560                 break;
3561         case TCP_V6_FLOW:
3562                 req->ipv6_tcp_en = tuple_sets;
3563                 break;
3564         case UDP_V4_FLOW:
3565                 req->ipv4_udp_en = tuple_sets;
3566                 break;
3567         case UDP_V6_FLOW:
3568                 req->ipv6_udp_en = tuple_sets;
3569                 break;
3570         case SCTP_V4_FLOW:
3571                 req->ipv4_sctp_en = tuple_sets;
3572                 break;
3573         case SCTP_V6_FLOW:
3574                 if ((nfc->data & RXH_L4_B_0_1) ||
3575                     (nfc->data & RXH_L4_B_2_3))
3576                         return -EINVAL;
3577
3578                 req->ipv6_sctp_en = tuple_sets;
3579                 break;
3580         case IPV4_FLOW:
3581                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3582                 break;
3583         case IPV6_FLOW:
3584                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3585                 break;
3586         default:
3587                 return -EINVAL;
3588         }
3589
3590         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3591         if (ret) {
3592                 dev_err(&hdev->pdev->dev,
3593                         "Set rss tuple fail, status = %d\n", ret);
3594                 return ret;
3595         }
3596
3597         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3598         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3599         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3600         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3601         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3602         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3603         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3604         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3605         hclge_get_rss_type(vport);
3606         return 0;
3607 }
3608
3609 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3610                                struct ethtool_rxnfc *nfc)
3611 {
3612         struct hclge_vport *vport = hclge_get_vport(handle);
3613         u8 tuple_sets;
3614
3615         nfc->data = 0;
3616
3617         switch (nfc->flow_type) {
3618         case TCP_V4_FLOW:
3619                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3620                 break;
3621         case UDP_V4_FLOW:
3622                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3623                 break;
3624         case TCP_V6_FLOW:
3625                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3626                 break;
3627         case UDP_V6_FLOW:
3628                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3629                 break;
3630         case SCTP_V4_FLOW:
3631                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3632                 break;
3633         case SCTP_V6_FLOW:
3634                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3635                 break;
3636         case IPV4_FLOW:
3637         case IPV6_FLOW:
3638                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3639                 break;
3640         default:
3641                 return -EINVAL;
3642         }
3643
3644         if (!tuple_sets)
3645                 return 0;
3646
3647         if (tuple_sets & HCLGE_D_PORT_BIT)
3648                 nfc->data |= RXH_L4_B_2_3;
3649         if (tuple_sets & HCLGE_S_PORT_BIT)
3650                 nfc->data |= RXH_L4_B_0_1;
3651         if (tuple_sets & HCLGE_D_IP_BIT)
3652                 nfc->data |= RXH_IP_DST;
3653         if (tuple_sets & HCLGE_S_IP_BIT)
3654                 nfc->data |= RXH_IP_SRC;
3655
3656         return 0;
3657 }
3658
3659 static int hclge_get_tc_size(struct hnae3_handle *handle)
3660 {
3661         struct hclge_vport *vport = hclge_get_vport(handle);
3662         struct hclge_dev *hdev = vport->back;
3663
3664         return hdev->rss_size_max;
3665 }
3666
3667 int hclge_rss_init_hw(struct hclge_dev *hdev)
3668 {
3669         struct hclge_vport *vport = hdev->vport;
3670         u8 *rss_indir = vport[0].rss_indirection_tbl;
3671         u16 rss_size = vport[0].alloc_rss_size;
3672         u8 *key = vport[0].rss_hash_key;
3673         u8 hfunc = vport[0].rss_algo;
3674         u16 tc_offset[HCLGE_MAX_TC_NUM];
3675         u16 tc_valid[HCLGE_MAX_TC_NUM];
3676         u16 tc_size[HCLGE_MAX_TC_NUM];
3677         u16 roundup_size;
3678         int i, ret;
3679
3680         ret = hclge_set_rss_indir_table(hdev, rss_indir);
3681         if (ret)
3682                 return ret;
3683
3684         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3685         if (ret)
3686                 return ret;
3687
3688         ret = hclge_set_rss_input_tuple(hdev);
3689         if (ret)
3690                 return ret;
3691
3692         /* Each TC has the same queue size, and the tc_size set to hardware is
3693          * the log2 of the roundup power of two of rss_size; the actual queue
3694          * size is limited by the indirection table.
3695          */
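        /* Illustrative example: rss_size = 24 gives roundup_pow_of_two(24) = 32,
         * so the tc_size written to hardware is ilog2(32) = 5.
         */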
3696         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3697                 dev_err(&hdev->pdev->dev,
3698                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
3699                         rss_size);
3700                 return -EINVAL;
3701         }
3702
3703         roundup_size = roundup_pow_of_two(rss_size);
3704         roundup_size = ilog2(roundup_size);
3705
3706         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3707                 tc_valid[i] = 0;
3708
3709                 if (!(hdev->hw_tc_map & BIT(i)))
3710                         continue;
3711
3712                 tc_valid[i] = 1;
3713                 tc_size[i] = roundup_size;
3714                 tc_offset[i] = rss_size * i;
3715         }
3716
3717         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3718 }
3719
3720 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3721 {
3722         struct hclge_vport *vport = hdev->vport;
3723         int i, j;
3724
3725         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3726                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3727                         vport[j].rss_indirection_tbl[i] =
3728                                 i % vport[j].alloc_rss_size;
3729         }
3730 }
3731
3732 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3733 {
3734         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3735         struct hclge_vport *vport = hdev->vport;
3736
3737         if (hdev->pdev->revision >= 0x21)
3738                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3739
3740         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3741                 vport[i].rss_tuple_sets.ipv4_tcp_en =
3742                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3743                 vport[i].rss_tuple_sets.ipv4_udp_en =
3744                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3745                 vport[i].rss_tuple_sets.ipv4_sctp_en =
3746                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3747                 vport[i].rss_tuple_sets.ipv4_fragment_en =
3748                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3749                 vport[i].rss_tuple_sets.ipv6_tcp_en =
3750                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3751                 vport[i].rss_tuple_sets.ipv6_udp_en =
3752                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3753                 vport[i].rss_tuple_sets.ipv6_sctp_en =
3754                         HCLGE_RSS_INPUT_TUPLE_SCTP;
3755                 vport[i].rss_tuple_sets.ipv6_fragment_en =
3756                         HCLGE_RSS_INPUT_TUPLE_OTHER;
3757
3758                 vport[i].rss_algo = rss_algo;
3759
3760                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
3761                        HCLGE_RSS_KEY_SIZE);
3762         }
3763
3764         hclge_rss_indir_init_cfg(hdev);
3765 }
3766
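/* Map (when @en is true) or unmap each ring in @ring_chain to/from
 * @vector_id. Entries are batched HCLGE_VECTOR_ELEMENTS_PER_CMD at a time
 * per command descriptor, and any trailing partial batch is flushed after
 * the loop.
 */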
3767 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3768                                 int vector_id, bool en,
3769                                 struct hnae3_ring_chain_node *ring_chain)
3770 {
3771         struct hclge_dev *hdev = vport->back;
3772         struct hnae3_ring_chain_node *node;
3773         struct hclge_desc desc;
3774         struct hclge_ctrl_vector_chain_cmd *req
3775                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3776         enum hclge_cmd_status status;
3777         enum hclge_opcode_type op;
3778         u16 tqp_type_and_id;
3779         int i;
3780
3781         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3782         hclge_cmd_setup_basic_desc(&desc, op, false);
3783         req->int_vector_id = vector_id;
3784
3785         i = 0;
3786         for (node = ring_chain; node; node = node->next) {
3787                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3788                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
3789                                 HCLGE_INT_TYPE_S,
3790                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3791                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3792                                 HCLGE_TQP_ID_S, node->tqp_index);
3793                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3794                                 HCLGE_INT_GL_IDX_S,
3795                                 hnae3_get_field(node->int_gl_idx,
3796                                                 HNAE3_RING_GL_IDX_M,
3797                                                 HNAE3_RING_GL_IDX_S));
3798                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3799                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3800                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3801                         req->vfid = vport->vport_id;
3802
3803                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
3804                         if (status) {
3805                                 dev_err(&hdev->pdev->dev,
3806                                         "Map TQP fail, status is %d.\n",
3807                                         status);
3808                                 return -EIO;
3809                         }
3810                         i = 0;
3811
3812                         hclge_cmd_setup_basic_desc(&desc,
3813                                                    op,
3814                                                    false);
3815                         req->int_vector_id = vector_id;
3816                 }
3817         }
3818
3819         if (i > 0) {
3820                 req->int_cause_num = i;
3821                 req->vfid = vport->vport_id;
3822                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
3823                 if (status) {
3824                         dev_err(&hdev->pdev->dev,
3825                                 "Map TQP fail, status is %d.\n", status);
3826                         return -EIO;
3827                 }
3828         }
3829
3830         return 0;
3831 }
3832
3833 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3834                                     int vector,
3835                                     struct hnae3_ring_chain_node *ring_chain)
3836 {
3837         struct hclge_vport *vport = hclge_get_vport(handle);
3838         struct hclge_dev *hdev = vport->back;
3839         int vector_id;
3840
3841         vector_id = hclge_get_vector_index(hdev, vector);
3842         if (vector_id < 0) {
3843                 dev_err(&hdev->pdev->dev,
3844                         "Get vector index fail. vector_id =%d\n", vector_id);
3845                 return vector_id;
3846         }
3847
3848         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3849 }
3850
3851 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3852                                        int vector,
3853                                        struct hnae3_ring_chain_node *ring_chain)
3854 {
3855         struct hclge_vport *vport = hclge_get_vport(handle);
3856         struct hclge_dev *hdev = vport->back;
3857         int vector_id, ret;
3858
3859         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3860                 return 0;
3861
3862         vector_id = hclge_get_vector_index(hdev, vector);
3863         if (vector_id < 0) {
3864                 dev_err(&handle->pdev->dev,
3865                         "Get vector index fail. ret =%d\n", vector_id);
3866                 return vector_id;
3867         }
3868
3869         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3870         if (ret)
3871                 dev_err(&handle->pdev->dev,
3872                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3873                         vector_id,
3874                         ret);
3875
3876         return ret;
3877 }
3878
3879 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3880                                struct hclge_promisc_param *param)
3881 {
3882         struct hclge_promisc_cfg_cmd *req;
3883         struct hclge_desc desc;
3884         int ret;
3885
3886         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3887
3888         req = (struct hclge_promisc_cfg_cmd *)desc.data;
3889         req->vf_id = param->vf_id;
3890
3891         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
3892          * on pdev revision(0x20); newer revisions support them. Setting
3893          * these two fields will not return an error when the driver sends
3894          * the command to firmware in revision(0x20).
3895          */
3896         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3897                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3898
3899         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3900         if (ret)
3901                 dev_err(&hdev->pdev->dev,
3902                         "Set promisc mode fail, status is %d.\n", ret);
3903
3904         return ret;
3905 }
3906
3907 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3908                               bool en_mc, bool en_bc, int vport_id)
3909 {
3910         if (!param)
3911                 return;
3912
3913         memset(param, 0, sizeof(struct hclge_promisc_param));
3914         if (en_uc)
3915                 param->enable = HCLGE_PROMISC_EN_UC;
3916         if (en_mc)
3917                 param->enable |= HCLGE_PROMISC_EN_MC;
3918         if (en_bc)
3919                 param->enable |= HCLGE_PROMISC_EN_BC;
3920         param->vf_id = vport_id;
3921 }
3922
3923 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3924                                   bool en_mc_pmc)
3925 {
3926         struct hclge_vport *vport = hclge_get_vport(handle);
3927         struct hclge_dev *hdev = vport->back;
3928         struct hclge_promisc_param param;
3929         bool en_bc_pmc = true;
3930
3931         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
3932          * is always bypassed. So broadcast promisc should be disabled until
3933          * the user enables promisc mode
3934          */
3935         if (handle->pdev->revision == 0x20)
3936                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
3937
3938         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
3939                                  vport->vport_id);
3940         return hclge_cmd_set_promisc_mode(hdev, &param);
3941 }
3942
3943 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3944 {
3945         struct hclge_get_fd_mode_cmd *req;
3946         struct hclge_desc desc;
3947         int ret;
3948
3949         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3950
3951         req = (struct hclge_get_fd_mode_cmd *)desc.data;
3952
3953         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3954         if (ret) {
3955                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3956                 return ret;
3957         }
3958
3959         *fd_mode = req->mode;
3960
3961         return ret;
3962 }
3963
3964 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3965                                    u32 *stage1_entry_num,
3966                                    u32 *stage2_entry_num,
3967                                    u16 *stage1_counter_num,
3968                                    u16 *stage2_counter_num)
3969 {
3970         struct hclge_get_fd_allocation_cmd *req;
3971         struct hclge_desc desc;
3972         int ret;
3973
3974         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3975
3976         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3977
3978         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3979         if (ret) {
3980                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3981                         ret);
3982                 return ret;
3983         }
3984
3985         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
3986         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
3987         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
3988         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
3989
3990         return ret;
3991 }
3992
3993 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
3994 {
3995         struct hclge_set_fd_key_config_cmd *req;
3996         struct hclge_fd_key_cfg *stage;
3997         struct hclge_desc desc;
3998         int ret;
3999
4000         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4001
4002         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4003         stage = &hdev->fd_cfg.key_cfg[stage_num];
4004         req->stage = stage_num;
4005         req->key_select = stage->key_sel;
4006         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4007         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4008         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4009         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4010         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4011         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4012
4013         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4014         if (ret)
4015                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4016
4017         return ret;
4018 }
4019
4020 static int hclge_init_fd_config(struct hclge_dev *hdev)
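/* Query the flow director mode and rule/counter allocation from firmware,
 * then program the stage 1 key configuration (tuples and meta data) used
 * for flow director rules.
 */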
4021 {
4022 #define LOW_2_WORDS             0x03
4023         struct hclge_fd_key_cfg *key_cfg;
4024         int ret;
4025
4026         if (!hnae3_dev_fd_supported(hdev))
4027                 return 0;
4028
4029         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4030         if (ret)
4031                 return ret;
4032
4033         switch (hdev->fd_cfg.fd_mode) {
4034         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4035                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4036                 break;
4037         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4038                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4039                 break;
4040         default:
4041                 dev_err(&hdev->pdev->dev,
4042                         "Unsupported flow director mode %d\n",
4043                         hdev->fd_cfg.fd_mode);
4044                 return -EOPNOTSUPP;
4045         }
4046
4047         hdev->fd_cfg.proto_support =
4048                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4049                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4050         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4051         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4052         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4053         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4054         key_cfg->outer_sipv6_word_en = 0;
4055         key_cfg->outer_dipv6_word_en = 0;
4056
4057         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4058                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4059                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4060                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4061
4062         /* If using the max 400-bit key, we can also support tuples for ether type */
4063         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4064                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4065                 key_cfg->tuple_active |=
4066                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4067         }
4068
4069         /* roce_type is used to filter roce frames
4070          * dst_vport is used to specify the rule
4071          */
4072         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4073
4074         ret = hclge_get_fd_allocation(hdev,
4075                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4076                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4077                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4078                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4079         if (ret)
4080                 return ret;
4081
4082         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4083 }
4084
4085 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4086                                 int loc, u8 *key, bool is_add)
4087 {
4088         struct hclge_fd_tcam_config_1_cmd *req1;
4089         struct hclge_fd_tcam_config_2_cmd *req2;
4090         struct hclge_fd_tcam_config_3_cmd *req3;
4091         struct hclge_desc desc[3];
4092         int ret;
4093
4094         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4095         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4096         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4097         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4098         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4099
4100         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4101         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4102         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4103
4104         req1->stage = stage;
4105         req1->xy_sel = sel_x ? 1 : 0;
4106         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4107         req1->index = cpu_to_le32(loc);
4108         req1->entry_vld = sel_x ? is_add : 0;
4109
4110         if (key) {
4111                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4112                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4113                        sizeof(req2->tcam_data));
4114                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4115                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4116         }
4117
4118         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4119         if (ret)
4120                 dev_err(&hdev->pdev->dev,
4121                         "config tcam key fail, ret=%d\n",
4122                         ret);
4123
4124         return ret;
4125 }
4126
4127 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4128                               struct hclge_fd_ad_data *action)
4129 {
4130         struct hclge_fd_ad_config_cmd *req;
4131         struct hclge_desc desc;
4132         u64 ad_data = 0;
4133         int ret;
4134
4135         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4136
4137         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4138         req->index = cpu_to_le32(loc);
4139         req->stage = stage;
4140
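        /* The rule id fields are set first and then shifted into the upper
         * 32 bits of ad_data; the action fields below occupy the lower
         * 32 bits.
         */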
4141         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4142                       action->write_rule_id_to_bd);
4143         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4144                         action->rule_id);
4145         ad_data <<= 32;
4146         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4147         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4148                       action->forward_to_direct_queue);
4149         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4150                         action->queue_id);
4151         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4152         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4153                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4154         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4155         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4156                         action->counter_id);
4157
4158         req->ad_data = cpu_to_le64(ad_data);
4159         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4160         if (ret)
4161                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4162
4163         return ret;
4164 }
4165
4166 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
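/* Convert one tuple of @rule into the TCAM x/y key bytes using calc_x() and
 * calc_y() with the per-tuple mask. Returns true when the tuple was encoded
 * (or is unused and intentionally skipped), false for an unknown tuple bit.
 */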
4167                                    struct hclge_fd_rule *rule)
4168 {
4169         u16 tmp_x_s, tmp_y_s;
4170         u32 tmp_x_l, tmp_y_l;
4171         int i;
4172
4173         if (rule->unused_tuple & tuple_bit)
4174                 return true;
4175
4176         switch (tuple_bit) {
4177         case 0:
4178                 return false;
4179         case BIT(INNER_DST_MAC):
4180                 for (i = 0; i < 6; i++) {
4181                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4182                                rule->tuples_mask.dst_mac[i]);
4183                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4184                                rule->tuples_mask.dst_mac[i]);
4185                 }
4186
4187                 return true;
4188         case BIT(INNER_SRC_MAC):
4189                 for (i = 0; i < 6; i++) {
4190                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4191                                rule->tuples_mask.src_mac[i]);
4192                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4193                                rule->tuples_mask.src_mac[i]);
4194                 }
4195
4196                 return true;
4197         case BIT(INNER_VLAN_TAG_FST):
4198                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4199                        rule->tuples_mask.vlan_tag1);
4200                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4201                        rule->tuples_mask.vlan_tag1);
4202                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4203                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4204
4205                 return true;
4206         case BIT(INNER_ETH_TYPE):
4207                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4208                        rule->tuples_mask.ether_proto);
4209                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4210                        rule->tuples_mask.ether_proto);
4211                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4212                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4213
4214                 return true;
4215         case BIT(INNER_IP_TOS):
4216                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4217                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4218
4219                 return true;
4220         case BIT(INNER_IP_PROTO):
4221                 calc_x(*key_x, rule->tuples.ip_proto,
4222                        rule->tuples_mask.ip_proto);
4223                 calc_y(*key_y, rule->tuples.ip_proto,
4224                        rule->tuples_mask.ip_proto);
4225
4226                 return true;
4227         case BIT(INNER_SRC_IP):
4228                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4229                        rule->tuples_mask.src_ip[3]);
4230                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4231                        rule->tuples_mask.src_ip[3]);
4232                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4233                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4234
4235                 return true;
4236         case BIT(INNER_DST_IP):
4237                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4238                        rule->tuples_mask.dst_ip[3]);
4239                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4240                        rule->tuples_mask.dst_ip[3]);
4241                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4242                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4243
4244                 return true;
4245         case BIT(INNER_SRC_PORT):
4246                 calc_x(tmp_x_s, rule->tuples.src_port,
4247                        rule->tuples_mask.src_port);
4248                 calc_y(tmp_y_s, rule->tuples.src_port,
4249                        rule->tuples_mask.src_port);
4250                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4251                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4252
4253                 return true;
4254         case BIT(INNER_DST_PORT):
4255                 calc_x(tmp_x_s, rule->tuples.dst_port,
4256                        rule->tuples_mask.dst_port);
4257                 calc_y(tmp_y_s, rule->tuples.dst_port,
4258                        rule->tuples_mask.dst_port);
4259                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4260                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4261
4262                 return true;
4263         default:
4264                 return false;
4265         }
4266 }
4267
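/* Build the port number field of the meta data key: either a host port
 * identified by pf_id/vf_id or a network port, selected by port_type.
 */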
4268 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4269                                  u8 vf_id, u8 network_port_id)
4270 {
4271         u32 port_number = 0;
4272
4273         if (port_type == HOST_PORT) {
4274                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4275                                 pf_id);
4276                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4277                                 vf_id);
4278                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4279         } else {
4280                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4281                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4282                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4283         }
4284
4285         return port_number;
4286 }
4287
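/* Pack the active meta data tuples (e.g. packet type, destination vport)
 * into a 32-bit word and shift them up to the MSB end of the meta data
 * region of the key.
 */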
4288 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4289                                        __le32 *key_x, __le32 *key_y,
4290                                        struct hclge_fd_rule *rule)
4291 {
4292         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4293         u8 cur_pos = 0, tuple_size, shift_bits;
4294         int i;
4295
4296         for (i = 0; i < MAX_META_DATA; i++) {
4297                 tuple_size = meta_data_key_info[i].key_length;
4298                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4299
4300                 switch (tuple_bit) {
4301                 case BIT(ROCE_TYPE):
4302                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4303                         cur_pos += tuple_size;
4304                         break;
4305                 case BIT(DST_VPORT):
4306                         port_number = hclge_get_port_number(HOST_PORT, 0,
4307                                                             rule->vf_id, 0);
4308                         hnae3_set_field(meta_data,
4309                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4310                                         cur_pos, port_number);
4311                         cur_pos += tuple_size;
4312                         break;
4313                 default:
4314                         break;
4315                 }
4316         }
4317
4318         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4319         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4320         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4321
4322         *key_x = cpu_to_le32(tmp_x << shift_bits);
4323         *key_y = cpu_to_le32(tmp_y << shift_bits);
4324 }
4325
4326 /* A complete key consists of a meta data key and a tuple key.
4327  * The meta data key is stored in the MSB region and the tuple key in the
4328  * LSB region; unused bits are filled with 0.
4329  */
4330 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4331                             struct hclge_fd_rule *rule)
4332 {
4333         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4334         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4335         u8 *cur_key_x, *cur_key_y;
4336         int i, ret, tuple_size;
4337         u8 meta_data_region;
4338
4339         memset(key_x, 0, sizeof(key_x));
4340         memset(key_y, 0, sizeof(key_y));
4341         cur_key_x = key_x;
4342         cur_key_y = key_y;
4343
4344         for (i = 0; i < MAX_TUPLE; i++) {
4345                 bool tuple_valid;
4346                 u32 check_tuple;
4347
4348                 tuple_size = tuple_key_info[i].key_length / 8;
4349                 check_tuple = key_cfg->tuple_active & BIT(i);
4350
4351                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4352                                                      cur_key_y, rule);
4353                 if (tuple_valid) {
4354                         cur_key_x += tuple_size;
4355                         cur_key_y += tuple_size;
4356                 }
4357         }
4358
4359         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4360                         MAX_META_DATA_LENGTH / 8;
4361
4362         hclge_fd_convert_meta_data(key_cfg,
4363                                    (__le32 *)(key_x + meta_data_region),
4364                                    (__le32 *)(key_y + meta_data_region),
4365                                    rule);
4366
4367         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4368                                    true);
4369         if (ret) {
4370                 dev_err(&hdev->pdev->dev,
4371                         "fd key_y config fail, loc=%d, ret=%d\n",
4372                         rule->location, ret);
4373                 return ret;
4374         }
4375
4376         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4377                                    true);
4378         if (ret)
4379                 dev_err(&hdev->pdev->dev,
4380                         "fd key_x config fail, loc=%d, ret=%d\n",
4381                         rule->location, ret);
4382         return ret;
4383 }
4384
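/* Translate a flow director rule into its action data (drop, or forward
 * to a direct queue) and write it to hardware at the rule's location.
 */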
4385 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4386                                struct hclge_fd_rule *rule)
4387 {
4388         struct hclge_fd_ad_data ad_data;
4389
4390         ad_data.ad_id = rule->location;
4391
4392         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4393                 ad_data.drop_packet = true;
4394                 ad_data.forward_to_direct_queue = false;
4395                 ad_data.queue_id = 0;
4396         } else {
4397                 ad_data.drop_packet = false;
4398                 ad_data.forward_to_direct_queue = true;
4399                 ad_data.queue_id = rule->queue_id;
4400         }
4401
4402         ad_data.use_counter = false;
4403         ad_data.counter_id = 0;
4404
4405         ad_data.use_next_stage = false;
4406         ad_data.next_input_key = 0;
4407
4408         ad_data.write_rule_id_to_bd = true;
4409         ad_data.rule_id = rule->location;
4410
4411         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4412 }
4413
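/* Validate an ethtool flow spec against what the flow director supports
 * and set a bit in *unused for every tuple the spec leaves unspecified.
 */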
4414 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4415                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4416 {
4417         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4418         struct ethtool_usrip4_spec *usr_ip4_spec;
4419         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4420         struct ethtool_usrip6_spec *usr_ip6_spec;
4421         struct ethhdr *ether_spec;
4422
4423         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4424                 return -EINVAL;
4425
4426         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4427                 return -EOPNOTSUPP;
4428
4429         if ((fs->flow_type & FLOW_EXT) &&
4430             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4431                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4432                 return -EOPNOTSUPP;
4433         }
4434
4435         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4436         case SCTP_V4_FLOW:
4437         case TCP_V4_FLOW:
4438         case UDP_V4_FLOW:
4439                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4440                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4441
4442                 if (!tcp_ip4_spec->ip4src)
4443                         *unused |= BIT(INNER_SRC_IP);
4444
4445                 if (!tcp_ip4_spec->ip4dst)
4446                         *unused |= BIT(INNER_DST_IP);
4447
4448                 if (!tcp_ip4_spec->psrc)
4449                         *unused |= BIT(INNER_SRC_PORT);
4450
4451                 if (!tcp_ip4_spec->pdst)
4452                         *unused |= BIT(INNER_DST_PORT);
4453
4454                 if (!tcp_ip4_spec->tos)
4455                         *unused |= BIT(INNER_IP_TOS);
4456
4457                 break;
4458         case IP_USER_FLOW:
4459                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4460                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4461                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4462
4463                 if (!usr_ip4_spec->ip4src)
4464                         *unused |= BIT(INNER_SRC_IP);
4465
4466                 if (!usr_ip4_spec->ip4dst)
4467                         *unused |= BIT(INNER_DST_IP);
4468
4469                 if (!usr_ip4_spec->tos)
4470                         *unused |= BIT(INNER_IP_TOS);
4471
4472                 if (!usr_ip4_spec->proto)
4473                         *unused |= BIT(INNER_IP_PROTO);
4474
4475                 if (usr_ip4_spec->l4_4_bytes)
4476                         return -EOPNOTSUPP;
4477
4478                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4479                         return -EOPNOTSUPP;
4480
4481                 break;
4482         case SCTP_V6_FLOW:
4483         case TCP_V6_FLOW:
4484         case UDP_V6_FLOW:
4485                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4486                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4487                         BIT(INNER_IP_TOS);
4488
4489                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4490                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4491                         *unused |= BIT(INNER_SRC_IP);
4492
4493                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4494                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4495                         *unused |= BIT(INNER_DST_IP);
4496
4497                 if (!tcp_ip6_spec->psrc)
4498                         *unused |= BIT(INNER_SRC_PORT);
4499
4500                 if (!tcp_ip6_spec->pdst)
4501                         *unused |= BIT(INNER_DST_PORT);
4502
4503                 if (tcp_ip6_spec->tclass)
4504                         return -EOPNOTSUPP;
4505
4506                 break;
4507         case IPV6_USER_FLOW:
4508                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4509                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4510                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4511                         BIT(INNER_DST_PORT);
4512
4513                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4514                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4515                         *unused |= BIT(INNER_SRC_IP);
4516
4517                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4518                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4519                         *unused |= BIT(INNER_DST_IP);
4520
4521                 if (!usr_ip6_spec->l4_proto)
4522                         *unused |= BIT(INNER_IP_PROTO);
4523
4524                 if (usr_ip6_spec->tclass)
4525                         return -EOPNOTSUPP;
4526
4527                 if (usr_ip6_spec->l4_4_bytes)
4528                         return -EOPNOTSUPP;
4529
4530                 break;
4531         case ETHER_FLOW:
4532                 ether_spec = &fs->h_u.ether_spec;
4533                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4534                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4535                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4536
4537                 if (is_zero_ether_addr(ether_spec->h_source))
4538                         *unused |= BIT(INNER_SRC_MAC);
4539
4540                 if (is_zero_ether_addr(ether_spec->h_dest))
4541                         *unused |= BIT(INNER_DST_MAC);
4542
4543                 if (!ether_spec->h_proto)
4544                         *unused |= BIT(INNER_ETH_TYPE);
4545
4546                 break;
4547         default:
4548                 return -EOPNOTSUPP;
4549         }
4550
4551         if ((fs->flow_type & FLOW_EXT)) {
4552                 if (fs->h_ext.vlan_etype)
4553                         return -EOPNOTSUPP;
4554                 if (!fs->h_ext.vlan_tci)
4555                         *unused |= BIT(INNER_VLAN_TAG_FST);
4556
4557                 if (fs->m_ext.vlan_tci) {
4558                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4559                                 return -EINVAL;
4560                 }
4561         } else {
4562                 *unused |= BIT(INNER_VLAN_TAG_FST);
4563         }
4564
4565         if (fs->flow_type & FLOW_MAC_EXT) {
4566                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4567                         return -EOPNOTSUPP;
4568
4569                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4570                         *unused |= BIT(INNER_DST_MAC);
4571                 else
4572                         *unused &= ~(BIT(INNER_DST_MAC));
4573         }
4574
4575         return 0;
4576 }
4577
4578 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4579 {
4580         struct hclge_fd_rule *rule = NULL;
4581         struct hlist_node *node2;
4582
4583         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4584                 if (rule->location >= location)
4585                         break;
4586         }
4587
4588         return rule && rule->location == location;
4589 }
4590
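/* Insert or remove a rule in the location-sorted rule list. Adding a rule
 * at an occupied location replaces the old entry; removing a nonexistent
 * rule is an error.
 */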
4591 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4592                                      struct hclge_fd_rule *new_rule,
4593                                      u16 location,
4594                                      bool is_add)
4595 {
4596         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4597         struct hlist_node *node2;
4598
4599         if (is_add && !new_rule)
4600                 return -EINVAL;
4601
4602         hlist_for_each_entry_safe(rule, node2,
4603                                   &hdev->fd_rule_list, rule_node) {
4604                 if (rule->location >= location)
4605                         break;
4606                 parent = rule;
4607         }
4608
4609         if (rule && rule->location == location) {
4610                 hlist_del(&rule->rule_node);
4611                 kfree(rule);
4612                 hdev->hclge_fd_rule_num--;
4613
4614                 if (!is_add)
4615                         return 0;
4616
4617         } else if (!is_add) {
4618                 dev_err(&hdev->pdev->dev,
4619                         "delete fail, rule %d does not exist\n",
4620                         location);
4621                 return -EINVAL;
4622         }
4623
4624         INIT_HLIST_NODE(&new_rule->rule_node);
4625
4626         if (parent)
4627                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4628         else
4629                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4630
4631         hdev->hclge_fd_rule_num++;
4632
4633         return 0;
4634 }
4635
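/* Fill the rule's tuple values and masks from the ethtool flow spec,
 * converting multi-byte fields to CPU byte order.
 */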
4636 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4637                               struct ethtool_rx_flow_spec *fs,
4638                               struct hclge_fd_rule *rule)
4639 {
4640         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4641
4642         switch (flow_type) {
4643         case SCTP_V4_FLOW:
4644         case TCP_V4_FLOW:
4645         case UDP_V4_FLOW:
4646                 rule->tuples.src_ip[3] =
4647                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4648                 rule->tuples_mask.src_ip[3] =
4649                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4650
4651                 rule->tuples.dst_ip[3] =
4652                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4653                 rule->tuples_mask.dst_ip[3] =
4654                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4655
4656                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4657                 rule->tuples_mask.src_port =
4658                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4659
4660                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4661                 rule->tuples_mask.dst_port =
4662                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4663
4664                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4665                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4666
4667                 rule->tuples.ether_proto = ETH_P_IP;
4668                 rule->tuples_mask.ether_proto = 0xFFFF;
4669
4670                 break;
4671         case IP_USER_FLOW:
4672                 rule->tuples.src_ip[3] =
4673                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4674                 rule->tuples_mask.src_ip[3] =
4675                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4676
4677                 rule->tuples.dst_ip[3] =
4678                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4679                 rule->tuples_mask.dst_ip[3] =
4680                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4681
4682                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4683                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4684
4685                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4686                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4687
4688                 rule->tuples.ether_proto = ETH_P_IP;
4689                 rule->tuples_mask.ether_proto = 0xFFFF;
4690
4691                 break;
4692         case SCTP_V6_FLOW:
4693         case TCP_V6_FLOW:
4694         case UDP_V6_FLOW:
4695                 be32_to_cpu_array(rule->tuples.src_ip,
4696                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
4697                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4698                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
4699
4700                 be32_to_cpu_array(rule->tuples.dst_ip,
4701                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
4702                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4703                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
4704
4705                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4706                 rule->tuples_mask.src_port =
4707                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4708
4709                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4710                 rule->tuples_mask.dst_port =
4711                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4712
4713                 rule->tuples.ether_proto = ETH_P_IPV6;
4714                 rule->tuples_mask.ether_proto = 0xFFFF;
4715
4716                 break;
4717         case IPV6_USER_FLOW:
4718                 be32_to_cpu_array(rule->tuples.src_ip,
4719                                   fs->h_u.usr_ip6_spec.ip6src, 4);
4720                 be32_to_cpu_array(rule->tuples_mask.src_ip,
4721                                   fs->m_u.usr_ip6_spec.ip6src, 4);
4722
4723                 be32_to_cpu_array(rule->tuples.dst_ip,
4724                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
4725                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
4726                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
4727
4728                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4729                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4730
4731                 rule->tuples.ether_proto = ETH_P_IPV6;
4732                 rule->tuples_mask.ether_proto = 0xFFFF;
4733
4734                 break;
4735         case ETHER_FLOW:
4736                 ether_addr_copy(rule->tuples.src_mac,
4737                                 fs->h_u.ether_spec.h_source);
4738                 ether_addr_copy(rule->tuples_mask.src_mac,
4739                                 fs->m_u.ether_spec.h_source);
4740
4741                 ether_addr_copy(rule->tuples.dst_mac,
4742                                 fs->h_u.ether_spec.h_dest);
4743                 ether_addr_copy(rule->tuples_mask.dst_mac,
4744                                 fs->m_u.ether_spec.h_dest);
4745
4746                 rule->tuples.ether_proto =
4747                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
4748                 rule->tuples_mask.ether_proto =
4749                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
4750
4751                 break;
4752         default:
4753                 return -EOPNOTSUPP;
4754         }
4755
4756         switch (flow_type) {
4757         case SCTP_V4_FLOW:
4758         case SCTP_V6_FLOW:
4759                 rule->tuples.ip_proto = IPPROTO_SCTP;
4760                 rule->tuples_mask.ip_proto = 0xFF;
4761                 break;
4762         case TCP_V4_FLOW:
4763         case TCP_V6_FLOW:
4764                 rule->tuples.ip_proto = IPPROTO_TCP;
4765                 rule->tuples_mask.ip_proto = 0xFF;
4766                 break;
4767         case UDP_V4_FLOW:
4768         case UDP_V6_FLOW:
4769                 rule->tuples.ip_proto = IPPROTO_UDP;
4770                 rule->tuples_mask.ip_proto = 0xFF;
4771                 break;
4772         default:
4773                 break;
4774         }
4775
4776         if ((fs->flow_type & FLOW_EXT)) {
4777                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4778                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4779         }
4780
4781         if (fs->flow_type & FLOW_MAC_EXT) {
4782                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4783                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4784         }
4785
4786         return 0;
4787 }
4788
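/* Add a flow director rule from ethtool: validate the spec, resolve the
 * destination vport and queue, program the action and key, then record
 * the rule in the rule list.
 */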
4789 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4790                               struct ethtool_rxnfc *cmd)
4791 {
4792         struct hclge_vport *vport = hclge_get_vport(handle);
4793         struct hclge_dev *hdev = vport->back;
4794         u16 dst_vport_id = 0, q_index = 0;
4795         struct ethtool_rx_flow_spec *fs;
4796         struct hclge_fd_rule *rule;
4797         u32 unused = 0;
4798         u8 action;
4799         int ret;
4800
4801         if (!hnae3_dev_fd_supported(hdev))
4802                 return -EOPNOTSUPP;
4803
4804         if (!hdev->fd_en) {
4805                 dev_warn(&hdev->pdev->dev,
4806                          "Please enable flow director first\n");
4807                 return -EOPNOTSUPP;
4808         }
4809
4810         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4811
4812         ret = hclge_fd_check_spec(hdev, fs, &unused);
4813         if (ret) {
4814                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4815                 return ret;
4816         }
4817
4818         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4819                 action = HCLGE_FD_ACTION_DROP_PACKET;
4820         } else {
4821                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4822                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4823                 u16 tqps;
4824
4825                 if (vf > hdev->num_req_vfs) {
4826                         dev_err(&hdev->pdev->dev,
4827                                 "Error: vf id (%d) > max vf num (%d)\n",
4828                                 vf, hdev->num_req_vfs);
4829                         return -EINVAL;
4830                 }
4831
4832                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4833                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4834
4835                 if (ring >= tqps) {
4836                         dev_err(&hdev->pdev->dev,
4837                                 "Error: queue id (%d) > max tqp num (%d)\n",
4838                                 ring, tqps - 1);
4839                         return -EINVAL;
4840                 }
4841
4842                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4843                 q_index = ring;
4844         }
4845
4846         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4847         if (!rule)
4848                 return -ENOMEM;
4849
4850         ret = hclge_fd_get_tuple(hdev, fs, rule);
4851         if (ret)
4852                 goto free_rule;
4853
4854         rule->flow_type = fs->flow_type;
4855
4856         rule->location = fs->location;
4857         rule->unused_tuple = unused;
4858         rule->vf_id = dst_vport_id;
4859         rule->queue_id = q_index;
4860         rule->action = action;
4861
4862         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4863         if (ret)
4864                 goto free_rule;
4865
4866         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4867         if (ret)
4868                 goto free_rule;
4869
4870         ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4871         if (ret)
4872                 goto free_rule;
4873
4874         return ret;
4875
4876 free_rule:
4877         kfree(rule);
4878         return ret;
4879 }
4880
4881 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4882                               struct ethtool_rxnfc *cmd)
4883 {
4884         struct hclge_vport *vport = hclge_get_vport(handle);
4885         struct hclge_dev *hdev = vport->back;
4886         struct ethtool_rx_flow_spec *fs;
4887         int ret;
4888
4889         if (!hnae3_dev_fd_supported(hdev))
4890                 return -EOPNOTSUPP;
4891
4892         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4893
4894         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4895                 return -EINVAL;
4896
4897         if (!hclge_fd_rule_exist(hdev, fs->location)) {
4898                 dev_err(&hdev->pdev->dev,
4899                         "Delete fail, rule %d does not exist\n",
4900                         fs->location);
4901                 return -ENOENT;
4902         }
4903
4904         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4905                                    fs->location, NULL, false);
4906         if (ret)
4907                 return ret;
4908
4909         return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4910                                          false);
4911 }
4912
4913 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4914                                      bool clear_list)
4915 {
4916         struct hclge_vport *vport = hclge_get_vport(handle);
4917         struct hclge_dev *hdev = vport->back;
4918         struct hclge_fd_rule *rule;
4919         struct hlist_node *node;
4920
4921         if (!hnae3_dev_fd_supported(hdev))
4922                 return;
4923
4924         if (clear_list) {
4925                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4926                                           rule_node) {
4927                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4928                                              rule->location, NULL, false);
4929                         hlist_del(&rule->rule_node);
4930                         kfree(rule);
4931                         hdev->hclge_fd_rule_num--;
4932                 }
4933         } else {
4934                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4935                                           rule_node)
4936                         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4937                                              rule->location, NULL, false);
4938         }
4939 }
4940
4941 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4942 {
4943         struct hclge_vport *vport = hclge_get_vport(handle);
4944         struct hclge_dev *hdev = vport->back;
4945         struct hclge_fd_rule *rule;
4946         struct hlist_node *node;
4947         int ret;
4948
4949         /* Return ok here, because reset error handling will check this
4950          * return value. If error is returned here, the reset process will
4951          * fail.
4952          */
4953         if (!hnae3_dev_fd_supported(hdev))
4954                 return 0;
4955
4956         /* if fd is disabled, the rules should not be restored during reset */
4957         if (!hdev->fd_en)
4958                 return 0;
4959
4960         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4961                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4962                 if (!ret)
4963                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4964
4965                 if (ret) {
4966                         dev_warn(&hdev->pdev->dev,
4967                                  "Restore rule %d failed, remove it\n",
4968                                  rule->location);
4969                         hlist_del(&rule->rule_node);
4970                         kfree(rule);
4971                         hdev->hclge_fd_rule_num--;
4972                 }
4973         }
4974         return 0;
4975 }
4976
4977 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4978                                  struct ethtool_rxnfc *cmd)
4979 {
4980         struct hclge_vport *vport = hclge_get_vport(handle);
4981         struct hclge_dev *hdev = vport->back;
4982
4983         if (!hnae3_dev_fd_supported(hdev))
4984                 return -EOPNOTSUPP;
4985
4986         cmd->rule_cnt = hdev->hclge_fd_rule_num;
4987         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
4988
4989         return 0;
4990 }
4991
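/* Fill an ethtool flow spec from the stored rule at the requested
 * location so that the rule can be displayed by ethtool.
 */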
4992 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
4993                                   struct ethtool_rxnfc *cmd)
4994 {
4995         struct hclge_vport *vport = hclge_get_vport(handle);
4996         struct hclge_fd_rule *rule = NULL;
4997         struct hclge_dev *hdev = vport->back;
4998         struct ethtool_rx_flow_spec *fs;
4999         struct hlist_node *node2;
5000
5001         if (!hnae3_dev_fd_supported(hdev))
5002                 return -EOPNOTSUPP;
5003
5004         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5005
5006         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5007                 if (rule->location >= fs->location)
5008                         break;
5009         }
5010
5011         if (!rule || fs->location != rule->location)
5012                 return -ENOENT;
5013
5014         fs->flow_type = rule->flow_type;
5015         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5016         case SCTP_V4_FLOW:
5017         case TCP_V4_FLOW:
5018         case UDP_V4_FLOW:
5019                 fs->h_u.tcp_ip4_spec.ip4src =
5020                                 cpu_to_be32(rule->tuples.src_ip[3]);
5021                 fs->m_u.tcp_ip4_spec.ip4src =
5022                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5023                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5024
5025                 fs->h_u.tcp_ip4_spec.ip4dst =
5026                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5027                 fs->m_u.tcp_ip4_spec.ip4dst =
5028                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5029                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5030
5031                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5032                 fs->m_u.tcp_ip4_spec.psrc =
5033                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5034                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5035
5036                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5037                 fs->m_u.tcp_ip4_spec.pdst =
5038                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5039                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5040
5041                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5042                 fs->m_u.tcp_ip4_spec.tos =
5043                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5044                                 0 : rule->tuples_mask.ip_tos;
5045
5046                 break;
5047         case IP_USER_FLOW:
5048                 fs->h_u.usr_ip4_spec.ip4src =
5049                                 cpu_to_be32(rule->tuples.src_ip[3]);
5050                 fs->m_u.usr_ip4_spec.ip4src =
5051                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5052                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5053
5054                 fs->h_u.usr_ip4_spec.ip4dst =
5055                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5056                 fs->m_u.usr_ip4_spec.ip4dst =
5057                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5058                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5059
5060                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5061                 fs->m_u.usr_ip4_spec.tos =
5062                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5063                                 0 : rule->tuples_mask.ip_tos;
5064
5065                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5066                 fs->m_u.usr_ip4_spec.proto =
5067                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5068                                 0 : rule->tuples_mask.ip_proto;
5069
5070                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5071
5072                 break;
5073         case SCTP_V6_FLOW:
5074         case TCP_V6_FLOW:
5075         case UDP_V6_FLOW:
5076                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5077                                   rule->tuples.src_ip, 4);
5078                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5079                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5080                 else
5081                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5082                                           rule->tuples_mask.src_ip, 4);
5083
5084                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5085                                   rule->tuples.dst_ip, 4);
5086                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5087                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5088                 else
5089                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5090                                           rule->tuples_mask.dst_ip, 4);
5091
5092                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5093                 fs->m_u.tcp_ip6_spec.psrc =
5094                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5095                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5096
5097                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5098                 fs->m_u.tcp_ip6_spec.pdst =
5099                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5100                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5101
5102                 break;
5103         case IPV6_USER_FLOW:
5104                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5105                                   rule->tuples.src_ip, 4);
5106                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5107                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5108                 else
5109                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5110                                           rule->tuples_mask.src_ip, 4);
5111
5112                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5113                                   rule->tuples.dst_ip, 4);
5114                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5115                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5116                 else
5117                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5118                                           rule->tuples_mask.dst_ip, 4);
5119
5120                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5121                 fs->m_u.usr_ip6_spec.l4_proto =
5122                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5123                                 0 : rule->tuples_mask.ip_proto;
5124
5125                 break;
5126         case ETHER_FLOW:
5127                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5128                                 rule->tuples.src_mac);
5129                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5130                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5131                 else
5132                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5133                                         rule->tuples_mask.src_mac);
5134
5135                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5136                                 rule->tuples.dst_mac);
5137                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5138                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5139                 else
5140                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5141                                         rule->tuples_mask.dst_mac);
5142
5143                 fs->h_u.ether_spec.h_proto =
5144                                 cpu_to_be16(rule->tuples.ether_proto);
5145                 fs->m_u.ether_spec.h_proto =
5146                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5147                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5148
5149                 break;
5150         default:
5151                 return -EOPNOTSUPP;
5152         }
5153
5154         if (fs->flow_type & FLOW_EXT) {
5155                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5156                 fs->m_ext.vlan_tci =
5157                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5158                                 cpu_to_be16(VLAN_VID_MASK) :
5159                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5160         }
5161
5162         if (fs->flow_type & FLOW_MAC_EXT) {
5163                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5164                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5165                         eth_zero_addr(fs->m_ext.h_dest);
5166                 else
5167                         ether_addr_copy(fs->m_ext.h_dest,
5168                                         rule->tuples_mask.dst_mac);
5169         }
5170
5171         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5172                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5173         } else {
5174                 u64 vf_id;
5175
5176                 fs->ring_cookie = rule->queue_id;
5177                 vf_id = rule->vf_id;
5178                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5179                 fs->ring_cookie |= vf_id;
5180         }
5181
5182         return 0;
5183 }
5184
5185 static int hclge_get_all_rules(struct hnae3_handle *handle,
5186                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5187 {
5188         struct hclge_vport *vport = hclge_get_vport(handle);
5189         struct hclge_dev *hdev = vport->back;
5190         struct hclge_fd_rule *rule;
5191         struct hlist_node *node2;
5192         int cnt = 0;
5193
5194         if (!hnae3_dev_fd_supported(hdev))
5195                 return -EOPNOTSUPP;
5196
5197         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5198
5199         hlist_for_each_entry_safe(rule, node2,
5200                                   &hdev->fd_rule_list, rule_node) {
5201                 if (cnt == cmd->rule_cnt)
5202                         return -EMSGSIZE;
5203
5204                 rule_locs[cnt] = rule->location;
5205                 cnt++;
5206         }
5207
5208         cmd->rule_cnt = cnt;
5209
5210         return 0;
5211 }
5212
5213 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5214 {
5215         struct hclge_vport *vport = hclge_get_vport(handle);
5216         struct hclge_dev *hdev = vport->back;
5217
5218         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5219                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5220 }
5221
5222 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5223 {
5224         struct hclge_vport *vport = hclge_get_vport(handle);
5225         struct hclge_dev *hdev = vport->back;
5226
5227         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5228 }
5229
5230 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5231 {
5232         struct hclge_vport *vport = hclge_get_vport(handle);
5233         struct hclge_dev *hdev = vport->back;
5234
5235         return hdev->reset_count;
5236 }
5237
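/* Toggle the flow director: when disabling, invalidate all rules in
 * hardware but keep the software list; when enabling, restore the rules
 * from the list.
 */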
5238 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5239 {
5240         struct hclge_vport *vport = hclge_get_vport(handle);
5241         struct hclge_dev *hdev = vport->back;
5242
5243         hdev->fd_en = enable;
5244         if (!enable)
5245                 hclge_del_all_fd_entries(handle, false);
5246         else
5247                 hclge_restore_fd_entries(handle);
5248 }
5249
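/* Enable or disable MAC TX/RX together with padding, FCS and oversize/
 * undersize handling in a single MAC mode configure command.
 */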
5250 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5251 {
5252         struct hclge_desc desc;
5253         struct hclge_config_mac_mode_cmd *req =
5254                 (struct hclge_config_mac_mode_cmd *)desc.data;
5255         u32 loop_en = 0;
5256         int ret;
5257
5258         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5259         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5260         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5261         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5262         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5263         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5264         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5265         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5266         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5267         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5268         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5269         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5270         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5271         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5272         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5273         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5274
5275         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5276         if (ret)
5277                 dev_err(&hdev->pdev->dev,
5278                         "mac enable fail, ret =%d.\n", ret);
5279 }
5280
5281 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5282 {
5283         struct hclge_config_mac_mode_cmd *req;
5284         struct hclge_desc desc;
5285         u32 loop_en;
5286         int ret;
5287
5288         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5289         /* 1 Read out the MAC mode config at first */
5290         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5291         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5292         if (ret) {
5293                 dev_err(&hdev->pdev->dev,
5294                         "mac loopback get fail, ret =%d.\n", ret);
5295                 return ret;
5296         }
5297
5298         /* 2 Then setup the loopback flag */
5299         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5300         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5301         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5302         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5303
5304         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5305
5306         /* 3 Config mac work mode with loopback flag
5307          * and its original configure parameters
5308          */
5309         hclge_cmd_reuse_desc(&desc, false);
5310         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5311         if (ret)
5312                 dev_err(&hdev->pdev->dev,
5313                         "mac loopback set fail, ret =%d.\n", ret);
5314         return ret;
5315 }
5316
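/* Enable or disable serdes (serial or parallel) internal loopback, poll
 * the command result until the done flag is set, then wait for the MAC
 * link state to match the requested mode.
 */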
5317 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5318                                      enum hnae3_loop loop_mode)
5319 {
5320 #define HCLGE_SERDES_RETRY_MS   10
5321 #define HCLGE_SERDES_RETRY_NUM  100
5322
5323 #define HCLGE_MAC_LINK_STATUS_MS   20
5324 #define HCLGE_MAC_LINK_STATUS_NUM  10
5325 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5326 #define HCLGE_MAC_LINK_STATUS_UP   1
5327
5328         struct hclge_serdes_lb_cmd *req;
5329         struct hclge_desc desc;
5330         int mac_link_ret = 0;
5331         int ret, i = 0;
5332         u8 loop_mode_b;
5333
5334         req = (struct hclge_serdes_lb_cmd *)desc.data;
5335         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5336
5337         switch (loop_mode) {
5338         case HNAE3_LOOP_SERIAL_SERDES:
5339                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5340                 break;
5341         case HNAE3_LOOP_PARALLEL_SERDES:
5342                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5343                 break;
5344         default:
5345                 dev_err(&hdev->pdev->dev,
5346                         "unsupported serdes loopback mode %d\n", loop_mode);
5347                 return -ENOTSUPP;
5348         }
5349
5350         if (en) {
5351                 req->enable = loop_mode_b;
5352                 req->mask = loop_mode_b;
5353                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5354         } else {
5355                 req->mask = loop_mode_b;
5356                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5357         }
5358
5359         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5360         if (ret) {
5361                 dev_err(&hdev->pdev->dev,
5362                         "serdes loopback set fail, ret = %d\n", ret);
5363                 return ret;
5364         }
5365
5366         do {
5367                 msleep(HCLGE_SERDES_RETRY_MS);
5368                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5369                                            true);
5370                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5371                 if (ret) {
5372                         dev_err(&hdev->pdev->dev,
5373                                 "serdes loopback get fail, ret = %d\n", ret);
5374                         return ret;
5375                 }
5376         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5377                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5378
5379         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5380                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5381                 return -EBUSY;
5382         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5383                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5384                 return -EIO;
5385         }
5386
5387         hclge_cfg_mac_mode(hdev, en);
5388
5389         i = 0;
5390         do {
5391                 /* serdes internal loopback, independent of the network cable */
5392                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5393                 ret = hclge_get_mac_link_status(hdev);
5394                 if (ret == mac_link_ret)
5395                         return 0;
5396         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5397
5398         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5399
5400         return -EBUSY;
5401 }
5402
5403 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5404                             int stream_id, bool enable)
5405 {
5406         struct hclge_desc desc;
5407         struct hclge_cfg_com_tqp_queue_cmd *req =
5408                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5409         int ret;
5410
5411         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5412         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5413         req->stream_id = cpu_to_le16(stream_id);
5414         req->enable |= enable << HCLGE_TQP_ENABLE_B;
5415
5416         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5417         if (ret)
5418                 dev_err(&hdev->pdev->dev,
5419                         "Tqp enable fail, status =%d.\n", ret);
5420         return ret;
5421 }
5422
5423 static int hclge_set_loopback(struct hnae3_handle *handle,
5424                               enum hnae3_loop loop_mode, bool en)
5425 {
5426         struct hclge_vport *vport = hclge_get_vport(handle);
5427         struct hnae3_knic_private_info *kinfo;
5428         struct hclge_dev *hdev = vport->back;
5429         int i, ret;
5430
5431         switch (loop_mode) {
5432         case HNAE3_LOOP_APP:
5433                 ret = hclge_set_app_loopback(hdev, en);
5434                 break;
5435         case HNAE3_LOOP_SERIAL_SERDES:
5436         case HNAE3_LOOP_PARALLEL_SERDES:
5437                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5438                 break;
5439         default:
5440                 ret = -ENOTSUPP;
5441                 dev_err(&hdev->pdev->dev,
5442                         "loop_mode %d is not supported\n", loop_mode);
5443                 break;
5444         }
5445
5446         if (ret)
5447                 return ret;
5448
5449         kinfo = &vport->nic.kinfo;
5450         for (i = 0; i < kinfo->num_tqps; i++) {
5451                 ret = hclge_tqp_enable(hdev, i, 0, en);
5452                 if (ret)
5453                         return ret;
5454         }
5455
5456         return 0;
5457 }
5458
5459 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5460 {
5461         struct hclge_vport *vport = hclge_get_vport(handle);
5462         struct hnae3_knic_private_info *kinfo;
5463         struct hnae3_queue *queue;
5464         struct hclge_tqp *tqp;
5465         int i;
5466
5467         kinfo = &vport->nic.kinfo;
5468         for (i = 0; i < kinfo->num_tqps; i++) {
5469                 queue = handle->kinfo.tqp[i];
5470                 tqp = container_of(queue, struct hclge_tqp, q);
5471                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5472         }
5473 }
5474
5475 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5476 {
5477         struct hclge_vport *vport = hclge_get_vport(handle);
5478         struct hclge_dev *hdev = vport->back;
5479
5480         if (enable) {
5481                 mod_timer(&hdev->service_timer, jiffies + HZ);
5482         } else {
5483                 del_timer_sync(&hdev->service_timer);
5484                 cancel_work_sync(&hdev->service_task);
5485                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5486         }
5487 }
5488
5489 static int hclge_ae_start(struct hnae3_handle *handle)
5490 {
5491         struct hclge_vport *vport = hclge_get_vport(handle);
5492         struct hclge_dev *hdev = vport->back;
5493
5494         /* mac enable */
5495         hclge_cfg_mac_mode(hdev, true);
5496         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5497         hdev->hw.mac.link = 0;
5498
5499         /* reset tqp stats */
5500         hclge_reset_tqp_stats(handle);
5501
5502         hclge_mac_start_phy(hdev);
5503
5504         return 0;
5505 }
5506
5507 static void hclge_ae_stop(struct hnae3_handle *handle)
5508 {
5509         struct hclge_vport *vport = hclge_get_vport(handle);
5510         struct hclge_dev *hdev = vport->back;
5511         int i;
5512
5513         set_bit(HCLGE_STATE_DOWN, &hdev->state);
5514
5515         /* If it is not PF reset, the firmware will disable the MAC,
5516          * so we only need to stop the PHY here.
5517          */
5518         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5519             hdev->reset_type != HNAE3_FUNC_RESET) {
5520                 hclge_mac_stop_phy(hdev);
5521                 return;
5522         }
5523
5524         for (i = 0; i < handle->kinfo.num_tqps; i++)
5525                 hclge_reset_tqp(handle, i);
5526
5527         /* Mac disable */
5528         hclge_cfg_mac_mode(hdev, false);
5529
5530         hclge_mac_stop_phy(hdev);
5531
5532         /* reset tqp stats */
5533         hclge_reset_tqp_stats(handle);
5534         hclge_update_link_status(hdev);
5535 }
5536
5537 int hclge_vport_start(struct hclge_vport *vport)
5538 {
5539         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5540         vport->last_active_jiffies = jiffies;
5541         return 0;
5542 }
5543
5544 void hclge_vport_stop(struct hclge_vport *vport)
5545 {
5546         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5547 }
5548
5549 static int hclge_client_start(struct hnae3_handle *handle)
5550 {
5551         struct hclge_vport *vport = hclge_get_vport(handle);
5552
5553         return hclge_vport_start(vport);
5554 }
5555
5556 static void hclge_client_stop(struct hnae3_handle *handle)
5557 {
5558         struct hclge_vport *vport = hclge_get_vport(handle);
5559
5560         hclge_vport_stop(vport);
5561 }
5562
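/* Translate the MAC/VLAN table command response code into an errno,
 * depending on whether the operation was an add, remove or lookup.
 */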
5563 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5564                                          u16 cmdq_resp, u8  resp_code,
5565                                          enum hclge_mac_vlan_tbl_opcode op)
5566 {
5567         struct hclge_dev *hdev = vport->back;
5568         int return_status = -EIO;
5569
5570         if (cmdq_resp) {
5571                 dev_err(&hdev->pdev->dev,
5572                         "cmdq execute failed for get_mac_vlan_cmd_status, status=%d.\n",
5573                         cmdq_resp);
5574                 return -EIO;
5575         }
5576
5577         if (op == HCLGE_MAC_VLAN_ADD) {
5578                 if ((!resp_code) || (resp_code == 1)) {
5579                         return_status = 0;
5580                 } else if (resp_code == 2) {
5581                         return_status = -ENOSPC;
5582                         dev_err(&hdev->pdev->dev,
5583                                 "add mac addr failed for uc_overflow.\n");
5584                 } else if (resp_code == 3) {
5585                         return_status = -ENOSPC;
5586                         dev_err(&hdev->pdev->dev,
5587                                 "add mac addr failed for mc_overflow.\n");
5588                 } else {
5589                         dev_err(&hdev->pdev->dev,
5590                                 "add mac addr failed for undefined, code=%d.\n",
5591                                 resp_code);
5592                 }
5593         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5594                 if (!resp_code) {
5595                         return_status = 0;
5596                 } else if (resp_code == 1) {
5597                         return_status = -ENOENT;
5598                         dev_dbg(&hdev->pdev->dev,
5599                                 "remove mac addr failed for miss.\n");
5600                 } else {
5601                         dev_err(&hdev->pdev->dev,
5602                                 "remove mac addr failed for undefined, code=%d.\n",
5603                                 resp_code);
5604                 }
5605         } else if (op == HCLGE_MAC_VLAN_LKUP) {
5606                 if (!resp_code) {
5607                         return_status = 0;
5608                 } else if (resp_code == 1) {
5609                         return_status = -ENOENT;
5610                         dev_dbg(&hdev->pdev->dev,
5611                                 "lookup mac addr failed for miss.\n");
5612                 } else {
5613                         dev_err(&hdev->pdev->dev,
5614                                 "lookup mac addr failed for undefined, code=%d.\n",
5615                                 resp_code);
5616                 }
5617         } else {
5618                 return_status = -EINVAL;
5619                 dev_err(&hdev->pdev->dev,
5620                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5621                         op);
5622         }
5623
5624         return return_status;
5625 }
5626
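/* Set or clear the bit for vfid in the VF bitmap of a multicast MAC-VLAN
 * table entry: vfid 0-191 lives in desc[1], vfid 192-255 in desc[2].
 * Returns -EIO if vfid is out of range.
 */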
5627 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5628 {
5629         int word_num;
5630         int bit_num;
5631
5632         if (vfid > 255 || vfid < 0)
5633                 return -EIO;
5634
5635         if (vfid >= 0 && vfid <= 191) {
5636                 word_num = vfid / 32;
5637                 bit_num  = vfid % 32;
5638                 if (clr)
5639                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5640                 else
5641                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5642         } else {
5643                 word_num = (vfid - 192) / 32;
5644                 bit_num  = vfid % 32;
5645                 if (clr)
5646                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5647                 else
5648                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5649         }
5650
5651         return 0;
5652 }
5653
5654 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5655 {
5656 #define HCLGE_DESC_NUMBER 3
5657 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5658         int i, j;
5659
5660         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5661                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5662                         if (desc[i].data[j])
5663                                 return false;
5664
5665         return true;
5666 }
5667
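/* Pack the 6-byte MAC address into the hi32/lo16 fields of a MAC-VLAN
 * table entry and mark the entry valid; for a multicast address also set
 * the multicast entry type and enable bits.
 */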
5668 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5669                                    const u8 *addr, bool is_mc)
5670 {
5671         const unsigned char *mac_addr = addr;
5672         u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
5673                        (mac_addr[2] << 16) | (mac_addr[3] << 24);
5674         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5675
5676         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5677         if (is_mc) {
5678                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5679                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5680         }
5681
5682         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5683         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5684 }
5685
5686 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5687                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
5688 {
5689         struct hclge_dev *hdev = vport->back;
5690         struct hclge_desc desc;
5691         u8 resp_code;
5692         u16 retval;
5693         int ret;
5694
5695         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5696
5697         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5698
5699         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5700         if (ret) {
5701                 dev_err(&hdev->pdev->dev,
5702                         "del mac addr failed for cmd_send, ret =%d.\n",
5703                         ret);
5704                 return ret;
5705         }
5706         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5707         retval = le16_to_cpu(desc.retval);
5708
5709         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5710                                              HCLGE_MAC_VLAN_REMOVE);
5711 }
5712
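/* Look up an entry in the MAC-VLAN table. A multicast lookup uses three
 * chained descriptors, a unicast lookup a single one; the descriptors
 * filled by hardware are handed back to the caller through desc.
 */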
5713 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5714                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
5715                                      struct hclge_desc *desc,
5716                                      bool is_mc)
5717 {
5718         struct hclge_dev *hdev = vport->back;
5719         u8 resp_code;
5720         u16 retval;
5721         int ret;
5722
5723         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5724         if (is_mc) {
5725                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5726                 memcpy(desc[0].data,
5727                        req,
5728                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5729                 hclge_cmd_setup_basic_desc(&desc[1],
5730                                            HCLGE_OPC_MAC_VLAN_ADD,
5731                                            true);
5732                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5733                 hclge_cmd_setup_basic_desc(&desc[2],
5734                                            HCLGE_OPC_MAC_VLAN_ADD,
5735                                            true);
5736                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5737         } else {
5738                 memcpy(desc[0].data,
5739                        req,
5740                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5741                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
5742         }
5743         if (ret) {
5744                 dev_err(&hdev->pdev->dev,
5745                         "lookup mac addr failed for cmd_send, ret =%d.\n",
5746                         ret);
5747                 return ret;
5748         }
5749         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5750         retval = le16_to_cpu(desc[0].retval);
5751
5752         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5753                                              HCLGE_MAC_VLAN_LKUP);
5754 }
5755
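/* Write an entry into the MAC-VLAN table. A unicast entry is sent with a
 * single locally built descriptor; a multicast entry reuses the three
 * descriptors returned by a previous lookup (mc_desc) so the VF bitmap
 * set up by the caller is carried along.
 */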
5756 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5757                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
5758                                   struct hclge_desc *mc_desc)
5759 {
5760         struct hclge_dev *hdev = vport->back;
5761         int cfg_status;
5762         u8 resp_code;
5763         u16 retval;
5764         int ret;
5765
5766         if (!mc_desc) {
5767                 struct hclge_desc desc;
5768
5769                 hclge_cmd_setup_basic_desc(&desc,
5770                                            HCLGE_OPC_MAC_VLAN_ADD,
5771                                            false);
5772                 memcpy(desc.data, req,
5773                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5774                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5775                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5776                 retval = le16_to_cpu(desc.retval);
5777
5778                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5779                                                            resp_code,
5780                                                            HCLGE_MAC_VLAN_ADD);
5781         } else {
5782                 hclge_cmd_reuse_desc(&mc_desc[0], false);
5783                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5784                 hclge_cmd_reuse_desc(&mc_desc[1], false);
5785                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5786                 hclge_cmd_reuse_desc(&mc_desc[2], false);
5787                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5788                 memcpy(mc_desc[0].data, req,
5789                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5790                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5791                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5792                 retval = le16_to_cpu(mc_desc[0].retval);
5793
5794                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5795                                                            resp_code,
5796                                                            HCLGE_MAC_VLAN_ADD);
5797         }
5798
5799         if (ret) {
5800                 dev_err(&hdev->pdev->dev,
5801                         "add mac addr failed for cmd_send, ret =%d.\n",
5802                         ret);
5803                 return ret;
5804         }
5805
5806         return cfg_status;
5807 }
5808
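/* Ask the firmware to allocate unicast MAC (UMV) table space and split
 * the allocation into a private quota per function plus a shared pool
 * holding the rest.
 */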
5809 static int hclge_init_umv_space(struct hclge_dev *hdev)
5810 {
5811         u16 allocated_size = 0;
5812         int ret;
5813
5814         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5815                                   true);
5816         if (ret)
5817                 return ret;
5818
5819         if (allocated_size < hdev->wanted_umv_size)
5820                 dev_warn(&hdev->pdev->dev,
5821                          "Alloc umv space failed, want %d, get %d\n",
5822                          hdev->wanted_umv_size, allocated_size);
5823
5824         mutex_init(&hdev->umv_mutex);
5825         hdev->max_umv_size = allocated_size;
5826         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5827         hdev->share_umv_size = hdev->priv_umv_size +
5828                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
5829
5830         return 0;
5831 }
5832
5833 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5834 {
5835         int ret;
5836
5837         if (hdev->max_umv_size > 0) {
5838                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5839                                           false);
5840                 if (ret)
5841                         return ret;
5842                 hdev->max_umv_size = 0;
5843         }
5844         mutex_destroy(&hdev->umv_mutex);
5845
5846         return 0;
5847 }
5848
5849 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5850                                u16 *allocated_size, bool is_alloc)
5851 {
5852         struct hclge_umv_spc_alc_cmd *req;
5853         struct hclge_desc desc;
5854         int ret;
5855
5856         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5857         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5858         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5859         req->space_size = cpu_to_le32(space_size);
5860
5861         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5862         if (ret) {
5863                 dev_err(&hdev->pdev->dev,
5864                         "%s umv space failed for cmd_send, ret =%d\n",
5865                         is_alloc ? "allocate" : "free", ret);
5866                 return ret;
5867         }
5868
5869         if (is_alloc && allocated_size)
5870                 *allocated_size = le32_to_cpu(desc.data[1]);
5871
5872         return 0;
5873 }
5874
5875 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5876 {
5877         struct hclge_vport *vport;
5878         int i;
5879
5880         for (i = 0; i < hdev->num_alloc_vport; i++) {
5881                 vport = &hdev->vport[i];
5882                 vport->used_umv_num = 0;
5883         }
5884
5885         mutex_lock(&hdev->umv_mutex);
5886         hdev->share_umv_size = hdev->priv_umv_size +
5887                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
5888         mutex_unlock(&hdev->umv_mutex);
5889 }
5890
5891 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5892 {
5893         struct hclge_dev *hdev = vport->back;
5894         bool is_full;
5895
5896         mutex_lock(&hdev->umv_mutex);
5897         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5898                    hdev->share_umv_size == 0);
5899         mutex_unlock(&hdev->umv_mutex);
5900
5901         return is_full;
5902 }
5903
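/* Adjust the UMV accounting when a unicast entry is added or freed: a
 * vport consumes its private quota first and then the shared pool, and
 * returns space in the reverse order when an entry is removed.
 */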
5904 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5905 {
5906         struct hclge_dev *hdev = vport->back;
5907
5908         mutex_lock(&hdev->umv_mutex);
5909         if (is_free) {
5910                 if (vport->used_umv_num > hdev->priv_umv_size)
5911                         hdev->share_umv_size++;
5912
5913                 if (vport->used_umv_num > 0)
5914                         vport->used_umv_num--;
5915         } else {
5916                 if (vport->used_umv_num >= hdev->priv_umv_size &&
5917                     hdev->share_umv_size > 0)
5918                         hdev->share_umv_size--;
5919                 vport->used_umv_num++;
5920         }
5921         mutex_unlock(&hdev->umv_mutex);
5922 }
5923
5924 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5925                              const unsigned char *addr)
5926 {
5927         struct hclge_vport *vport = hclge_get_vport(handle);
5928
5929         return hclge_add_uc_addr_common(vport, addr);
5930 }
5931
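/* Add a unicast MAC address for a vport: reject invalid addresses, look
 * the address up in the MAC-VLAN table and only add it when it is not
 * already present and UMV space is available. A duplicate entry is
 * treated as success with a warning.
 */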
5932 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5933                              const unsigned char *addr)
5934 {
5935         struct hclge_dev *hdev = vport->back;
5936         struct hclge_mac_vlan_tbl_entry_cmd req;
5937         struct hclge_desc desc;
5938         u16 egress_port = 0;
5939         int ret;
5940
5941         /* mac addr check */
5942         if (is_zero_ether_addr(addr) ||
5943             is_broadcast_ether_addr(addr) ||
5944             is_multicast_ether_addr(addr)) {
5945                 dev_err(&hdev->pdev->dev,
5946                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5947                          addr,
5948                          is_zero_ether_addr(addr),
5949                          is_broadcast_ether_addr(addr),
5950                          is_multicast_ether_addr(addr));
5951                 return -EINVAL;
5952         }
5953
5954         memset(&req, 0, sizeof(req));
5955
5956         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5957                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5958
5959         req.egress_port = cpu_to_le16(egress_port);
5960
5961         hclge_prepare_mac_addr(&req, addr, false);
5962
5963         /* Look up the mac address in the mac_vlan table, and add
5964          * it if the entry does not exist. Duplicate unicast entries
5965          * are not allowed in the mac_vlan table.
5966          */
5967         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5968         if (ret == -ENOENT) {
5969                 if (!hclge_is_umv_space_full(vport)) {
5970                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5971                         if (!ret)
5972                                 hclge_update_umv_space(vport, false);
5973                         return ret;
5974                 }
5975
5976                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5977                         hdev->priv_umv_size);
5978
5979                 return -ENOSPC;
5980         }
5981
5982         /* check if we just hit a duplicate entry */
5983         if (!ret) {
5984                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
5985                          vport->vport_id, addr);
5986                 return 0;
5987         }
5988
5989         dev_err(&hdev->pdev->dev,
5990                 "PF failed to add unicast entry(%pM) in the MAC table\n",
5991                 addr);
5992
5993         return ret;
5994 }
5995
5996 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
5997                             const unsigned char *addr)
5998 {
5999         struct hclge_vport *vport = hclge_get_vport(handle);
6000
6001         return hclge_rm_uc_addr_common(vport, addr);
6002 }
6003
6004 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6005                             const unsigned char *addr)
6006 {
6007         struct hclge_dev *hdev = vport->back;
6008         struct hclge_mac_vlan_tbl_entry_cmd req;
6009         int ret;
6010
6011         /* mac addr check */
6012         if (is_zero_ether_addr(addr) ||
6013             is_broadcast_ether_addr(addr) ||
6014             is_multicast_ether_addr(addr)) {
6015                 dev_dbg(&hdev->pdev->dev,
6016                         "Remove mac err! invalid mac:%pM.\n",
6017                          addr);
6018                 return -EINVAL;
6019         }
6020
6021         memset(&req, 0, sizeof(req));
6022         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6023         hclge_prepare_mac_addr(&req, addr, false);
6024         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6025         if (!ret)
6026                 hclge_update_umv_space(vport, true);
6027
6028         return ret;
6029 }
6030
6031 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6032                              const unsigned char *addr)
6033 {
6034         struct hclge_vport *vport = hclge_get_vport(handle);
6035
6036         return hclge_add_mc_addr_common(vport, addr);
6037 }
6038
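/* Add a multicast MAC address for a vport: if the entry already exists
 * only its VF bitmap is updated, otherwise a new entry is created.
 * Returns -ENOSPC when the multicast MAC-VLAN table is full.
 */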
6039 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6040                              const unsigned char *addr)
6041 {
6042         struct hclge_dev *hdev = vport->back;
6043         struct hclge_mac_vlan_tbl_entry_cmd req;
6044         struct hclge_desc desc[3];
6045         int status;
6046
6047         /* mac addr check */
6048         if (!is_multicast_ether_addr(addr)) {
6049                 dev_err(&hdev->pdev->dev,
6050                         "Add mc mac err! invalid mac:%pM.\n",
6051                          addr);
6052                 return -EINVAL;
6053         }
6054         memset(&req, 0, sizeof(req));
6055         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6056         hclge_prepare_mac_addr(&req, addr, true);
6057         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6058         if (!status) {
6059                 /* This mac addr exists, update the VFID for it */
6060                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6061                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6062         } else {
6063                 /* This mac addr does not exist, add a new entry for it */
6064                 memset(desc[0].data, 0, sizeof(desc[0].data));
6065                 memset(desc[1].data, 0, sizeof(desc[0].data));
6066                 memset(desc[2].data, 0, sizeof(desc[0].data));
6067                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6068                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6069         }
6070
6071         if (status == -ENOSPC)
6072                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6073
6074         return status;
6075 }
6076
6077 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6078                             const unsigned char *addr)
6079 {
6080         struct hclge_vport *vport = hclge_get_vport(handle);
6081
6082         return hclge_rm_mc_addr_common(vport, addr);
6083 }
6084
6085 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6086                             const unsigned char *addr)
6087 {
6088         struct hclge_dev *hdev = vport->back;
6089         struct hclge_mac_vlan_tbl_entry_cmd req;
6090         enum hclge_cmd_status status;
6091         struct hclge_desc desc[3];
6092
6093         /* mac addr check */
6094         if (!is_multicast_ether_addr(addr)) {
6095                 dev_dbg(&hdev->pdev->dev,
6096                         "Remove mc mac err! invalid mac:%pM.\n",
6097                          addr);
6098                 return -EINVAL;
6099         }
6100
6101         memset(&req, 0, sizeof(req));
6102         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6103         hclge_prepare_mac_addr(&req, addr, true);
6104         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6105         if (!status) {
6106                 /* This mac addr exists, remove this handle's VFID for it */
6107                 hclge_update_desc_vfid(desc, vport->vport_id, true);
6108
6109                 if (hclge_is_all_function_id_zero(desc))
6110                         /* All the vfids are zero, so delete this entry */
6111                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6112                 else
6113                         /* Not all the vfids are zero, update the vfid */
6114                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6115
6116         } else {
6117                 /* This mac address may be in the mta table, but it cannot
6118                  * be deleted here because an mta entry represents an
6119                  * address range rather than a specific address. The delete
6120                  * action for all entries takes effect in update_mta_status,
6121                  * called by hns3_nic_set_rx_mode.
6122                  */
6123                 status = 0;
6124         }
6125
6126         return status;
6127 }
6128
6129 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6130                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6131 {
6132         struct hclge_vport_mac_addr_cfg *mac_cfg;
6133         struct list_head *list;
6134
6135         if (!vport->vport_id)
6136                 return;
6137
6138         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6139         if (!mac_cfg)
6140                 return;
6141
6142         mac_cfg->hd_tbl_status = true;
6143         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6144
6145         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6146                &vport->uc_mac_list : &vport->mc_mac_list;
6147
6148         list_add_tail(&mac_cfg->node, list);
6149 }
6150
6151 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6152                               bool is_write_tbl,
6153                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6154 {
6155         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6156         struct list_head *list;
6157         bool uc_flag, mc_flag;
6158
6159         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6160                &vport->uc_mac_list : &vport->mc_mac_list;
6161
6162         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6163         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6164
6165         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6166                 if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
6167                         if (uc_flag && mac_cfg->hd_tbl_status)
6168                                 hclge_rm_uc_addr_common(vport, mac_addr);
6169
6170                         if (mc_flag && mac_cfg->hd_tbl_status)
6171                                 hclge_rm_mc_addr_common(vport, mac_addr);
6172
6173                         list_del(&mac_cfg->node);
6174                         kfree(mac_cfg);
6175                         break;
6176                 }
6177         }
6178 }
6179
6180 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6181                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6182 {
6183         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6184         struct list_head *list;
6185
6186         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6187                &vport->uc_mac_list : &vport->mc_mac_list;
6188
6189         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6190                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6191                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6192
6193                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6194                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6195
6196                 mac_cfg->hd_tbl_status = false;
6197                 if (is_del_list) {
6198                         list_del(&mac_cfg->node);
6199                         kfree(mac_cfg);
6200                 }
6201         }
6202 }
6203
6204 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6205 {
6206         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6207         struct hclge_vport *vport;
6208         int i;
6209
6210         mutex_lock(&hdev->vport_cfg_mutex);
6211         for (i = 0; i < hdev->num_alloc_vport; i++) {
6212                 vport = &hdev->vport[i];
6213                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6214                         list_del(&mac->node);
6215                         kfree(mac);
6216                 }
6217
6218                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6219                         list_del(&mac->node);
6220                         kfree(mac);
6221                 }
6222         }
6223         mutex_unlock(&hdev->vport_cfg_mutex);
6224 }
6225
6226 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6227                                               u16 cmdq_resp, u8 resp_code)
6228 {
6229 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6230 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6231 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6232 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6233
6234         int return_status;
6235
6236         if (cmdq_resp) {
6237                 dev_err(&hdev->pdev->dev,
6238                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6239                         cmdq_resp);
6240                 return -EIO;
6241         }
6242
6243         switch (resp_code) {
6244         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6245         case HCLGE_ETHERTYPE_ALREADY_ADD:
6246                 return_status = 0;
6247                 break;
6248         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6249                 dev_err(&hdev->pdev->dev,
6250                         "add mac ethertype failed for manager table overflow.\n");
6251                 return_status = -EIO;
6252                 break;
6253         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6254                 dev_err(&hdev->pdev->dev,
6255                         "add mac ethertype failed for key conflict.\n");
6256                 return_status = -EIO;
6257                 break;
6258         default:
6259                 dev_err(&hdev->pdev->dev,
6260                         "add mac ethertype failed for undefined, code=%d.\n",
6261                         resp_code);
6262                 return_status = -EIO;
6263         }
6264
6265         return return_status;
6266 }
6267
6268 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6269                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6270 {
6271         struct hclge_desc desc;
6272         u8 resp_code;
6273         u16 retval;
6274         int ret;
6275
6276         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6277         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6278
6279         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6280         if (ret) {
6281                 dev_err(&hdev->pdev->dev,
6282                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6283                         ret);
6284                 return ret;
6285         }
6286
6287         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6288         retval = le16_to_cpu(desc.retval);
6289
6290         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6291 }
6292
6293 static int init_mgr_tbl(struct hclge_dev *hdev)
6294 {
6295         int ret;
6296         int i;
6297
6298         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6299                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6300                 if (ret) {
6301                         dev_err(&hdev->pdev->dev,
6302                                 "add mac ethertype failed, ret =%d.\n",
6303                                 ret);
6304                         return ret;
6305                 }
6306         }
6307
6308         return 0;
6309 }
6310
6311 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6312 {
6313         struct hclge_vport *vport = hclge_get_vport(handle);
6314         struct hclge_dev *hdev = vport->back;
6315
6316         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6317 }
6318
6319 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6320                               bool is_first)
6321 {
6322         const unsigned char *new_addr = (const unsigned char *)p;
6323         struct hclge_vport *vport = hclge_get_vport(handle);
6324         struct hclge_dev *hdev = vport->back;
6325         int ret;
6326
6327         /* mac addr check */
6328         if (is_zero_ether_addr(new_addr) ||
6329             is_broadcast_ether_addr(new_addr) ||
6330             is_multicast_ether_addr(new_addr)) {
6331                 dev_err(&hdev->pdev->dev,
6332                         "Change uc mac err! invalid mac:%p.\n",
6333                          new_addr);
6334                 return -EINVAL;
6335         }
6336
6337         if ((!is_first || is_kdump_kernel()) &&
6338             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6339                 dev_warn(&hdev->pdev->dev,
6340                          "remove old uc mac address fail.\n");
6341
6342         ret = hclge_add_uc_addr(handle, new_addr);
6343         if (ret) {
6344                 dev_err(&hdev->pdev->dev,
6345                         "add uc mac address fail, ret =%d.\n",
6346                         ret);
6347
6348                 if (!is_first &&
6349                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6350                         dev_err(&hdev->pdev->dev,
6351                                 "restore uc mac address fail.\n");
6352
6353                 return -EIO;
6354         }
6355
6356         ret = hclge_pause_addr_cfg(hdev, new_addr);
6357         if (ret) {
6358                 dev_err(&hdev->pdev->dev,
6359                         "configure mac pause address fail, ret =%d.\n",
6360                         ret);
6361                 return -EIO;
6362         }
6363
6364         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6365
6366         return 0;
6367 }
6368
6369 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6370                           int cmd)
6371 {
6372         struct hclge_vport *vport = hclge_get_vport(handle);
6373         struct hclge_dev *hdev = vport->back;
6374
6375         if (!hdev->hw.mac.phydev)
6376                 return -EOPNOTSUPP;
6377
6378         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6379 }
6380
6381 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6382                                       u8 fe_type, bool filter_en, u8 vf_id)
6383 {
6384         struct hclge_vlan_filter_ctrl_cmd *req;
6385         struct hclge_desc desc;
6386         int ret;
6387
6388         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6389
6390         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6391         req->vlan_type = vlan_type;
6392         req->vlan_fe = filter_en ? fe_type : 0;
6393         req->vf_id = vf_id;
6394
6395         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6396         if (ret)
6397                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6398                         ret);
6399
6400         return ret;
6401 }
6402
6403 #define HCLGE_FILTER_TYPE_VF            0
6404 #define HCLGE_FILTER_TYPE_PORT          1
6405 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
6406 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
6407 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
6408 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
6409 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
6410 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
6411                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6412 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
6413                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6414
6415 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6416 {
6417         struct hclge_vport *vport = hclge_get_vport(handle);
6418         struct hclge_dev *hdev = vport->back;
6419
6420         if (hdev->pdev->revision >= 0x21) {
6421                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6422                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
6423                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6424                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
6425         } else {
6426                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6427                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6428                                            0);
6429         }
6430         if (enable)
6431                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6432         else
6433                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6434 }
6435
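/* Program the per-VF VLAN filter through two chained descriptors that
 * carry the VLAN id and a VF bitmap split across the descriptors. The
 * hardware response code distinguishes success, a full table (in which
 * case VF VLAN filtering is disabled) and removal of a VLAN that is not
 * present in the table.
 */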
6436 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6437                                     bool is_kill, u16 vlan, u8 qos,
6438                                     __be16 proto)
6439 {
6440 #define HCLGE_MAX_VF_BYTES  16
6441         struct hclge_vlan_filter_vf_cfg_cmd *req0;
6442         struct hclge_vlan_filter_vf_cfg_cmd *req1;
6443         struct hclge_desc desc[2];
6444         u8 vf_byte_val;
6445         u8 vf_byte_off;
6446         int ret;
6447
6448         hclge_cmd_setup_basic_desc(&desc[0],
6449                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6450         hclge_cmd_setup_basic_desc(&desc[1],
6451                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6452
6453         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6454
6455         vf_byte_off = vfid / 8;
6456         vf_byte_val = 1 << (vfid % 8);
6457
6458         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6459         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6460
6461         req0->vlan_id  = cpu_to_le16(vlan);
6462         req0->vlan_cfg = is_kill;
6463
6464         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6465                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6466         else
6467                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6468
6469         ret = hclge_cmd_send(&hdev->hw, desc, 2);
6470         if (ret) {
6471                 dev_err(&hdev->pdev->dev,
6472                         "Send vf vlan command fail, ret =%d.\n",
6473                         ret);
6474                 return ret;
6475         }
6476
6477         if (!is_kill) {
6478 #define HCLGE_VF_VLAN_NO_ENTRY  2
6479                 if (!req0->resp_code || req0->resp_code == 1)
6480                         return 0;
6481
6482                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6483                         dev_warn(&hdev->pdev->dev,
6484                                  "vf vlan table is full, vf vlan filter is disabled\n");
6485                         return 0;
6486                 }
6487
6488                 dev_err(&hdev->pdev->dev,
6489                         "Add vf vlan filter fail, ret =%d.\n",
6490                         req0->resp_code);
6491         } else {
6492 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
6493                 if (!req0->resp_code)
6494                         return 0;
6495
6496                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6497                         dev_warn(&hdev->pdev->dev,
6498                                  "vlan %d filter is not in vf vlan table\n",
6499                                  vlan);
6500                         return 0;
6501                 }
6502
6503                 dev_err(&hdev->pdev->dev,
6504                         "Kill vf vlan filter fail, ret =%d.\n",
6505                         req0->resp_code);
6506         }
6507
6508         return -EIO;
6509 }
6510
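/* Program the port-level VLAN filter: VLAN ids are grouped 160 per
 * command, selected by the vlan_offset field plus a byte/bit offset
 * within the group; is_kill chooses between adding and removing the
 * filter entry.
 */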
6511 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6512                                       u16 vlan_id, bool is_kill)
6513 {
6514         struct hclge_vlan_filter_pf_cfg_cmd *req;
6515         struct hclge_desc desc;
6516         u8 vlan_offset_byte_val;
6517         u8 vlan_offset_byte;
6518         u8 vlan_offset_160;
6519         int ret;
6520
6521         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6522
6523         vlan_offset_160 = vlan_id / 160;
6524         vlan_offset_byte = (vlan_id % 160) / 8;
6525         vlan_offset_byte_val = 1 << (vlan_id % 8);
6526
6527         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6528         req->vlan_offset = vlan_offset_160;
6529         req->vlan_cfg = is_kill;
6530         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6531
6532         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6533         if (ret)
6534                 dev_err(&hdev->pdev->dev,
6535                         "port vlan command, send fail, ret =%d.\n", ret);
6536         return ret;
6537 }
6538
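/* Apply a VLAN filter change for one vport: update the VF-level filter,
 * track vport membership in hdev->vlan_table, and program the port-level
 * filter only when the first vport joins or the last vport leaves the
 * VLAN.
 */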
6539 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6540                                     u16 vport_id, u16 vlan_id, u8 qos,
6541                                     bool is_kill)
6542 {
6543         u16 vport_idx, vport_num = 0;
6544         int ret;
6545
6546         if (is_kill && !vlan_id)
6547                 return 0;
6548
6549         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6550                                        0, proto);
6551         if (ret) {
6552                 dev_err(&hdev->pdev->dev,
6553                         "Set %d vport vlan filter config fail, ret =%d.\n",
6554                         vport_id, ret);
6555                 return ret;
6556         }
6557
6558         /* vlan 0 may be added twice when 8021q module is enabled */
6559         if (!is_kill && !vlan_id &&
6560             test_bit(vport_id, hdev->vlan_table[vlan_id]))
6561                 return 0;
6562
6563         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6564                 dev_err(&hdev->pdev->dev,
6565                         "Add port vlan failed, vport %d is already in vlan %d\n",
6566                         vport_id, vlan_id);
6567                 return -EINVAL;
6568         }
6569
6570         if (is_kill &&
6571             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6572                 dev_err(&hdev->pdev->dev,
6573                         "Delete port vlan failed, vport %d is not in vlan %d\n",
6574                         vport_id, vlan_id);
6575                 return -EINVAL;
6576         }
6577
6578         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6579                 vport_num++;
6580
6581         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6582                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6583                                                  is_kill);
6584
6585         return ret;
6586 }
6587
6588 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
6589                           u16 vlan_id, bool is_kill)
6590 {
6591         struct hclge_vport *vport = hclge_get_vport(handle);
6592         struct hclge_dev *hdev = vport->back;
6593
6594         return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
6595                                         0, is_kill);
6596 }
6597
6598 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
6599                                     u16 vlan, u8 qos, __be16 proto)
6600 {
6601         struct hclge_vport *vport = hclge_get_vport(handle);
6602         struct hclge_dev *hdev = vport->back;
6603
6604         if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
6605                 return -EINVAL;
6606         if (proto != htons(ETH_P_8021Q))
6607                 return -EPROTONOSUPPORT;
6608
6609         return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
6610 }
6611
6612 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6613 {
6614         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6615         struct hclge_vport_vtag_tx_cfg_cmd *req;
6616         struct hclge_dev *hdev = vport->back;
6617         struct hclge_desc desc;
6618         int status;
6619
6620         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6621
6622         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6623         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6624         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6625         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6626                       vcfg->accept_tag1 ? 1 : 0);
6627         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6628                       vcfg->accept_untag1 ? 1 : 0);
6629         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6630                       vcfg->accept_tag2 ? 1 : 0);
6631         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6632                       vcfg->accept_untag2 ? 1 : 0);
6633         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6634                       vcfg->insert_tag1_en ? 1 : 0);
6635         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6636                       vcfg->insert_tag2_en ? 1 : 0);
6637         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6638
6639         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6640         req->vf_bitmap[req->vf_offset] =
6641                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6642
6643         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6644         if (status)
6645                 dev_err(&hdev->pdev->dev,
6646                         "Send port txvlan cfg command fail, ret =%d\n",
6647                         status);
6648
6649         return status;
6650 }
6651
6652 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6653 {
6654         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6655         struct hclge_vport_vtag_rx_cfg_cmd *req;
6656         struct hclge_dev *hdev = vport->back;
6657         struct hclge_desc desc;
6658         int status;
6659
6660         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6661
6662         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6663         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6664                       vcfg->strip_tag1_en ? 1 : 0);
6665         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6666                       vcfg->strip_tag2_en ? 1 : 0);
6667         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6668                       vcfg->vlan1_vlan_prionly ? 1 : 0);
6669         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6670                       vcfg->vlan2_vlan_prionly ? 1 : 0);
6671
6672         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6673         req->vf_bitmap[req->vf_offset] =
6674                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6675
6676         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6677         if (status)
6678                 dev_err(&hdev->pdev->dev,
6679                         "Send port rxvlan cfg command fail, ret =%d\n",
6680                         status);
6681
6682         return status;
6683 }
6684
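/* Configure TX and RX VLAN tag offload for a vport according to its
 * port-based VLAN state: when a port-based VLAN is in use, the default
 * tag1 is inserted on transmit, otherwise frames are accepted with their
 * own tag1; RX tag stripping is derived from rx_vlan_offload_en.
 */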
6685 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
6686                                   u16 port_base_vlan_state,
6687                                   u16 vlan_tag)
6688 {
6689         int ret;
6690
6691         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6692                 vport->txvlan_cfg.accept_tag1 = true;
6693                 vport->txvlan_cfg.insert_tag1_en = false;
6694                 vport->txvlan_cfg.default_tag1 = 0;
6695         } else {
6696                 vport->txvlan_cfg.accept_tag1 = false;
6697                 vport->txvlan_cfg.insert_tag1_en = true;
6698                 vport->txvlan_cfg.default_tag1 = vlan_tag;
6699         }
6700
6701         vport->txvlan_cfg.accept_untag1 = true;
6702
6703         /* accept_tag2 and accept_untag2 are not supported on
6704          * pdev revision 0x20; newer revisions support them, but
6705          * these two fields cannot be configured by the user.
6706          */
6707         vport->txvlan_cfg.accept_tag2 = true;
6708         vport->txvlan_cfg.accept_untag2 = true;
6709         vport->txvlan_cfg.insert_tag2_en = false;
6710         vport->txvlan_cfg.default_tag2 = 0;
6711
6712         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6713                 vport->rxvlan_cfg.strip_tag1_en = false;
6714                 vport->rxvlan_cfg.strip_tag2_en =
6715                                 vport->rxvlan_cfg.rx_vlan_offload_en;
6716         } else {
6717                 vport->rxvlan_cfg.strip_tag1_en =
6718                                 vport->rxvlan_cfg.rx_vlan_offload_en;
6719                 vport->rxvlan_cfg.strip_tag2_en = true;
6720         }
6721         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6722         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6723
6724         ret = hclge_set_vlan_tx_offload_cfg(vport);
6725         if (ret)
6726                 return ret;
6727
6728         return hclge_set_vlan_rx_offload_cfg(vport);
6729 }
6730
6731 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6732 {
6733         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6734         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6735         struct hclge_desc desc;
6736         int status;
6737
6738         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6739         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6740         rx_req->ot_fst_vlan_type =
6741                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6742         rx_req->ot_sec_vlan_type =
6743                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6744         rx_req->in_fst_vlan_type =
6745                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6746         rx_req->in_sec_vlan_type =
6747                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6748
6749         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6750         if (status) {
6751                 dev_err(&hdev->pdev->dev,
6752                         "Send rxvlan protocol type command fail, ret =%d\n",
6753                         status);
6754                 return status;
6755         }
6756
6757         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6758
6759         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6760         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6761         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6762
6763         status = hclge_cmd_send(&hdev->hw, &desc, 1);
6764         if (status)
6765                 dev_err(&hdev->pdev->dev,
6766                         "Send txvlan protocol type command fail, ret =%d\n",
6767                         status);
6768
6769         return status;
6770 }
6771
6772 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6773 {
6774 #define HCLGE_DEF_VLAN_TYPE             0x8100
6775
6776         struct hnae3_handle *handle = &hdev->vport[0].nic;
6777         struct hclge_vport *vport;
6778         int ret;
6779         int i;
6780
6781         if (hdev->pdev->revision >= 0x21) {
6782                 /* for revision 0x21, vf vlan filter is per function */
6783                 for (i = 0; i < hdev->num_alloc_vport; i++) {
6784                         vport = &hdev->vport[i];
6785                         ret = hclge_set_vlan_filter_ctrl(hdev,
6786                                                          HCLGE_FILTER_TYPE_VF,
6787                                                          HCLGE_FILTER_FE_EGRESS,
6788                                                          true,
6789                                                          vport->vport_id);
6790                         if (ret)
6791                                 return ret;
6792                 }
6793
6794                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6795                                                  HCLGE_FILTER_FE_INGRESS, true,
6796                                                  0);
6797                 if (ret)
6798                         return ret;
6799         } else {
6800                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6801                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
6802                                                  true, 0);
6803                 if (ret)
6804                         return ret;
6805         }
6806
6807         handle->netdev_flags |= HNAE3_VLAN_FLTR;
6808
6809         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6810         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6811         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6812         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6813         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6814         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6815
6816         ret = hclge_set_vlan_protocol_type(hdev);
6817         if (ret)
6818                 return ret;
6819
6820         for (i = 0; i < hdev->num_alloc_vport; i++) {
6821                 u16 vlan_tag;
6822
6823                 vport = &hdev->vport[i];
6824                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
6825
6826                 ret = hclge_vlan_offload_cfg(vport,
6827                                              vport->port_base_vlan_cfg.state,
6828                                              vlan_tag);
6829                 if (ret)
6830                         return ret;
6831         }
6832
6833         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6834 }
6835
6836 void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
6837 {
6838         struct hclge_vport_vlan_cfg *vlan;
6839
6840         /* vlan 0 is reserved */
6841         if (!vlan_id)
6842                 return;
6843
6844         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
6845         if (!vlan)
6846                 return;
6847
6848         vlan->hd_tbl_status = true;
6849         vlan->vlan_id = vlan_id;
6850
6851         list_add_tail(&vlan->node, &vport->vlan_list);
6852 }
6853
6854 void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
6855                                bool is_write_tbl)
6856 {
6857         struct hclge_vport_vlan_cfg *vlan, *tmp;
6858         struct hclge_dev *hdev = vport->back;
6859
6860         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6861                 if (vlan->vlan_id == vlan_id) {
6862                         if (is_write_tbl && vlan->hd_tbl_status)
6863                                 hclge_set_vlan_filter_hw(hdev,
6864                                                          htons(ETH_P_8021Q),
6865                                                          vport->vport_id,
6866                                                          vlan_id, 0,
6867                                                          true);
6868
6869                         list_del(&vlan->node);
6870                         kfree(vlan);
6871                         break;
6872                 }
6873         }
6874 }
6875
6876 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
6877 {
6878         struct hclge_vport_vlan_cfg *vlan, *tmp;
6879         struct hclge_dev *hdev = vport->back;
6880
6881         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6882                 if (vlan->hd_tbl_status)
6883                         hclge_set_vlan_filter_hw(hdev,
6884                                                  htons(ETH_P_8021Q),
6885                                                  vport->vport_id,
6886                                                  vlan->vlan_id, 0,
6887                                                  true);
6888
6889                 vlan->hd_tbl_status = false;
6890                 if (is_del_list) {
6891                         list_del(&vlan->node);
6892                         kfree(vlan);
6893                 }
6894         }
6895 }
6896
6897 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
6898 {
6899         struct hclge_vport_vlan_cfg *vlan, *tmp;
6900         struct hclge_vport *vport;
6901         int i;
6902
6903         mutex_lock(&hdev->vport_cfg_mutex);
6904         for (i = 0; i < hdev->num_alloc_vport; i++) {
6905                 vport = &hdev->vport[i];
6906                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6907                         list_del(&vlan->node);
6908                         kfree(vlan);
6909                 }
6910         }
6911         mutex_unlock(&hdev->vport_cfg_mutex);
6912 }
6913
6914 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6915 {
6916         struct hclge_vport *vport = hclge_get_vport(handle);
6917
6918         vport->rxvlan_cfg.strip_tag1_en = false;
6919         vport->rxvlan_cfg.strip_tag2_en = enable;
6920         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6921         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6922
6923         return hclge_set_vlan_rx_offload_cfg(vport);
6924 }
6925
6926 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
6927 {
6928         struct hclge_config_max_frm_size_cmd *req;
6929         struct hclge_desc desc;
6930
6931         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
6932
6933         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
6934         req->max_frm_size = cpu_to_le16(new_mps);
6935         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
6936
6937         return hclge_cmd_send(&hdev->hw, &desc, 1);
6938 }
6939
6940 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
6941 {
6942         struct hclge_vport *vport = hclge_get_vport(handle);
6943
6944         return hclge_set_vport_mtu(vport, new_mtu);
6945 }
6946
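/* Change the MTU of a vport: the MTU is converted to a maximum frame
 * size and range-checked. For a VF only the cached mps is updated (it
 * must not exceed the PF's), while for the PF the client is paused, the
 * MAC maximum frame size is reprogrammed and the packet buffers are
 * reallocated.
 */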
6947 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
6948 {
6949         struct hclge_dev *hdev = vport->back;
6950         int i, max_frm_size, ret = 0;
6951
6952         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
6953         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
6954             max_frm_size > HCLGE_MAC_MAX_FRAME)
6955                 return -EINVAL;
6956
6957         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
6958         mutex_lock(&hdev->vport_lock);
6959         /* VF's mps must fit within hdev->mps */
6960         if (vport->vport_id && max_frm_size > hdev->mps) {
6961                 mutex_unlock(&hdev->vport_lock);
6962                 return -EINVAL;
6963         } else if (vport->vport_id) {
6964                 vport->mps = max_frm_size;
6965                 mutex_unlock(&hdev->vport_lock);
6966                 return 0;
6967         }
6968
6969         /* PF's mps must be greater than VF's mps */
6970         for (i = 1; i < hdev->num_alloc_vport; i++)
6971                 if (max_frm_size < hdev->vport[i].mps) {
6972                         mutex_unlock(&hdev->vport_lock);
6973                         return -EINVAL;
6974                 }
6975
6976         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
6977
6978         ret = hclge_set_mac_mtu(hdev, max_frm_size);
6979         if (ret) {
6980                 dev_err(&hdev->pdev->dev,
6981                         "Change mtu fail, ret =%d\n", ret);
6982                 goto out;
6983         }
6984
6985         hdev->mps = max_frm_size;
6986         vport->mps = max_frm_size;
6987
6988         ret = hclge_buffer_alloc(hdev);
6989         if (ret)
6990                 dev_err(&hdev->pdev->dev,
6991                         "Allocate buffer fail, ret =%d\n", ret);
6992
6993 out:
6994         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
6995         mutex_unlock(&hdev->vport_lock);
6996         return ret;
6997 }
6998
6999 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7000                                     bool enable)
7001 {
7002         struct hclge_reset_tqp_queue_cmd *req;
7003         struct hclge_desc desc;
7004         int ret;
7005
7006         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7007
7008         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7009         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7010         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7011
7012         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7013         if (ret) {
7014                 dev_err(&hdev->pdev->dev,
7015                         "Send tqp reset cmd error, status =%d\n", ret);
7016                 return ret;
7017         }
7018
7019         return 0;
7020 }
7021
7022 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7023 {
7024         struct hclge_reset_tqp_queue_cmd *req;
7025         struct hclge_desc desc;
7026         int ret;
7027
7028         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7029
7030         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7031         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7032
7033         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7034         if (ret) {
7035                 dev_err(&hdev->pdev->dev,
7036                         "Get reset status error, status =%d\n", ret);
7037                 return ret;
7038         }
7039
7040         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7041 }
7042
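/* Convert a queue id that is local to the handle into the global TQP
 * index used by the firmware queue reset commands.
 */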
7043 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7044 {
7045         struct hnae3_queue *queue;
7046         struct hclge_tqp *tqp;
7047
7048         queue = handle->kinfo.tqp[queue_id];
7049         tqp = container_of(queue, struct hclge_tqp, q);
7050
7051         return tqp->index;
7052 }
7053
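/* Reset a single TQP: disable the queue, assert the reset through the
 * firmware command, poll the ready-to-reset status (20 ms per try, up
 * to HCLGE_TQP_RESET_TRY_TIMES tries) and finally deassert the reset.
 */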
7054 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7055 {
7056         struct hclge_vport *vport = hclge_get_vport(handle);
7057         struct hclge_dev *hdev = vport->back;
7058         int reset_try_times = 0;
7059         int reset_status;
7060         u16 queue_gid;
7061         int ret = 0;
7062
7063         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7064
7065         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7066         if (ret) {
7067                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7068                 return ret;
7069         }
7070
7071         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7072         if (ret) {
7073                 dev_err(&hdev->pdev->dev,
7074                         "Send reset tqp cmd fail, ret = %d\n", ret);
7075                 return ret;
7076         }
7077
7078         reset_try_times = 0;
7079         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7080                 /* Wait for tqp hw reset */
7081                 msleep(20);
7082                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7083                 if (reset_status)
7084                         break;
7085         }
7086
7087         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7088                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7089                 return -ETIME;
7090         }
7091
7092         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7093         if (ret)
7094                 dev_err(&hdev->pdev->dev,
7095                         "Deassert the soft reset fail, ret = %d\n", ret);
7096
7097         return ret;
7098 }
7099
7100 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7101 {
7102         struct hclge_dev *hdev = vport->back;
7103         int reset_try_times = 0;
7104         int reset_status;
7105         u16 queue_gid;
7106         int ret;
7107
7108         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7109
7110         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7111         if (ret) {
7112                 dev_warn(&hdev->pdev->dev,
7113                          "Send reset tqp cmd fail, ret = %d\n", ret);
7114                 return;
7115         }
7116
7117         reset_try_times = 0;
7118         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7119                 /* Wait for tqp hw reset */
7120                 msleep(20);
7121                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7122                 if (reset_status)
7123                         break;
7124         }
7125
7126         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7127                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7128                 return;
7129         }
7130
7131         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7132         if (ret)
7133                 dev_warn(&hdev->pdev->dev,
7134                          "Deassert the soft reset fail, ret = %d\n", ret);
7135 }
7136
7137 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7138 {
7139         struct hclge_vport *vport = hclge_get_vport(handle);
7140         struct hclge_dev *hdev = vport->back;
7141
7142         return hdev->fw_version;
7143 }
7144
7145 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7146 {
7147         struct phy_device *phydev = hdev->hw.mac.phydev;
7148
7149         if (!phydev)
7150                 return;
7151
7152         phy_set_asym_pause(phydev, rx_en, tx_en);
7153 }
7154
7155 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7156 {
7157         int ret;
7158
7159         if (rx_en && tx_en)
7160                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7161         else if (rx_en && !tx_en)
7162                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7163         else if (!rx_en && tx_en)
7164                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7165         else
7166                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7167
7168         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7169                 return 0;
7170
7171         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7172         if (ret) {
7173                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7174                         ret);
7175                 return ret;
7176         }
7177
7178         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7179
7180         return 0;
7181 }
7182
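/* Resolve the MAC pause configuration from the PHY autonegotiation
 * result: the local advertisement and the link partner's pause and
 * asym-pause bits are combined with mii_resolve_flowctrl_fdx(), and
 * pause is forced off for half duplex links.
 */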
7183 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7184 {
7185         struct phy_device *phydev = hdev->hw.mac.phydev;
7186         u16 remote_advertising = 0;
7187         u16 local_advertising = 0;
7188         u32 rx_pause, tx_pause;
7189         u8 flowctl;
7190
7191         if (!phydev->link || !phydev->autoneg)
7192                 return 0;
7193
7194         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7195
7196         if (phydev->pause)
7197                 remote_advertising = LPA_PAUSE_CAP;
7198
7199         if (phydev->asym_pause)
7200                 remote_advertising |= LPA_PAUSE_ASYM;
7201
7202         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7203                                            remote_advertising);
7204         tx_pause = flowctl & FLOW_CTRL_TX;
7205         rx_pause = flowctl & FLOW_CTRL_RX;
7206
7207         if (phydev->duplex == HCLGE_MAC_HALF) {
7208                 tx_pause = 0;
7209                 rx_pause = 0;
7210         }
7211
7212         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
7213 }
7214
7215 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
7216                                  u32 *rx_en, u32 *tx_en)
7217 {
7218         struct hclge_vport *vport = hclge_get_vport(handle);
7219         struct hclge_dev *hdev = vport->back;
7220
7221         *auto_neg = hclge_get_autoneg(handle);
7222
7223         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7224                 *rx_en = 0;
7225                 *tx_en = 0;
7226                 return;
7227         }
7228
7229         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
7230                 *rx_en = 1;
7231                 *tx_en = 0;
7232         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
7233                 *tx_en = 1;
7234                 *rx_en = 0;
7235         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
7236                 *rx_en = 1;
7237                 *tx_en = 1;
7238         } else {
7239                 *rx_en = 0;
7240                 *tx_en = 0;
7241         }
7242 }
7243
7244 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
7245                                 u32 rx_en, u32 tx_en)
7246 {
7247         struct hclge_vport *vport = hclge_get_vport(handle);
7248         struct hclge_dev *hdev = vport->back;
7249         struct phy_device *phydev = hdev->hw.mac.phydev;
7250         u32 fc_autoneg;
7251
7252         fc_autoneg = hclge_get_autoneg(handle);
7253         if (auto_neg != fc_autoneg) {
7254                 dev_info(&hdev->pdev->dev,
7255                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
7256                 return -EOPNOTSUPP;
7257         }
7258
7259         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7260                 dev_info(&hdev->pdev->dev,
7261                          "Priority flow control enabled. Cannot set link flow control.\n");
7262                 return -EOPNOTSUPP;
7263         }
7264
7265         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
7266
7267         if (!fc_autoneg)
7268                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
7269
7270         /* Only support flow control negotiation for netdev with
7271          * phy attached for now.
7272          */
7273         if (!phydev)
7274                 return -EOPNOTSUPP;
7275
7276         return phy_start_aneg(phydev);
7277 }
7278
7279 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
7280                                           u8 *auto_neg, u32 *speed, u8 *duplex)
7281 {
7282         struct hclge_vport *vport = hclge_get_vport(handle);
7283         struct hclge_dev *hdev = vport->back;
7284
7285         if (speed)
7286                 *speed = hdev->hw.mac.speed;
7287         if (duplex)
7288                 *duplex = hdev->hw.mac.duplex;
7289         if (auto_neg)
7290                 *auto_neg = hdev->hw.mac.autoneg;
7291 }
7292
7293 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
7294 {
7295         struct hclge_vport *vport = hclge_get_vport(handle);
7296         struct hclge_dev *hdev = vport->back;
7297
7298         if (media_type)
7299                 *media_type = hdev->hw.mac.media_type;
7300 }
7301
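/* Report the MDI-X control and status read from the PHY. The PHY page
 * register is switched to the MDIX page for the reads and restored to
 * the copper page afterwards; without a PHY both values are reported
 * as ETH_TP_MDI_INVALID.
 */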
7302 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7303                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
7304 {
7305         struct hclge_vport *vport = hclge_get_vport(handle);
7306         struct hclge_dev *hdev = vport->back;
7307         struct phy_device *phydev = hdev->hw.mac.phydev;
7308         int mdix_ctrl, mdix, retval, is_resolved;
7309
7310         if (!phydev) {
7311                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7312                 *tp_mdix = ETH_TP_MDI_INVALID;
7313                 return;
7314         }
7315
7316         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7317
7318         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
7319         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7320                                     HCLGE_PHY_MDIX_CTRL_S);
7321
7322         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
7323         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7324         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
7325
7326         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7327
7328         switch (mdix_ctrl) {
7329         case 0x0:
7330                 *tp_mdix_ctrl = ETH_TP_MDI;
7331                 break;
7332         case 0x1:
7333                 *tp_mdix_ctrl = ETH_TP_MDI_X;
7334                 break;
7335         case 0x3:
7336                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7337                 break;
7338         default:
7339                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7340                 break;
7341         }
7342
7343         if (!is_resolved)
7344                 *tp_mdix = ETH_TP_MDI_INVALID;
7345         else if (mdix)
7346                 *tp_mdix = ETH_TP_MDI_X;
7347         else
7348                 *tp_mdix = ETH_TP_MDI;
7349 }
7350
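/* Bind a client (KNIC/UNIC NIC client or RoCE client) to every vport
 * and initialize its instance. The RoCE instance is initialized only
 * once both the NIC client and RoCE support are present.
 */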
7351 static int hclge_init_client_instance(struct hnae3_client *client,
7352                                       struct hnae3_ae_dev *ae_dev)
7353 {
7354         struct hclge_dev *hdev = ae_dev->priv;
7355         struct hclge_vport *vport;
7356         int i, ret;
7357
7358         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
7359                 vport = &hdev->vport[i];
7360
7361                 switch (client->type) {
7362                 case HNAE3_CLIENT_KNIC:
7363
7364                         hdev->nic_client = client;
7365                         vport->nic.client = client;
7366                         ret = client->ops->init_instance(&vport->nic);
7367                         if (ret)
7368                                 goto clear_nic;
7369
7370                         hnae3_set_client_init_flag(client, ae_dev, 1);
7371
7372                         if (hdev->roce_client &&
7373                             hnae3_dev_roce_supported(hdev)) {
7374                                 struct hnae3_client *rc = hdev->roce_client;
7375
7376                                 ret = hclge_init_roce_base_info(vport);
7377                                 if (ret)
7378                                         goto clear_roce;
7379
7380                                 ret = rc->ops->init_instance(&vport->roce);
7381                                 if (ret)
7382                                         goto clear_roce;
7383
7384                                 hnae3_set_client_init_flag(hdev->roce_client,
7385                                                            ae_dev, 1);
7386                         }
7387
7388                         break;
7389                 case HNAE3_CLIENT_UNIC:
7390                         hdev->nic_client = client;
7391                         vport->nic.client = client;
7392
7393                         ret = client->ops->init_instance(&vport->nic);
7394                         if (ret)
7395                                 goto clear_nic;
7396
7397                         hnae3_set_client_init_flag(client, ae_dev, 1);
7398
7399                         break;
7400                 case HNAE3_CLIENT_ROCE:
7401                         if (hnae3_dev_roce_supported(hdev)) {
7402                                 hdev->roce_client = client;
7403                                 vport->roce.client = client;
7404                         }
7405
7406                         if (hdev->roce_client && hdev->nic_client) {
7407                                 ret = hclge_init_roce_base_info(vport);
7408                                 if (ret)
7409                                         goto clear_roce;
7410
7411                                 ret = client->ops->init_instance(&vport->roce);
7412                                 if (ret)
7413                                         goto clear_roce;
7414
7415                                 hnae3_set_client_init_flag(client, ae_dev, 1);
7416                         }
7417
7418                         break;
7419                 default:
7420                         return -EINVAL;
7421                 }
7422         }
7423
7424         return 0;
7425
7426 clear_nic:
7427         hdev->nic_client = NULL;
7428         vport->nic.client = NULL;
7429         return ret;
7430 clear_roce:
7431         hdev->roce_client = NULL;
7432         vport->roce.client = NULL;
7433         return ret;
7434 }
7435
7436 static void hclge_uninit_client_instance(struct hnae3_client *client,
7437                                          struct hnae3_ae_dev *ae_dev)
7438 {
7439         struct hclge_dev *hdev = ae_dev->priv;
7440         struct hclge_vport *vport;
7441         int i;
7442
7443         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7444                 vport = &hdev->vport[i];
7445                 if (hdev->roce_client) {
7446                         hdev->roce_client->ops->uninit_instance(&vport->roce,
7447                                                                 0);
7448                         hdev->roce_client = NULL;
7449                         vport->roce.client = NULL;
7450                 }
7451                 if (client->type == HNAE3_CLIENT_ROCE)
7452                         return;
7453                 if (hdev->nic_client && client->ops->uninit_instance) {
7454                         client->ops->uninit_instance(&vport->nic, 0);
7455                         hdev->nic_client = NULL;
7456                         vport->nic.client = NULL;
7457                 }
7458         }
7459 }
7460
7461 static int hclge_pci_init(struct hclge_dev *hdev)
7462 {
7463         struct pci_dev *pdev = hdev->pdev;
7464         struct hclge_hw *hw;
7465         int ret;
7466
7467         ret = pci_enable_device(pdev);
7468         if (ret) {
7469                 dev_err(&pdev->dev, "failed to enable PCI device\n");
7470                 return ret;
7471         }
7472
7473         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7474         if (ret) {
7475                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7476                 if (ret) {
7477                         dev_err(&pdev->dev,
7478                                 "can't set consistent PCI DMA\n");
7479                         goto err_disable_device;
7480                 }
7481                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7482         }
7483
7484         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7485         if (ret) {
7486                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7487                 goto err_disable_device;
7488         }
7489
7490         pci_set_master(pdev);
7491         hw = &hdev->hw;
7492         hw->io_base = pcim_iomap(pdev, 2, 0);
7493         if (!hw->io_base) {
7494                 dev_err(&pdev->dev, "Can't map configuration register space\n");
7495                 ret = -ENOMEM;
7496                 goto err_clr_master;
7497         }
7498
7499         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7500
7501         return 0;
7502 err_clr_master:
7503         pci_clear_master(pdev);
7504         pci_release_regions(pdev);
7505 err_disable_device:
7506         pci_disable_device(pdev);
7507
7508         return ret;
7509 }
7510
7511 static void hclge_pci_uninit(struct hclge_dev *hdev)
7512 {
7513         struct pci_dev *pdev = hdev->pdev;
7514
7515         pcim_iounmap(pdev, hdev->hw.io_base);
7516         pci_free_irq_vectors(pdev);
7517         pci_clear_master(pdev);
7518         pci_release_mem_regions(pdev);
7519         pci_disable_device(pdev);
7520 }
7521
7522 static void hclge_state_init(struct hclge_dev *hdev)
7523 {
7524         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7525         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7526         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7527         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7528         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7529         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7530 }
7531
7532 static void hclge_state_uninit(struct hclge_dev *hdev)
7533 {
7534         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7535
7536         if (hdev->service_timer.function)
7537                 del_timer_sync(&hdev->service_timer);
7538         if (hdev->reset_timer.function)
7539                 del_timer_sync(&hdev->reset_timer);
7540         if (hdev->service_task.func)
7541                 cancel_work_sync(&hdev->service_task);
7542         if (hdev->rst_service_task.func)
7543                 cancel_work_sync(&hdev->rst_service_task);
7544         if (hdev->mbx_service_task.func)
7545                 cancel_work_sync(&hdev->mbx_service_task);
7546 }
7547
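/* Prepare for an FLR: request a function reset through the reset task
 * and poll for the FLR_DOWN state for up to
 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS (about 5 seconds) before the
 * FLR is allowed to proceed.
 */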
7548 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7549 {
7550 #define HCLGE_FLR_WAIT_MS       100
7551 #define HCLGE_FLR_WAIT_CNT      50
7552         struct hclge_dev *hdev = ae_dev->priv;
7553         int cnt = 0;
7554
7555         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7556         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7557         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7558         hclge_reset_event(hdev->pdev, NULL);
7559
7560         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7561                cnt++ < HCLGE_FLR_WAIT_CNT)
7562                 msleep(HCLGE_FLR_WAIT_MS);
7563
7564         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7565                 dev_err(&hdev->pdev->dev,
7566                         "flr wait down timeout: %d\n", cnt);
7567 }
7568
7569 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7570 {
7571         struct hclge_dev *hdev = ae_dev->priv;
7572
7573         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7574 }
7575
7576 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7577 {
7578         struct pci_dev *pdev = ae_dev->pdev;
7579         struct hclge_dev *hdev;
7580         int ret;
7581
7582         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7583         if (!hdev) {
7584                 ret = -ENOMEM;
7585                 goto out;
7586         }
7587
7588         hdev->pdev = pdev;
7589         hdev->ae_dev = ae_dev;
7590         hdev->reset_type = HNAE3_NONE_RESET;
7591         hdev->reset_level = HNAE3_FUNC_RESET;
7592         ae_dev->priv = hdev;
7593         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7594
7595         mutex_init(&hdev->vport_lock);
7596         mutex_init(&hdev->vport_cfg_mutex);
7597
7598         ret = hclge_pci_init(hdev);
7599         if (ret) {
7600                 dev_err(&pdev->dev, "PCI init failed\n");
7601                 goto out;
7602         }
7603
7604         /* Initialize the firmware command queue */
7605         ret = hclge_cmd_queue_init(hdev);
7606         if (ret) {
7607                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7608                 goto err_pci_uninit;
7609         }
7610
7611         /* Initialize the firmware command module */
7612         ret = hclge_cmd_init(hdev);
7613         if (ret)
7614                 goto err_cmd_uninit;
7615
7616         ret = hclge_get_cap(hdev);
7617         if (ret) {
7618                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7619                         ret);
7620                 goto err_cmd_uninit;
7621         }
7622
7623         ret = hclge_configure(hdev);
7624         if (ret) {
7625                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7626                 goto err_cmd_uninit;
7627         }
7628
7629         ret = hclge_init_msi(hdev);
7630         if (ret) {
7631                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
7632                 goto err_cmd_uninit;
7633         }
7634
7635         ret = hclge_misc_irq_init(hdev);
7636         if (ret) {
7637                 dev_err(&pdev->dev,
7638                         "Misc IRQ(vector0) init error, ret = %d.\n",
7639                         ret);
7640                 goto err_msi_uninit;
7641         }
7642
7643         ret = hclge_alloc_tqps(hdev);
7644         if (ret) {
7645                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
7646                 goto err_msi_irq_uninit;
7647         }
7648
7649         ret = hclge_alloc_vport(hdev);
7650         if (ret) {
7651                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
7652                 goto err_msi_irq_uninit;
7653         }
7654
7655         ret = hclge_map_tqp(hdev);
7656         if (ret) {
7657                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7658                 goto err_msi_irq_uninit;
7659         }
7660
7661         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7662                 ret = hclge_mac_mdio_config(hdev);
7663                 if (ret) {
7664                         dev_err(&hdev->pdev->dev,
7665                                 "mdio config fail ret=%d\n", ret);
7666                         goto err_msi_irq_uninit;
7667                 }
7668         }
7669
7670         ret = hclge_init_umv_space(hdev);
7671         if (ret) {
7672                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
7673                 goto err_mdiobus_unreg;
7674         }
7675
7676         ret = hclge_mac_init(hdev);
7677         if (ret) {
7678                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7679                 goto err_mdiobus_unreg;
7680         }
7681
7682         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7683         if (ret) {
7684                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7685                 goto err_mdiobus_unreg;
7686         }
7687
7688         ret = hclge_config_gro(hdev, true);
7689         if (ret)
7690                 goto err_mdiobus_unreg;
7691
7692         ret = hclge_init_vlan_config(hdev);
7693         if (ret) {
7694                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7695                 goto err_mdiobus_unreg;
7696         }
7697
7698         ret = hclge_tm_schd_init(hdev);
7699         if (ret) {
7700                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
7701                 goto err_mdiobus_unreg;
7702         }
7703
7704         hclge_rss_init_cfg(hdev);
7705         ret = hclge_rss_init_hw(hdev);
7706         if (ret) {
7707                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7708                 goto err_mdiobus_unreg;
7709         }
7710
7711         ret = init_mgr_tbl(hdev);
7712         if (ret) {
7713                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
7714                 goto err_mdiobus_unreg;
7715         }
7716
7717         ret = hclge_init_fd_config(hdev);
7718         if (ret) {
7719                 dev_err(&pdev->dev,
7720                         "fd table init fail, ret=%d\n", ret);
7721                 goto err_mdiobus_unreg;
7722         }
7723
7724         ret = hclge_hw_error_set_state(hdev, true);
7725         if (ret) {
7726                 dev_err(&pdev->dev,
7727                         "fail(%d) to enable hw error interrupts\n", ret);
7728                 goto err_mdiobus_unreg;
7729         }
7730
7731         hclge_dcb_ops_set(hdev);
7732
7733         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
7734         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7735         INIT_WORK(&hdev->service_task, hclge_service_task);
7736         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
7737         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
7738
7739         hclge_clear_all_event_cause(hdev);
7740
7741         /* Enable MISC vector(vector0) */
7742         hclge_enable_vector(&hdev->misc_vector, true);
7743
7744         hclge_state_init(hdev);
7745         hdev->last_reset_time = jiffies;
7746
7747         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7748         return 0;
7749
7750 err_mdiobus_unreg:
7751         if (hdev->hw.mac.phydev)
7752                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
7753 err_msi_irq_uninit:
7754         hclge_misc_irq_uninit(hdev);
7755 err_msi_uninit:
7756         pci_free_irq_vectors(pdev);
7757 err_cmd_uninit:
7758         hclge_cmd_uninit(hdev);
7759 err_pci_uninit:
7760         pcim_iounmap(pdev, hdev->hw.io_base);
7761         pci_clear_master(pdev);
7762         pci_release_regions(pdev);
7763         pci_disable_device(pdev);
7764 out:
7765         return ret;
7766 }
7767
7768 static void hclge_stats_clear(struct hclge_dev *hdev)
7769 {
7770         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
7771 }
7772
7773 static void hclge_reset_vport_state(struct hclge_dev *hdev)
7774 {
7775         struct hclge_vport *vport = hdev->vport;
7776         int i;
7777
7778         for (i = 0; i < hdev->num_alloc_vport; i++) {
7779                 hclge_vport_stop(vport);
7780                 vport++;
7781         }
7782 }
7783
7784 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
7785 {
7786         struct hclge_dev *hdev = ae_dev->priv;
7787         struct pci_dev *pdev = ae_dev->pdev;
7788         int ret;
7789
7790         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7791
7792         hclge_stats_clear(hdev);
7793         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
7794
7795         ret = hclge_cmd_init(hdev);
7796         if (ret) {
7797                 dev_err(&pdev->dev, "Cmd queue init failed\n");
7798                 return ret;
7799         }
7800
7801         ret = hclge_map_tqp(hdev);
7802         if (ret) {
7803                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7804                 return ret;
7805         }
7806
7807         hclge_reset_umv_space(hdev);
7808
7809         ret = hclge_mac_init(hdev);
7810         if (ret) {
7811                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7812                 return ret;
7813         }
7814
7815         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7816         if (ret) {
7817                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7818                 return ret;
7819         }
7820
7821         ret = hclge_config_gro(hdev, true);
7822         if (ret)
7823                 return ret;
7824
7825         ret = hclge_init_vlan_config(hdev);
7826         if (ret) {
7827                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7828                 return ret;
7829         }
7830
7831         ret = hclge_tm_init_hw(hdev, true);
7832         if (ret) {
7833                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
7834                 return ret;
7835         }
7836
7837         ret = hclge_rss_init_hw(hdev);
7838         if (ret) {
7839                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7840                 return ret;
7841         }
7842
7843         ret = hclge_init_fd_config(hdev);
7844         if (ret) {
7845                 dev_err(&pdev->dev,
7846                         "fd table init fail, ret=%d\n", ret);
7847                 return ret;
7848         }
7849
7850         /* Re-enable the hw error interrupts because
7851          * the interrupts get disabled on core/global reset.
7852          */
7853         ret = hclge_hw_error_set_state(hdev, true);
7854         if (ret) {
7855                 dev_err(&pdev->dev,
7856                         "fail(%d) to re-enable HNS hw error interrupts\n", ret);
7857                 return ret;
7858         }
7859
7860         hclge_reset_vport_state(hdev);
7861
7862         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
7863                  HCLGE_DRIVER_NAME);
7864
7865         return 0;
7866 }
7867
7868 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
7869 {
7870         struct hclge_dev *hdev = ae_dev->priv;
7871         struct hclge_mac *mac = &hdev->hw.mac;
7872
7873         hclge_state_uninit(hdev);
7874
7875         if (mac->phydev)
7876                 mdiobus_unregister(mac->mdio_bus);
7877
7878         hclge_uninit_umv_space(hdev);
7879
7880         /* Disable MISC vector(vector0) */
7881         hclge_enable_vector(&hdev->misc_vector, false);
7882         synchronize_irq(hdev->misc_vector.vector_irq);
7883
7884         hclge_hw_error_set_state(hdev, false);
7885         hclge_cmd_uninit(hdev);
7886         hclge_misc_irq_uninit(hdev);
7887         hclge_pci_uninit(hdev);
7888         mutex_destroy(&hdev->vport_lock);
7889         hclge_uninit_vport_mac_table(hdev);
7890         hclge_uninit_vport_vlan_table(hdev);
7891         mutex_destroy(&hdev->vport_cfg_mutex);
7892         ae_dev->priv = NULL;
7893 }
7894
7895 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
7896 {
7897         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
7898         struct hclge_vport *vport = hclge_get_vport(handle);
7899         struct hclge_dev *hdev = vport->back;
7900
7901         return min_t(u32, hdev->rss_size_max,
7902                      vport->alloc_tqps / kinfo->num_tc);
7903 }
7904
7905 static void hclge_get_channels(struct hnae3_handle *handle,
7906                                struct ethtool_channels *ch)
7907 {
7908         ch->max_combined = hclge_get_max_channels(handle);
7909         ch->other_count = 1;
7910         ch->max_other = 1;
7911         ch->combined_count = handle->kinfo.rss_size;
7912 }
7913
7914 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
7915                                         u16 *alloc_tqps, u16 *max_rss_size)
7916 {
7917         struct hclge_vport *vport = hclge_get_vport(handle);
7918         struct hclge_dev *hdev = vport->back;
7919
7920         *alloc_tqps = vport->alloc_tqps;
7921         *max_rss_size = hdev->rss_size_max;
7922 }
7923
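/* Change the channel count: update the vport TQP mapping for the
 * requested rss_size, reprogram the RSS TC mode and, unless the user
 * has already configured it, rebuild the RSS indirection table for the
 * new queue count.
 */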
7924 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
7925                               bool rxfh_configured)
7926 {
7927         struct hclge_vport *vport = hclge_get_vport(handle);
7928         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
7929         struct hclge_dev *hdev = vport->back;
7930         int cur_rss_size = kinfo->rss_size;
7931         int cur_tqps = kinfo->num_tqps;
7932         u16 tc_offset[HCLGE_MAX_TC_NUM];
7933         u16 tc_valid[HCLGE_MAX_TC_NUM];
7934         u16 tc_size[HCLGE_MAX_TC_NUM];
7935         u16 roundup_size;
7936         u32 *rss_indir;
7937         int ret, i;
7938
7939         kinfo->req_rss_size = new_tqps_num;
7940
7941         ret = hclge_tm_vport_map_update(hdev);
7942         if (ret) {
7943                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
7944                 return ret;
7945         }
7946
7947         roundup_size = roundup_pow_of_two(kinfo->rss_size);
7948         roundup_size = ilog2(roundup_size);
7949         /* Set the RSS TC mode according to the new RSS size */
7950         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
7951                 tc_valid[i] = 0;
7952
7953                 if (!(hdev->hw_tc_map & BIT(i)))
7954                         continue;
7955
7956                 tc_valid[i] = 1;
7957                 tc_size[i] = roundup_size;
7958                 tc_offset[i] = kinfo->rss_size * i;
7959         }
7960         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
7961         if (ret)
7962                 return ret;
7963
7964         /* RSS indirection table has been configured by user */
7965         if (rxfh_configured)
7966                 goto out;
7967
7968         /* Reinitialize the RSS indirection table according to the new RSS size */
7969         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
7970         if (!rss_indir)
7971                 return -ENOMEM;
7972
7973         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
7974                 rss_indir[i] = i % kinfo->rss_size;
7975
7976         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
7977         if (ret)
7978                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
7979                         ret);
7980
7981         kfree(rss_indir);
7982
7983 out:
7984         if (!ret)
7985                 dev_info(&hdev->pdev->dev,
7986                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
7987                          cur_rss_size, kinfo->rss_size,
7988                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
7989
7990         return ret;
7991 }
7992
7993 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
7994                               u32 *regs_num_64_bit)
7995 {
7996         struct hclge_desc desc;
7997         u32 total_num;
7998         int ret;
7999
8000         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8001         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8002         if (ret) {
8003                 dev_err(&hdev->pdev->dev,
8004                         "Query register number cmd failed, ret = %d.\n", ret);
8005                 return ret;
8006         }
8007
8008         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8009         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8010
8011         total_num = *regs_num_32_bit + *regs_num_64_bit;
8012         if (!total_num)
8013                 return -EINVAL;
8014
8015         return 0;
8016 }
8017
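/* Read the firmware-exported 32 bit registers. The values are packed
 * across the returned descriptors; the copy loop skips the first
 * descriptor's 8 byte command header and reads the remaining
 * descriptors in full, hence "regs_num + 2" when sizing the array.
 */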
8018 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8019                                  void *data)
8020 {
8021 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8022
8023         struct hclge_desc *desc;
8024         u32 *reg_val = data;
8025         __le32 *desc_data;
8026         int cmd_num;
8027         int i, k, n;
8028         int ret;
8029
8030         if (regs_num == 0)
8031                 return 0;
8032
8033         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8034         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8035         if (!desc)
8036                 return -ENOMEM;
8037
8038         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8039         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8040         if (ret) {
8041                 dev_err(&hdev->pdev->dev,
8042                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8043                 kfree(desc);
8044                 return ret;
8045         }
8046
8047         for (i = 0; i < cmd_num; i++) {
8048                 if (i == 0) {
8049                         desc_data = (__le32 *)(&desc[i].data[0]);
8050                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8051                 } else {
8052                         desc_data = (__le32 *)(&desc[i]);
8053                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8054                 }
8055                 for (k = 0; k < n; k++) {
8056                         *reg_val++ = le32_to_cpu(*desc_data++);
8057
8058                         regs_num--;
8059                         if (!regs_num)
8060                                 break;
8061                 }
8062         }
8063
8064         kfree(desc);
8065         return 0;
8066 }
8067
8068 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8069                                  void *data)
8070 {
8071 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8072
8073         struct hclge_desc *desc;
8074         u64 *reg_val = data;
8075         __le64 *desc_data;
8076         int cmd_num;
8077         int i, k, n;
8078         int ret;
8079
8080         if (regs_num == 0)
8081                 return 0;
8082
8083         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8084         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8085         if (!desc)
8086                 return -ENOMEM;
8087
8088         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8089         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8090         if (ret) {
8091                 dev_err(&hdev->pdev->dev,
8092                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
8093                 kfree(desc);
8094                 return ret;
8095         }
8096
8097         for (i = 0; i < cmd_num; i++) {
8098                 if (i == 0) {
8099                         desc_data = (__le64 *)(&desc[i].data[0]);
8100                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8101                 } else {
8102                         desc_data = (__le64 *)(&desc[i]);
8103                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
8104                 }
8105                 for (k = 0; k < n; k++) {
8106                         *reg_val++ = le64_to_cpu(*desc_data++);
8107
8108                         regs_num--;
8109                         if (!regs_num)
8110                                 break;
8111                 }
8112         }
8113
8114         kfree(desc);
8115         return 0;
8116 }
8117
8118 #define MAX_SEPARATE_NUM        4
8119 #define SEPARATOR_VALUE         0xFFFFFFFF
8120 #define REG_NUM_PER_LINE        4
8121 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
8122
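/* The register dump is laid out in REG_LEN_PER_LINE chunks padded with
 * separator words: the cmdq and common registers once, the ring
 * registers per TQP and the TQP interrupt registers per used MSI-X
 * vector, followed by the firmware 32 bit and 64 bit register blocks.
 */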
8123 static int hclge_get_regs_len(struct hnae3_handle *handle)
8124 {
8125         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8126         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8127         struct hclge_vport *vport = hclge_get_vport(handle);
8128         struct hclge_dev *hdev = vport->back;
8129         u32 regs_num_32_bit, regs_num_64_bit;
8130         int ret;
8131
8132         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8133         if (ret) {
8134                 dev_err(&hdev->pdev->dev,
8135                         "Get register number failed, ret = %d.\n", ret);
8136                 return -EOPNOTSUPP;
8137         }
8138
8139         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8140         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8141         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8142         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8143
8144         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8145                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8146                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8147 }
8148
8149 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8150                            void *data)
8151 {
8152         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8153         struct hclge_vport *vport = hclge_get_vport(handle);
8154         struct hclge_dev *hdev = vport->back;
8155         u32 regs_num_32_bit, regs_num_64_bit;
8156         int i, j, reg_um, separator_num;
8157         u32 *reg = data;
8158         int ret;
8159
8160         *version = hdev->fw_version;
8161
8162         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8163         if (ret) {
8164                 dev_err(&hdev->pdev->dev,
8165                         "Get register number failed, ret = %d.\n", ret);
8166                 return;
8167         }
8168
8169         /* fetching per-PF register values from PF PCIe register space */
8170         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8171         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8172         for (i = 0; i < reg_um; i++)
8173                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8174         for (i = 0; i < separator_num; i++)
8175                 *reg++ = SEPARATOR_VALUE;
8176
8177         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
8178         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8179         for (i = 0; i < reg_um; i++)
8180                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
8181         for (i = 0; i < separator_num; i++)
8182                 *reg++ = SEPARATOR_VALUE;
8183
8184         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
8185         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8186         for (j = 0; j < kinfo->num_tqps; j++) {
8187                 for (i = 0; i < reg_um; i++)
8188                         *reg++ = hclge_read_dev(&hdev->hw,
8189                                                 ring_reg_addr_list[i] +
8190                                                 0x200 * j);
8191                 for (i = 0; i < separator_num; i++)
8192                         *reg++ = SEPARATOR_VALUE;
8193         }
8194
8195         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
8196         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8197         for (j = 0; j < hdev->num_msi_used - 1; j++) {
8198                 for (i = 0; i < reg_um; i++)
8199                         *reg++ = hclge_read_dev(&hdev->hw,
8200                                                 tqp_intr_reg_addr_list[i] +
8201                                                 4 * j);
8202                 for (i = 0; i < separator_num; i++)
8203                         *reg++ = SEPARATOR_VALUE;
8204         }
8205
8206         /* fetching PF common register values from firmware */
8207         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
8208         if (ret) {
8209                 dev_err(&hdev->pdev->dev,
8210                         "Get 32 bit register failed, ret = %d.\n", ret);
8211                 return;
8212         }
8213
8214         reg += regs_num_32_bit;
8215         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
8216         if (ret)
8217                 dev_err(&hdev->pdev->dev,
8218                         "Get 64 bit register failed, ret = %d.\n", ret);
8219 }
8220
8221 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
8222 {
8223         struct hclge_set_led_state_cmd *req;
8224         struct hclge_desc desc;
8225         int ret;
8226
8227         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
8228
8229         req = (struct hclge_set_led_state_cmd *)desc.data;
8230         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
8231                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
8232
8233         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8234         if (ret)
8235                 dev_err(&hdev->pdev->dev,
8236                         "Send set led state cmd error, ret =%d\n", ret);
8237
8238         return ret;
8239 }
8240
8241 enum hclge_led_status {
8242         HCLGE_LED_OFF,
8243         HCLGE_LED_ON,
8244         HCLGE_LED_NO_CHANGE = 0xFF,
8245 };
8246
8247 static int hclge_set_led_id(struct hnae3_handle *handle,
8248                             enum ethtool_phys_id_state status)
8249 {
8250         struct hclge_vport *vport = hclge_get_vport(handle);
8251         struct hclge_dev *hdev = vport->back;
8252
8253         switch (status) {
8254         case ETHTOOL_ID_ACTIVE:
8255                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
8256         case ETHTOOL_ID_INACTIVE:
8257                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
8258         default:
8259                 return -EINVAL;
8260         }
8261 }
8262
8263 static void hclge_get_link_mode(struct hnae3_handle *handle,
8264                                 unsigned long *supported,
8265                                 unsigned long *advertising)
8266 {
8267         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
8268         struct hclge_vport *vport = hclge_get_vport(handle);
8269         struct hclge_dev *hdev = vport->back;
8270         unsigned int idx = 0;
8271
8272         for (; idx < size; idx++) {
8273                 supported[idx] = hdev->hw.mac.supported[idx];
8274                 advertising[idx] = hdev->hw.mac.advertising[idx];
8275         }
8276 }
8277
8278 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
8279 {
8280         struct hclge_vport *vport = hclge_get_vport(handle);
8281         struct hclge_dev *hdev = vport->back;
8282
8283         return hclge_config_gro(hdev, enable);
8284 }
8285
8286 static const struct hnae3_ae_ops hclge_ops = {
8287         .init_ae_dev = hclge_init_ae_dev,
8288         .uninit_ae_dev = hclge_uninit_ae_dev,
8289         .flr_prepare = hclge_flr_prepare,
8290         .flr_done = hclge_flr_done,
8291         .init_client_instance = hclge_init_client_instance,
8292         .uninit_client_instance = hclge_uninit_client_instance,
8293         .map_ring_to_vector = hclge_map_ring_to_vector,
8294         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
8295         .get_vector = hclge_get_vector,
8296         .put_vector = hclge_put_vector,
8297         .set_promisc_mode = hclge_set_promisc_mode,
8298         .set_loopback = hclge_set_loopback,
8299         .start = hclge_ae_start,
8300         .stop = hclge_ae_stop,
8301         .client_start = hclge_client_start,
8302         .client_stop = hclge_client_stop,
8303         .get_status = hclge_get_status,
8304         .get_ksettings_an_result = hclge_get_ksettings_an_result,
8305         .update_speed_duplex_h = hclge_update_speed_duplex_h,
8306         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8307         .get_media_type = hclge_get_media_type,
8308         .get_rss_key_size = hclge_get_rss_key_size,
8309         .get_rss_indir_size = hclge_get_rss_indir_size,
8310         .get_rss = hclge_get_rss,
8311         .set_rss = hclge_set_rss,
8312         .set_rss_tuple = hclge_set_rss_tuple,
8313         .get_rss_tuple = hclge_get_rss_tuple,
8314         .get_tc_size = hclge_get_tc_size,
8315         .get_mac_addr = hclge_get_mac_addr,
8316         .set_mac_addr = hclge_set_mac_addr,
8317         .do_ioctl = hclge_do_ioctl,
8318         .add_uc_addr = hclge_add_uc_addr,
8319         .rm_uc_addr = hclge_rm_uc_addr,
8320         .add_mc_addr = hclge_add_mc_addr,
8321         .rm_mc_addr = hclge_rm_mc_addr,
8322         .set_autoneg = hclge_set_autoneg,
8323         .get_autoneg = hclge_get_autoneg,
8324         .get_pauseparam = hclge_get_pauseparam,
8325         .set_pauseparam = hclge_set_pauseparam,
8326         .set_mtu = hclge_set_mtu,
8327         .reset_queue = hclge_reset_tqp,
8328         .get_stats = hclge_get_stats,
8329         .update_stats = hclge_update_stats,
8330         .get_strings = hclge_get_strings,
8331         .get_sset_count = hclge_get_sset_count,
8332         .get_fw_version = hclge_get_fw_version,
8333         .get_mdix_mode = hclge_get_mdix_mode,
8334         .enable_vlan_filter = hclge_enable_vlan_filter,
8335         .set_vlan_filter = hclge_set_vlan_filter,
8336         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
8337         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
8338         .reset_event = hclge_reset_event,
8339         .set_default_reset_request = hclge_set_def_reset_request,
8340         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8341         .set_channels = hclge_set_channels,
8342         .get_channels = hclge_get_channels,
8343         .get_regs_len = hclge_get_regs_len,
8344         .get_regs = hclge_get_regs,
8345         .set_led_id = hclge_set_led_id,
8346         .get_link_mode = hclge_get_link_mode,
8347         .add_fd_entry = hclge_add_fd_entry,
8348         .del_fd_entry = hclge_del_fd_entry,
8349         .del_all_fd_entries = hclge_del_all_fd_entries,
8350         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8351         .get_fd_rule_info = hclge_get_fd_rule_info,
8352         .get_fd_all_rules = hclge_get_all_rules,
8353         .restore_fd_rules = hclge_restore_fd_entries,
8354         .enable_fd = hclge_enable_fd,
8355         .dbg_run_cmd = hclge_dbg_run_cmd,
8356         .handle_hw_ras_error = hclge_handle_hw_ras_error,
8357         .get_hw_reset_stat = hclge_get_hw_reset_stat,
8358         .ae_dev_resetting = hclge_ae_dev_resetting,
8359         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
8360         .set_gro_en = hclge_gro_en,
8361         .get_global_queue_id = hclge_covert_handle_qid_global,
8362         .set_timer_task = hclge_set_timer_task,
8363         .mac_connect_phy = hclge_mac_connect_phy,
8364         .mac_disconnect_phy = hclge_mac_disconnect_phy,
8365 };
8366
8367 static struct hnae3_ae_algo ae_algo = {
8368         .ops = &hclge_ops,
8369         .pdev_id_table = ae_algo_pci_tbl,
8370 };
8371
8372 static int hclge_init(void)
8373 {
8374         pr_info("%s is initializing\n", HCLGE_NAME);
8375
8376         hnae3_register_ae_algo(&ae_algo);
8377
8378         return 0;
8379 }
8380
8381 static void hclge_exit(void)
8382 {
8383         hnae3_unregister_ae_algo(&ae_algo);
8384 }
8385 module_init(hclge_init);
8386 module_exit(hclge_exit);
8387
8388 MODULE_LICENSE("GPL");
8389 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8390 MODULE_DESCRIPTION("HCLGE Driver");
8391 MODULE_VERSION(HCLGE_MOD_VERSION);