1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256
31
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37                                u16 *allocated_size, bool is_alloc);
38 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
39 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
40
41 static struct hnae3_ae_algo ae_algo;
42
43 static const struct pci_device_id ae_algo_pci_tbl[] = {
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
49         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
51         /* required last entry */
52         {0, }
53 };
54
55 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
56
57 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
58                                          HCLGE_CMDQ_TX_ADDR_H_REG,
59                                          HCLGE_CMDQ_TX_DEPTH_REG,
60                                          HCLGE_CMDQ_TX_TAIL_REG,
61                                          HCLGE_CMDQ_TX_HEAD_REG,
62                                          HCLGE_CMDQ_RX_ADDR_L_REG,
63                                          HCLGE_CMDQ_RX_ADDR_H_REG,
64                                          HCLGE_CMDQ_RX_DEPTH_REG,
65                                          HCLGE_CMDQ_RX_TAIL_REG,
66                                          HCLGE_CMDQ_RX_HEAD_REG,
67                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
68                                          HCLGE_CMDQ_INTR_STS_REG,
69                                          HCLGE_CMDQ_INTR_EN_REG,
70                                          HCLGE_CMDQ_INTR_GEN_REG};
71
72 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
73                                            HCLGE_VECTOR0_OTER_EN_REG,
74                                            HCLGE_MISC_RESET_STS_REG,
75                                            HCLGE_MISC_VECTOR_INT_STS,
76                                            HCLGE_GLOBAL_RESET_REG,
77                                            HCLGE_FUN_RST_ING,
78                                            HCLGE_GRO_EN_REG};
79
80 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
81                                          HCLGE_RING_RX_ADDR_H_REG,
82                                          HCLGE_RING_RX_BD_NUM_REG,
83                                          HCLGE_RING_RX_BD_LENGTH_REG,
84                                          HCLGE_RING_RX_MERGE_EN_REG,
85                                          HCLGE_RING_RX_TAIL_REG,
86                                          HCLGE_RING_RX_HEAD_REG,
87                                          HCLGE_RING_RX_FBD_NUM_REG,
88                                          HCLGE_RING_RX_OFFSET_REG,
89                                          HCLGE_RING_RX_FBD_OFFSET_REG,
90                                          HCLGE_RING_RX_STASH_REG,
91                                          HCLGE_RING_RX_BD_ERR_REG,
92                                          HCLGE_RING_TX_ADDR_L_REG,
93                                          HCLGE_RING_TX_ADDR_H_REG,
94                                          HCLGE_RING_TX_BD_NUM_REG,
95                                          HCLGE_RING_TX_PRIORITY_REG,
96                                          HCLGE_RING_TX_TC_REG,
97                                          HCLGE_RING_TX_MERGE_EN_REG,
98                                          HCLGE_RING_TX_TAIL_REG,
99                                          HCLGE_RING_TX_HEAD_REG,
100                                          HCLGE_RING_TX_FBD_NUM_REG,
101                                          HCLGE_RING_TX_OFFSET_REG,
102                                          HCLGE_RING_TX_EBD_NUM_REG,
103                                          HCLGE_RING_TX_EBD_OFFSET_REG,
104                                          HCLGE_RING_TX_BD_ERR_REG,
105                                          HCLGE_RING_EN_REG};
106
107 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
108                                              HCLGE_TQP_INTR_GL0_REG,
109                                              HCLGE_TQP_INTR_GL1_REG,
110                                              HCLGE_TQP_INTR_GL2_REG,
111                                              HCLGE_TQP_INTR_RL_REG};
112
113 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
114         "App    Loopback test",
115         "Serdes serial Loopback test",
116         "Serdes parallel Loopback test",
117         "Phy    Loopback test"
118 };
119
120 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
121         {"mac_tx_mac_pause_num",
122                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
123         {"mac_rx_mac_pause_num",
124                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
125         {"mac_tx_control_pkt_num",
126                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
127         {"mac_rx_control_pkt_num",
128                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
129         {"mac_tx_pfc_pkt_num",
130                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
131         {"mac_tx_pfc_pri0_pkt_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
133         {"mac_tx_pfc_pri1_pkt_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
135         {"mac_tx_pfc_pri2_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
137         {"mac_tx_pfc_pri3_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
139         {"mac_tx_pfc_pri4_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
141         {"mac_tx_pfc_pri5_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
143         {"mac_tx_pfc_pri6_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
145         {"mac_tx_pfc_pri7_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
147         {"mac_rx_pfc_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
149         {"mac_rx_pfc_pri0_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
151         {"mac_rx_pfc_pri1_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
153         {"mac_rx_pfc_pri2_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
155         {"mac_rx_pfc_pri3_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
157         {"mac_rx_pfc_pri4_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
159         {"mac_rx_pfc_pri5_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
161         {"mac_rx_pfc_pri6_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
163         {"mac_rx_pfc_pri7_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
165         {"mac_tx_total_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
167         {"mac_tx_total_oct_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
169         {"mac_tx_good_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
171         {"mac_tx_bad_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
173         {"mac_tx_good_oct_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
175         {"mac_tx_bad_oct_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
177         {"mac_tx_uni_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
179         {"mac_tx_multi_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
181         {"mac_tx_broad_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
183         {"mac_tx_undersize_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
185         {"mac_tx_oversize_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
187         {"mac_tx_64_oct_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
189         {"mac_tx_65_127_oct_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
191         {"mac_tx_128_255_oct_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
193         {"mac_tx_256_511_oct_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
195         {"mac_tx_512_1023_oct_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
197         {"mac_tx_1024_1518_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
199         {"mac_tx_1519_2047_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
201         {"mac_tx_2048_4095_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
203         {"mac_tx_4096_8191_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
205         {"mac_tx_8192_9216_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
207         {"mac_tx_9217_12287_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
209         {"mac_tx_12288_16383_oct_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
211         {"mac_tx_1519_max_good_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
213         {"mac_tx_1519_max_bad_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
215         {"mac_rx_total_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
217         {"mac_rx_total_oct_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
219         {"mac_rx_good_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
221         {"mac_rx_bad_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
223         {"mac_rx_good_oct_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
225         {"mac_rx_bad_oct_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
227         {"mac_rx_uni_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
229         {"mac_rx_multi_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
231         {"mac_rx_broad_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
233         {"mac_rx_undersize_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
235         {"mac_rx_oversize_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
237         {"mac_rx_64_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
239         {"mac_rx_65_127_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
241         {"mac_rx_128_255_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
243         {"mac_rx_256_511_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
245         {"mac_rx_512_1023_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
247         {"mac_rx_1024_1518_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
249         {"mac_rx_1519_2047_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
251         {"mac_rx_2048_4095_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
253         {"mac_rx_4096_8191_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
255         {"mac_rx_8192_9216_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
257         {"mac_rx_9217_12287_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
259         {"mac_rx_12288_16383_oct_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
261         {"mac_rx_1519_max_good_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
263         {"mac_rx_1519_max_bad_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
265
266         {"mac_tx_fragment_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
268         {"mac_tx_undermin_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
270         {"mac_tx_jabber_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
272         {"mac_tx_err_all_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
274         {"mac_tx_from_app_good_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
276         {"mac_tx_from_app_bad_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
278         {"mac_rx_fragment_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
280         {"mac_rx_undermin_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
282         {"mac_rx_jabber_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
284         {"mac_rx_fcs_err_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
286         {"mac_rx_send_app_good_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
288         {"mac_rx_send_app_bad_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
290 };
291
292 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
293         {
294                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
295                 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
296                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
297                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
298                 .i_port_bitmap = 0x1,
299         },
300 };
301
302 static const u8 hclge_hash_key[] = {
303         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
304         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
305         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
306         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
307         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
308 };
309
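/* Read MAC statistics with the fixed-size HCLGE_OPC_STATS_MAC command
 * (HCLGE_MAC_CMD_NUM descriptors) and accumulate each 64-bit counter into
 * hdev->hw_stats.mac_stats. This is the fallback path used by
 * hclge_mac_update_stats() when the firmware does not support querying the
 * stats register count.
 */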
310 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
311 {
312 #define HCLGE_MAC_CMD_NUM 21
313
314         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
315         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
316         __le64 *desc_data;
317         int i, k, n;
318         int ret;
319
320         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
321         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
322         if (ret) {
323                 dev_err(&hdev->pdev->dev,
324                         "Get MAC pkt stats fail, status = %d.\n", ret);
325
326                 return ret;
327         }
328
329         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
330                 /* for special opcode 0x0032, only the first desc has the header */
331                 if (unlikely(i == 0)) {
332                         desc_data = (__le64 *)(&desc[i].data[0]);
333                         n = HCLGE_RD_FIRST_STATS_NUM;
334                 } else {
335                         desc_data = (__le64 *)(&desc[i]);
336                         n = HCLGE_RD_OTHER_STATS_NUM;
337                 }
338
339                 for (k = 0; k < n; k++) {
340                         *data += le64_to_cpu(*desc_data);
341                         data++;
342                         desc_data++;
343                 }
344         }
345
346         return 0;
347 }
348
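/* Read MAC statistics with the HCLGE_OPC_STATS_MAC_ALL command, using the
 * descriptor count previously obtained from the firmware via
 * hclge_mac_query_reg_num(), and accumulate the counters into
 * hdev->hw_stats.mac_stats.
 */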
349 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
350 {
351         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
352         struct hclge_desc *desc;
353         __le64 *desc_data;
354         u16 i, k, n;
355         int ret;
356
357         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
358         if (!desc)
359                 return -ENOMEM;
360         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
361         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
362         if (ret) {
363                 kfree(desc);
364                 return ret;
365         }
366
367         for (i = 0; i < desc_num; i++) {
368                 /* for special opcode 0x0034, only the first desc has the header */
369                 if (i == 0) {
370                         desc_data = (__le64 *)(&desc[i].data[0]);
371                         n = HCLGE_RD_FIRST_STATS_NUM;
372                 } else {
373                         desc_data = (__le64 *)(&desc[i]);
374                         n = HCLGE_RD_OTHER_STATS_NUM;
375                 }
376
377                 for (k = 0; k < n; k++) {
378                         *data += le64_to_cpu(*desc_data);
379                         data++;
380                         desc_data++;
381                 }
382         }
383
384         kfree(desc);
385
386         return 0;
387 }
388
389 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
390 {
391         struct hclge_desc desc;
392         __le32 *desc_data;
393         u32 reg_num;
394         int ret;
395
396         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
397         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
398         if (ret)
399                 return ret;
400
401         desc_data = (__le32 *)(&desc.data[0]);
402         reg_num = le32_to_cpu(*desc_data);
403
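        /* desc_num = 1 + ceil((reg_num - 3) / 4) */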
404         *desc_num = 1 + ((reg_num - 3) >> 2) +
405                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
406
407         return 0;
408 }
409
410 static int hclge_mac_update_stats(struct hclge_dev *hdev)
411 {
412         u32 desc_num;
413         int ret;
414
415         ret = hclge_mac_query_reg_num(hdev, &desc_num);
416
417         /* The firmware supports the new statistics acquisition method */
418         if (!ret)
419                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
420         else if (ret == -EOPNOTSUPP)
421                 ret = hclge_mac_update_stats_defective(hdev);
422         else
423                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
424
425         return ret;
426 }
427
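/* Query the per-queue RX and TX packet counters (HCLGE_OPC_QUERY_RX_STATUS /
 * HCLGE_OPC_QUERY_TX_STATUS) for every TQP of this handle and accumulate them
 * into the per-TQP software stats.
 */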
428 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
429 {
430         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
431         struct hclge_vport *vport = hclge_get_vport(handle);
432         struct hclge_dev *hdev = vport->back;
433         struct hnae3_queue *queue;
434         struct hclge_desc desc[1];
435         struct hclge_tqp *tqp;
436         int ret, i;
437
438         for (i = 0; i < kinfo->num_tqps; i++) {
439                 queue = handle->kinfo.tqp[i];
440                 tqp = container_of(queue, struct hclge_tqp, q);
441                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
442                 hclge_cmd_setup_basic_desc(&desc[0],
443                                            HCLGE_OPC_QUERY_RX_STATUS,
444                                            true);
445
446                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
447                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
448                 if (ret) {
449                         dev_err(&hdev->pdev->dev,
450                                 "Query tqp stat fail, status = %d, queue = %d\n",
451                                 ret, i);
452                         return ret;
453                 }
454                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
455                         le32_to_cpu(desc[0].data[1]);
456         }
457
458         for (i = 0; i < kinfo->num_tqps; i++) {
459                 queue = handle->kinfo.tqp[i];
460                 tqp = container_of(queue, struct hclge_tqp, q);
461                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
462                 hclge_cmd_setup_basic_desc(&desc[0],
463                                            HCLGE_OPC_QUERY_TX_STATUS,
464                                            true);
465
466                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
467                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
468                 if (ret) {
469                         dev_err(&hdev->pdev->dev,
470                                 "Query tqp stat fail, status = %d, queue = %d\n",
471                                 ret, i);
472                         return ret;
473                 }
474                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
475                         le32_to_cpu(desc[0].data[1]);
476         }
477
478         return 0;
479 }
480
481 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
482 {
483         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
484         struct hclge_tqp *tqp;
485         u64 *buff = data;
486         int i;
487
488         for (i = 0; i < kinfo->num_tqps; i++) {
489                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
490                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
491         }
492
493         for (i = 0; i < kinfo->num_tqps; i++) {
494                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
495                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
496         }
497
498         return buff;
499 }
500
501 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
502 {
503         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
504
505         return kinfo->num_tqps * 2;
506 }
507
508 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
509 {
510         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
511         u8 *buff = data;
512         int i = 0;
513
514         for (i = 0; i < kinfo->num_tqps; i++) {
515                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
516                         struct hclge_tqp, q);
517                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
518                          tqp->index);
519                 buff = buff + ETH_GSTRING_LEN;
520         }
521
522         for (i = 0; i < kinfo->num_tqps; i++) {
523                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
524                         struct hclge_tqp, q);
525                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
526                          tqp->index);
527                 buff = buff + ETH_GSTRING_LEN;
528         }
529
530         return buff;
531 }
532
533 static u64 *hclge_comm_get_stats(void *comm_stats,
534                                  const struct hclge_comm_stats_str strs[],
535                                  int size, u64 *data)
536 {
537         u64 *buf = data;
538         u32 i;
539
540         for (i = 0; i < size; i++)
541                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
542
543         return buf + size;
544 }
545
546 static u8 *hclge_comm_get_strings(u32 stringset,
547                                   const struct hclge_comm_stats_str strs[],
548                                   int size, u8 *data)
549 {
550         char *buff = (char *)data;
551         u32 i;
552
553         if (stringset != ETH_SS_STATS)
554                 return buff;
555
556         for (i = 0; i < size; i++) {
557                 snprintf(buff, ETH_GSTRING_LEN, "%s",
558                          strs[i].desc);
559                 buff = buff + ETH_GSTRING_LEN;
560         }
561
562         return (u8 *)buff;
563 }
564
565 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
566 {
567         struct hnae3_handle *handle;
568         int status;
569
570         handle = &hdev->vport[0].nic;
571         if (handle->client) {
572                 status = hclge_tqps_update_stats(handle);
573                 if (status) {
574                         dev_err(&hdev->pdev->dev,
575                                 "Update TQPS stats fail, status = %d.\n",
576                                 status);
577                 }
578         }
579
580         status = hclge_mac_update_stats(hdev);
581         if (status)
582                 dev_err(&hdev->pdev->dev,
583                         "Update MAC stats fail, status = %d.\n", status);
584 }
585
586 static void hclge_update_stats(struct hnae3_handle *handle,
587                                struct net_device_stats *net_stats)
588 {
589         struct hclge_vport *vport = hclge_get_vport(handle);
590         struct hclge_dev *hdev = vport->back;
591         int status;
592
593         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
594                 return;
595
596         status = hclge_mac_update_stats(hdev);
597         if (status)
598                 dev_err(&hdev->pdev->dev,
599                         "Update MAC stats fail, status = %d.\n",
600                         status);
601
602         status = hclge_tqps_update_stats(handle);
603         if (status)
604                 dev_err(&hdev->pdev->dev,
605                         "Update TQPS stats fail, status = %d.\n",
606                         status);
607
608         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
609 }
610
611 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
614                 HNAE3_SUPPORT_PHY_LOOPBACK |\
615                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
616                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
617
618         struct hclge_vport *vport = hclge_get_vport(handle);
619         struct hclge_dev *hdev = vport->back;
620         int count = 0;
621
622         /* Loopback test support rules:
623          * mac: only supported in GE mode
624          * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
625          * phy: only supported when a phy device exists on the board
626          */
627         if (stringset == ETH_SS_TEST) {
628                 /* clear loopback bit flags at first */
629                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
630                 if (hdev->pdev->revision >= 0x21 ||
631                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
632                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
633                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
634                         count += 1;
635                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
636                 }
637
638                 count += 2;
639                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
640                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
641         } else if (stringset == ETH_SS_STATS) {
642                 count = ARRAY_SIZE(g_mac_stats_string) +
643                         hclge_tqps_get_sset_count(handle, stringset);
644         }
645
646         return count;
647 }
648
649 static void hclge_get_strings(struct hnae3_handle *handle,
650                               u32 stringset,
651                               u8 *data)
652 {
653         u8 *p = (char *)data;
654         int size;
655
656         if (stringset == ETH_SS_STATS) {
657                 size = ARRAY_SIZE(g_mac_stats_string);
658                 p = hclge_comm_get_strings(stringset,
659                                            g_mac_stats_string,
660                                            size,
661                                            p);
662                 p = hclge_tqps_get_strings(handle, p);
663         } else if (stringset == ETH_SS_TEST) {
664                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
665                         memcpy(p,
666                                hns3_nic_test_strs[HNAE3_LOOP_APP],
667                                ETH_GSTRING_LEN);
668                         p += ETH_GSTRING_LEN;
669                 }
670                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
671                         memcpy(p,
672                                hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
673                                ETH_GSTRING_LEN);
674                         p += ETH_GSTRING_LEN;
675                 }
676                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
677                         memcpy(p,
678                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
679                                ETH_GSTRING_LEN);
680                         p += ETH_GSTRING_LEN;
681                 }
682                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
683                         memcpy(p,
684                                hns3_nic_test_strs[HNAE3_LOOP_PHY],
685                                ETH_GSTRING_LEN);
686                         p += ETH_GSTRING_LEN;
687                 }
688         }
689 }
690
691 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
692 {
693         struct hclge_vport *vport = hclge_get_vport(handle);
694         struct hclge_dev *hdev = vport->back;
695         u64 *p;
696
697         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
698                                  g_mac_stats_string,
699                                  ARRAY_SIZE(g_mac_stats_string),
700                                  data);
701         p = hclge_tqps_get_stats(handle, p);
702 }
703
704 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
705                                      u64 *rx_cnt)
706 {
707         struct hclge_vport *vport = hclge_get_vport(handle);
708         struct hclge_dev *hdev = vport->back;
709
710         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
711         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
712 }
713
714 static int hclge_parse_func_status(struct hclge_dev *hdev,
715                                    struct hclge_func_status_cmd *status)
716 {
717         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
718                 return -EINVAL;
719
720         /* Set the pf to main pf */
721         if (status->pf_state & HCLGE_PF_STATE_MAIN)
722                 hdev->flag |= HCLGE_FLAG_MAIN;
723         else
724                 hdev->flag &= ~HCLGE_FLAG_MAIN;
725
726         return 0;
727 }
728
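/* Query HCLGE_OPC_QUERY_FUNC_STATUS, retrying up to six times (1-2 ms apart)
 * until the firmware reports a non-zero pf_state, i.e. PF reset has finished,
 * then record whether this PF is the main PF.
 */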
729 static int hclge_query_function_status(struct hclge_dev *hdev)
730 {
731         struct hclge_func_status_cmd *req;
732         struct hclge_desc desc;
733         int timeout = 0;
734         int ret;
735
736         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
737         req = (struct hclge_func_status_cmd *)desc.data;
738
739         do {
740                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
741                 if (ret) {
742                         dev_err(&hdev->pdev->dev,
743                                 "query function status failed %d.\n",
744                                 ret);
745
746                         return ret;
747                 }
748
749                 /* Check if pf reset is done */
750                 if (req->pf_state)
751                         break;
752                 usleep_range(1000, 2000);
753         } while (timeout++ < 5);
754
755         ret = hclge_parse_func_status(hdev, req);
756
757         return ret;
758 }
759
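/* Query the PF's resources from the firmware: TQP count, packet/TX/DV buffer
 * sizes and the MSI-X vector budget. When RoCE is supported, the NIC vectors
 * occupy the first roce_base_msix_offset entries and the RoCE vectors follow.
 */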
760 static int hclge_query_pf_resource(struct hclge_dev *hdev)
761 {
762         struct hclge_pf_res_cmd *req;
763         struct hclge_desc desc;
764         int ret;
765
766         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
767         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
768         if (ret) {
769                 dev_err(&hdev->pdev->dev,
770                         "query pf resource failed %d.\n", ret);
771                 return ret;
772         }
773
774         req = (struct hclge_pf_res_cmd *)desc.data;
775         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
776         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
777
778         if (req->tx_buf_size)
779                 hdev->tx_buf_size =
780                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
781         else
782                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
783
784         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
785
786         if (req->dv_buf_size)
787                 hdev->dv_buf_size =
788                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
789         else
790                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
791
792         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
793
794         if (hnae3_dev_roce_supported(hdev)) {
795                 hdev->roce_base_msix_offset =
796                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
797                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
798                 hdev->num_roce_msi =
799                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
800                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
801
802                 /* PF should have NIC vectors and Roce vectors,
803                  * NIC vectors are queued before Roce vectors.
804                  */
805                 hdev->num_msi = hdev->num_roce_msi  +
806                                 hdev->roce_base_msix_offset;
807         } else {
808                 hdev->num_msi =
809                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
810                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
811         }
812
813         return 0;
814 }
815
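/* Map the firmware speed code (which is not in ascending speed order) to the
 * corresponding HCLGE_MAC_SPEED_* value.
 */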
816 static int hclge_parse_speed(int speed_cmd, int *speed)
817 {
818         switch (speed_cmd) {
819         case 6:
820                 *speed = HCLGE_MAC_SPEED_10M;
821                 break;
822         case 7:
823                 *speed = HCLGE_MAC_SPEED_100M;
824                 break;
825         case 0:
826                 *speed = HCLGE_MAC_SPEED_1G;
827                 break;
828         case 1:
829                 *speed = HCLGE_MAC_SPEED_10G;
830                 break;
831         case 2:
832                 *speed = HCLGE_MAC_SPEED_25G;
833                 break;
834         case 3:
835                 *speed = HCLGE_MAC_SPEED_40G;
836                 break;
837         case 4:
838                 *speed = HCLGE_MAC_SPEED_50G;
839                 break;
840         case 5:
841                 *speed = HCLGE_MAC_SPEED_100G;
842                 break;
843         default:
844                 return -EINVAL;
845         }
846
847         return 0;
848 }
849
850 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
851 {
852         struct hclge_vport *vport = hclge_get_vport(handle);
853         struct hclge_dev *hdev = vport->back;
854         u32 speed_ability = hdev->hw.mac.speed_ability;
855         u32 speed_bit = 0;
856
857         switch (speed) {
858         case HCLGE_MAC_SPEED_10M:
859                 speed_bit = HCLGE_SUPPORT_10M_BIT;
860                 break;
861         case HCLGE_MAC_SPEED_100M:
862                 speed_bit = HCLGE_SUPPORT_100M_BIT;
863                 break;
864         case HCLGE_MAC_SPEED_1G:
865                 speed_bit = HCLGE_SUPPORT_1G_BIT;
866                 break;
867         case HCLGE_MAC_SPEED_10G:
868                 speed_bit = HCLGE_SUPPORT_10G_BIT;
869                 break;
870         case HCLGE_MAC_SPEED_25G:
871                 speed_bit = HCLGE_SUPPORT_25G_BIT;
872                 break;
873         case HCLGE_MAC_SPEED_40G:
874                 speed_bit = HCLGE_SUPPORT_40G_BIT;
875                 break;
876         case HCLGE_MAC_SPEED_50G:
877                 speed_bit = HCLGE_SUPPORT_50G_BIT;
878                 break;
879         case HCLGE_MAC_SPEED_100G:
880                 speed_bit = HCLGE_SUPPORT_100G_BIT;
881                 break;
882         default:
883                 return -EINVAL;
884         }
885
886         if (speed_bit & speed_ability)
887                 return 0;
888
889         return -EINVAL;
890 }
891
892 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
893 {
894         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
895                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
896                                  mac->supported);
897         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
898                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
899                                  mac->supported);
900         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
901                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
902                                  mac->supported);
903         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
904                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
905                                  mac->supported);
906         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
907                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
908                                  mac->supported);
909 }
910
911 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
912 {
913         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
914                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
915                                  mac->supported);
916         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
917                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
918                                  mac->supported);
919         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
920                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
921                                  mac->supported);
922         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
923                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
924                                  mac->supported);
925         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
926                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
927                                  mac->supported);
928 }
929
930 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
931 {
932         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
933                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
934                                  mac->supported);
935         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
936                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
937                                  mac->supported);
938         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
939                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
940                                  mac->supported);
941         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
942                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
943                                  mac->supported);
944         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
945                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
946                                  mac->supported);
947 }
948
949 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
950 {
951         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
952                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
953                                  mac->supported);
954         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
955                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
956                                  mac->supported);
957         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
958                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
959                                  mac->supported);
960         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
961                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
962                                  mac->supported);
963         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
964                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
965                                  mac->supported);
966         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
967                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
968                                  mac->supported);
969 }
970
971 static void hclge_convert_setting_fec(struct hclge_mac *mac)
972 {
973         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
974         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
975
976         switch (mac->speed) {
977         case HCLGE_MAC_SPEED_10G:
978         case HCLGE_MAC_SPEED_40G:
979                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
980                                  mac->supported);
981                 mac->fec_ability =
982                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
983                 break;
984         case HCLGE_MAC_SPEED_25G:
985         case HCLGE_MAC_SPEED_50G:
986                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
987                                  mac->supported);
988                 mac->fec_ability =
989                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
990                         BIT(HNAE3_FEC_AUTO);
991                 break;
992         case HCLGE_MAC_SPEED_100G:
993                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
994                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
995                 break;
996         default:
997                 mac->fec_ability = 0;
998                 break;
999         }
1000 }
1001
1002 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1003                                         u8 speed_ability)
1004 {
1005         struct hclge_mac *mac = &hdev->hw.mac;
1006
1007         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1008                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1009                                  mac->supported);
1010
1011         hclge_convert_setting_sr(mac, speed_ability);
1012         hclge_convert_setting_lr(mac, speed_ability);
1013         hclge_convert_setting_cr(mac, speed_ability);
1014         if (hdev->pdev->revision >= 0x21)
1015                 hclge_convert_setting_fec(mac);
1016
1017         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1018         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1019         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1020 }
1021
1022 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1023                                             u8 speed_ability)
1024 {
1025         struct hclge_mac *mac = &hdev->hw.mac;
1026
1027         hclge_convert_setting_kr(mac, speed_ability);
1028         if (hdev->pdev->revision >= 0x21)
1029                 hclge_convert_setting_fec(mac);
1030         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1031         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1032         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1033 }
1034
1035 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1036                                          u8 speed_ability)
1037 {
1038         unsigned long *supported = hdev->hw.mac.supported;
1039
1040         /* default to supporting all speeds for a GE port */
1041         if (!speed_ability)
1042                 speed_ability = HCLGE_SUPPORT_GE;
1043
1044         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1046                                  supported);
1047
1048         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1050                                  supported);
1051                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1052                                  supported);
1053         }
1054
1055         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1056                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1057                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1058         }
1059
1060         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1061         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1062         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1063 }
1064
1065 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1066 {
1067         u8 media_type = hdev->hw.mac.media_type;
1068
1069         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1070                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1071         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1072                 hclge_parse_copper_link_mode(hdev, speed_ability);
1073         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1074                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1075 }
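
/* Decode the configuration parameters returned by HCLGE_OPC_GET_CFG_PARAM
 * (the first two descriptors) into struct hclge_cfg: VMDq/TC/queue-depth
 * settings, PHY address, media type, RX buffer length, MAC address, default
 * speed, maximum RSS size, NUMA node map, speed ability and UMV table space.
 */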
1076 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1077 {
1078         struct hclge_cfg_param_cmd *req;
1079         u64 mac_addr_tmp_high;
1080         u64 mac_addr_tmp;
1081         int i;
1082
1083         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1084
1085         /* get the configuration */
1086         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1087                                               HCLGE_CFG_VMDQ_M,
1088                                               HCLGE_CFG_VMDQ_S);
1089         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1090                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1091         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1092                                             HCLGE_CFG_TQP_DESC_N_M,
1093                                             HCLGE_CFG_TQP_DESC_N_S);
1094
1095         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1096                                         HCLGE_CFG_PHY_ADDR_M,
1097                                         HCLGE_CFG_PHY_ADDR_S);
1098         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1099                                           HCLGE_CFG_MEDIA_TP_M,
1100                                           HCLGE_CFG_MEDIA_TP_S);
1101         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1102                                           HCLGE_CFG_RX_BUF_LEN_M,
1103                                           HCLGE_CFG_RX_BUF_LEN_S);
1104         /* get mac_address */
1105         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1106         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1107                                             HCLGE_CFG_MAC_ADDR_H_M,
1108                                             HCLGE_CFG_MAC_ADDR_H_S);
1109
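        /* The upper 16 bits of the MAC address sit above bit 31; the shift is
         * written as (<< 31) << 1, presumably to avoid a 32-bit shift-count
         * warning.
         */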
1110         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1111
1112         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1113                                              HCLGE_CFG_DEFAULT_SPEED_M,
1114                                              HCLGE_CFG_DEFAULT_SPEED_S);
1115         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1116                                             HCLGE_CFG_RSS_SIZE_M,
1117                                             HCLGE_CFG_RSS_SIZE_S);
1118
1119         for (i = 0; i < ETH_ALEN; i++)
1120                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1121
1122         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1123         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1124
1125         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1126                                              HCLGE_CFG_SPEED_ABILITY_M,
1127                                              HCLGE_CFG_SPEED_ABILITY_S);
1128         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1129                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1130                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1131         if (!cfg->umv_space)
1132                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1133 }
1134
1135 /* hclge_get_cfg: query the static parameters from flash
1136  * @hdev: pointer to struct hclge_dev
1137  * @hcfg: the config structure to be filled in
1138  */
1139 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1140 {
1141         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1142         struct hclge_cfg_param_cmd *req;
1143         int i, ret;
1144
1145         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1146                 u32 offset = 0;
1147
1148                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1149                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1150                                            true);
1151                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1152                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1153                 /* Length must be in units of 4 bytes when sent to hardware */
1154                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1155                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1156                 req->offset = cpu_to_le32(offset);
1157         }
1158
1159         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1160         if (ret) {
1161                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1162                 return ret;
1163         }
1164
1165         hclge_parse_cfg(hcfg, desc);
1166
1167         return 0;
1168 }
1169
1170 static int hclge_get_cap(struct hclge_dev *hdev)
1171 {
1172         int ret;
1173
1174         ret = hclge_query_function_status(hdev);
1175         if (ret) {
1176                 dev_err(&hdev->pdev->dev,
1177                         "query function status error %d.\n", ret);
1178                 return ret;
1179         }
1180
1181         /* get pf resource */
1182         ret = hclge_query_pf_resource(hdev);
1183         if (ret)
1184                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1185
1186         return ret;
1187 }
1188
1189 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1190 {
1191 #define HCLGE_MIN_TX_DESC       64
1192 #define HCLGE_MIN_RX_DESC       64
1193
1194         if (!is_kdump_kernel())
1195                 return;
1196
1197         dev_info(&hdev->pdev->dev,
1198                  "Running kdump kernel. Using minimal resources\n");
1199
1200         /* minimum number of queue pairs equals the number of vports */
1201         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1202         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1203         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1204 }
1205
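/* Read the static configuration from flash via hclge_get_cfg() and initialize
 * the corresponding hclge_dev fields: queue and TC limits, MAC address, media
 * type, default speed, supported link modes and UMV space. Devices without
 * DCB support are limited to a single TC.
 */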
1206 static int hclge_configure(struct hclge_dev *hdev)
1207 {
1208         struct hclge_cfg cfg;
1209         int ret, i;
1210
1211         ret = hclge_get_cfg(hdev, &cfg);
1212         if (ret) {
1213                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1214                 return ret;
1215         }
1216
1217         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1218         hdev->base_tqp_pid = 0;
1219         hdev->rss_size_max = cfg.rss_size_max;
1220         hdev->rx_buf_len = cfg.rx_buf_len;
1221         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1222         hdev->hw.mac.media_type = cfg.media_type;
1223         hdev->hw.mac.phy_addr = cfg.phy_addr;
1224         hdev->num_tx_desc = cfg.tqp_desc_num;
1225         hdev->num_rx_desc = cfg.tqp_desc_num;
1226         hdev->tm_info.num_pg = 1;
1227         hdev->tc_max = cfg.tc_num;
1228         hdev->tm_info.hw_pfc_map = 0;
1229         hdev->wanted_umv_size = cfg.umv_space;
1230
1231         if (hnae3_dev_fd_supported(hdev)) {
1232                 hdev->fd_en = true;
1233                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1234         }
1235
1236         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1237         if (ret) {
1238                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1239                 return ret;
1240         }
1241
1242         hclge_parse_link_mode(hdev, cfg.speed_ability);
1243
1244         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1245             (hdev->tc_max < 1)) {
1246                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1247                          hdev->tc_max);
1248                 hdev->tc_max = 1;
1249         }
1250
1251         /* Dev does not support DCB */
1252         if (!hnae3_dev_dcb_supported(hdev)) {
1253                 hdev->tc_max = 1;
1254                 hdev->pfc_max = 0;
1255         } else {
1256                 hdev->pfc_max = hdev->tc_max;
1257         }
1258
1259         hdev->tm_info.num_tc = 1;
1260
1261         /* Non-contiguous TCs are not currently supported */
1262         for (i = 0; i < hdev->tm_info.num_tc; i++)
1263                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1264
1265         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1266
1267         hclge_init_kdump_kernel_config(hdev);
1268
1269         return ret;
1270 }
1271
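/* Program the minimum and maximum TSO MSS values into the hardware with the
 * HCLGE_OPC_TSO_GENERIC_CONFIG command.
 */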
1272 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1273                             int tso_mss_max)
1274 {
1275         struct hclge_cfg_tso_status_cmd *req;
1276         struct hclge_desc desc;
1277         u16 tso_mss;
1278
1279         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1280
1281         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1282
1283         tso_mss = 0;
1284         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1285                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1286         req->tso_mss_min = cpu_to_le16(tso_mss);
1287
1288         tso_mss = 0;
1289         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1290                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1291         req->tso_mss_max = cpu_to_le16(tso_mss);
1292
1293         return hclge_cmd_send(&hdev->hw, &desc, 1);
1294 }
1295
1296 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1297 {
1298         struct hclge_cfg_gro_status_cmd *req;
1299         struct hclge_desc desc;
1300         int ret;
1301
1302         if (!hnae3_dev_gro_supported(hdev))
1303                 return 0;
1304
1305         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1306         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1307
1308         req->gro_en = cpu_to_le16(en ? 1 : 0);
1309
1310         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1311         if (ret)
1312                 dev_err(&hdev->pdev->dev,
1313                         "GRO hardware config cmd failed, ret = %d\n", ret);
1314
1315         return ret;
1316 }
1317
1318 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1319 {
1320         struct hclge_tqp *tqp;
1321         int i;
1322
1323         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1324                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1325         if (!hdev->htqp)
1326                 return -ENOMEM;
1327
1328         tqp = hdev->htqp;
1329
1330         for (i = 0; i < hdev->num_tqps; i++) {
1331                 tqp->dev = &hdev->pdev->dev;
1332                 tqp->index = i;
1333
1334                 tqp->q.ae_algo = &ae_algo;
1335                 tqp->q.buf_size = hdev->rx_buf_len;
1336                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1337                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1338                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1339                         i * HCLGE_TQP_REG_SIZE;
1340
1341                 tqp++;
1342         }
1343
1344         return 0;
1345 }
1346
1347 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1348                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1349 {
1350         struct hclge_tqp_map_cmd *req;
1351         struct hclge_desc desc;
1352         int ret;
1353
1354         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1355
1356         req = (struct hclge_tqp_map_cmd *)desc.data;
1357         req->tqp_id = cpu_to_le16(tqp_pid);
1358         req->tqp_vf = func_id;
1359         req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1360                         1 << HCLGE_TQP_MAP_EN_B;
1361         req->tqp_vid = cpu_to_le16(tqp_vid);
1362
1363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1364         if (ret)
1365                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1366
1367         return ret;
1368 }
1369
1370 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1371 {
1372         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1373         struct hclge_dev *hdev = vport->back;
1374         int i, alloced;
1375
1376         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1377              alloced < num_tqps; i++) {
1378                 if (!hdev->htqp[i].alloced) {
1379                         hdev->htqp[i].q.handle = &vport->nic;
1380                         hdev->htqp[i].q.tqp_index = alloced;
1381                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1382                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1383                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1384                         hdev->htqp[i].alloced = true;
1385                         alloced++;
1386                 }
1387         }
1388         vport->alloc_tqps = alloced;
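        /* rss_size is capped at both the hardware maximum and the number of
         * queues available per TC.
         */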
1389         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1390                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1391
1392         return 0;
1393 }
1394
1395 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1396                             u16 num_tx_desc, u16 num_rx_desc)
1397
1398 {
1399         struct hnae3_handle *nic = &vport->nic;
1400         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1401         struct hclge_dev *hdev = vport->back;
1402         int ret;
1403
1404         kinfo->num_tx_desc = num_tx_desc;
1405         kinfo->num_rx_desc = num_rx_desc;
1406
1407         kinfo->rx_buf_len = hdev->rx_buf_len;
1408
1409         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1410                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1411         if (!kinfo->tqp)
1412                 return -ENOMEM;
1413
1414         ret = hclge_assign_tqp(vport, num_tqps);
1415         if (ret)
1416                 dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1417
1418         return ret;
1419 }
1420
1421 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1422                                   struct hclge_vport *vport)
1423 {
1424         struct hnae3_handle *nic = &vport->nic;
1425         struct hnae3_knic_private_info *kinfo;
1426         u16 i;
1427
1428         kinfo = &nic->kinfo;
1429         for (i = 0; i < vport->alloc_tqps; i++) {
1430                 struct hclge_tqp *q =
1431                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1432                 bool is_pf;
1433                 int ret;
1434
1435                 is_pf = !(vport->vport_id);
1436                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1437                                              i, is_pf);
1438                 if (ret)
1439                         return ret;
1440         }
1441
1442         return 0;
1443 }
1444
1445 static int hclge_map_tqp(struct hclge_dev *hdev)
1446 {
1447         struct hclge_vport *vport = hdev->vport;
1448         u16 i, num_vport;
1449
1450         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1451         for (i = 0; i < num_vport; i++) {
1452                 int ret;
1453
1454                 ret = hclge_map_tqp_to_vport(hdev, vport);
1455                 if (ret)
1456                         return ret;
1457
1458                 vport++;
1459         }
1460
1461         return 0;
1462 }
1463
1464 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1465 {
1466         /* this would be initialized later */
1467 }
1468
1469 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1470 {
1471         struct hnae3_handle *nic = &vport->nic;
1472         struct hclge_dev *hdev = vport->back;
1473         int ret;
1474
1475         nic->pdev = hdev->pdev;
1476         nic->ae_algo = &ae_algo;
1477         nic->numa_node_mask = hdev->numa_node_mask;
1478
1479         if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1480                 ret = hclge_knic_setup(vport, num_tqps,
1481                                        hdev->num_tx_desc, hdev->num_rx_desc);
1482
1483                 if (ret) {
1484                         dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1485                                 ret);
1486                         return ret;
1487                 }
1488         } else {
1489                 hclge_unic_setup(vport, num_tqps);
1490         }
1491
1492         return 0;
1493 }
1494
1495 static int hclge_alloc_vport(struct hclge_dev *hdev)
1496 {
1497         struct pci_dev *pdev = hdev->pdev;
1498         struct hclge_vport *vport;
1499         u32 tqp_main_vport;
1500         u32 tqp_per_vport;
1501         int num_vport, i;
1502         int ret;
1503
1504         /* We need to alloc a vport for the main NIC of the PF */
1505         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1506
1507         if (hdev->num_tqps < num_vport) {
1508                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1509                         hdev->num_tqps, num_vport);
1510                 return -EINVAL;
1511         }
1512
1513         /* Alloc the same number of TQPs for every vport */
1514         tqp_per_vport = hdev->num_tqps / num_vport;
1515         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
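        /* The main (PF) vport absorbs the remainder, e.g. 17 TQPs across
         * 4 vports gives 4 TQPs per vport and 4 + 1 = 5 for the main vport.
         */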
1516
1517         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1518                              GFP_KERNEL);
1519         if (!vport)
1520                 return -ENOMEM;
1521
1522         hdev->vport = vport;
1523         hdev->num_alloc_vport = num_vport;
1524
1525         if (IS_ENABLED(CONFIG_PCI_IOV))
1526                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1527
1528         for (i = 0; i < num_vport; i++) {
1529                 vport->back = hdev;
1530                 vport->vport_id = i;
1531                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1532                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1533                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1534                 INIT_LIST_HEAD(&vport->vlan_list);
1535                 INIT_LIST_HEAD(&vport->uc_mac_list);
1536                 INIT_LIST_HEAD(&vport->mc_mac_list);
1537
1538                 if (i == 0)
1539                         ret = hclge_vport_setup(vport, tqp_main_vport);
1540                 else
1541                         ret = hclge_vport_setup(vport, tqp_per_vport);
1542                 if (ret) {
1543                         dev_err(&pdev->dev,
1544                                 "vport setup failed for vport %d, %d\n",
1545                                 i, ret);
1546                         return ret;
1547                 }
1548
1549                 vport++;
1550         }
1551
1552         return 0;
1553 }
1554
1555 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1556                                     struct hclge_pkt_buf_alloc *buf_alloc)
1557 {
1558 /* TX buffer size is allocated in units of 128 bytes */
1559 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1560 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1561         struct hclge_tx_buff_alloc_cmd *req;
1562         struct hclge_desc desc;
1563         int ret;
1564         u8 i;
1565
1566         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1567
1568         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1569         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1570                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1571
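                /* Encode the size in 128-byte units (e.g. 2048 bytes -> 16)
                 * and OR in the update-enable bit.
                 */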
1572                 req->tx_pkt_buff[i] =
1573                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1574                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1575         }
1576
1577         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1578         if (ret)
1579                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1580                         ret);
1581
1582         return ret;
1583 }
1584
1585 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1586                                  struct hclge_pkt_buf_alloc *buf_alloc)
1587 {
1588         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1589
1590         if (ret)
1591                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1592
1593         return ret;
1594 }
1595
1596 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1597 {
1598         int i, cnt = 0;
1599
1600         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1601                 if (hdev->hw_tc_map & BIT(i))
1602                         cnt++;
1603         return cnt;
1604 }
1605
1606 /* Get the number of PFC-enabled TCs, which have private buffers */
1607 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1608                                   struct hclge_pkt_buf_alloc *buf_alloc)
1609 {
1610         struct hclge_priv_buf *priv;
1611         int i, cnt = 0;
1612
1613         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1614                 priv = &buf_alloc->priv_buf[i];
1615                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1616                     priv->enable)
1617                         cnt++;
1618         }
1619
1620         return cnt;
1621 }
1622
1623 /* Get the number of PFC-disabled TCs, which have private buffers */
1624 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1625                                      struct hclge_pkt_buf_alloc *buf_alloc)
1626 {
1627         struct hclge_priv_buf *priv;
1628         int i, cnt = 0;
1629
1630         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1631                 priv = &buf_alloc->priv_buf[i];
1632                 if (hdev->hw_tc_map & BIT(i) &&
1633                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1634                     priv->enable)
1635                         cnt++;
1636         }
1637
1638         return cnt;
1639 }
1640
1641 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1642 {
1643         struct hclge_priv_buf *priv;
1644         u32 rx_priv = 0;
1645         int i;
1646
1647         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1648                 priv = &buf_alloc->priv_buf[i];
1649                 if (priv->enable)
1650                         rx_priv += priv->buf_size;
1651         }
1652         return rx_priv;
1653 }
1654
1655 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1656 {
1657         u32 i, total_tx_size = 0;
1658
1659         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1660                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1661
1662         return total_tx_size;
1663 }
1664
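/* Check whether the rx buffer left over after the private buffers (rx_all
 * minus the allocated private buffers) is large enough to serve as the
 * shared buffer, and if so fill in the shared buffer size and the per-TC
 * high/low thresholds.
 */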
1665 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1666                                 struct hclge_pkt_buf_alloc *buf_alloc,
1667                                 u32 rx_all)
1668 {
1669         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1670         u32 tc_num = hclge_get_tc_num(hdev);
1671         u32 shared_buf, aligned_mps;
1672         u32 rx_priv;
1673         int i;
1674
1675         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1676
1677         if (hnae3_dev_dcb_supported(hdev))
1678                 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1679         else
1680                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1681                                         + hdev->dv_buf_size;
1682
1683         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1684         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1685                              HCLGE_BUF_SIZE_UNIT);
1686
1687         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1688         if (rx_all < rx_priv + shared_std)
1689                 return false;
1690
1691         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1692         buf_alloc->s_buf.buf_size = shared_buf;
1693         if (hnae3_dev_dcb_supported(hdev)) {
1694                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1695                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1696                         - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1697         } else {
1698                 buf_alloc->s_buf.self.high = aligned_mps +
1699                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1700                 buf_alloc->s_buf.self.low = aligned_mps;
1701         }
1702
1703         if (hnae3_dev_dcb_supported(hdev)) {
1704                 if (tc_num)
1705                         hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1706                 else
1707                         hi_thrd = shared_buf - hdev->dv_buf_size;
1708
1709                 hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1710                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1711                 lo_thrd = hi_thrd - aligned_mps / 2;
1712         } else {
1713                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1714                 lo_thrd = aligned_mps;
1715         }
1716
1717         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1718                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1719                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1720         }
1721
1722         return true;
1723 }
1724
1725 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1726                                 struct hclge_pkt_buf_alloc *buf_alloc)
1727 {
1728         u32 i, total_size;
1729
1730         total_size = hdev->pkt_buf_size;
1731
1732         /* alloc tx buffer for all enabled tc */
1733         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1734                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1735
1736                 if (hdev->hw_tc_map & BIT(i)) {
1737                         if (total_size < hdev->tx_buf_size)
1738                                 return -ENOMEM;
1739
1740                         priv->tx_buf_size = hdev->tx_buf_size;
1741                 } else {
1742                         priv->tx_buf_size = 0;
1743                 }
1744
1745                 total_size -= priv->tx_buf_size;
1746         }
1747
1748         return 0;
1749 }
1750
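/* Assign a private rx buffer and waterlines to every enabled TC, using the
 * larger waterline profile when @max is true, then check whether the
 * remaining space still fits the shared buffer.
 */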
1751 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1752                                   struct hclge_pkt_buf_alloc *buf_alloc)
1753 {
1754         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1755         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1756         int i;
1757
1758         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1759                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1760
1761                 priv->enable = 0;
1762                 priv->wl.low = 0;
1763                 priv->wl.high = 0;
1764                 priv->buf_size = 0;
1765
1766                 if (!(hdev->hw_tc_map & BIT(i)))
1767                         continue;
1768
1769                 priv->enable = 1;
1770
1771                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1772                         priv->wl.low = max ? aligned_mps : 256;
1773                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1774                                                 HCLGE_BUF_SIZE_UNIT);
1775                 } else {
1776                         priv->wl.low = 0;
1777                         priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1778                 }
1779
1780                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1781         }
1782
1783         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1784 }
1785
1786 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1787                                           struct hclge_pkt_buf_alloc *buf_alloc)
1788 {
1789         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1790         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1791         int i;
1792
1793         /* clear the private buffers starting from the last TC */
1794         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1795                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1796
1797                 if (hdev->hw_tc_map & BIT(i) &&
1798                     !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1799                         /* Clear the no pfc TC private buffer */
1800                         priv->wl.low = 0;
1801                         priv->wl.high = 0;
1802                         priv->buf_size = 0;
1803                         priv->enable = 0;
1804                         no_pfc_priv_num--;
1805                 }
1806
1807                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1808                     no_pfc_priv_num == 0)
1809                         break;
1810         }
1811
1812         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1813 }
1814
1815 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1816                                         struct hclge_pkt_buf_alloc *buf_alloc)
1817 {
1818         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1819         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1820         int i;
1821
1822         /* clear the private buffers starting from the last TC */
1823         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1824                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1825
1826                 if (hdev->hw_tc_map & BIT(i) &&
1827                     hdev->tm_info.hw_pfc_map & BIT(i)) {
1828                         /* Reduce the number of pfc TC with private buffer */
1829                         priv->wl.low = 0;
1830                         priv->enable = 0;
1831                         priv->wl.high = 0;
1832                         priv->buf_size = 0;
1833                         pfc_priv_num--;
1834                 }
1835
1836                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1837                     pfc_priv_num == 0)
1838                         break;
1839         }
1840
1841         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1842 }
1843
1844 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1845  * @hdev: pointer to struct hclge_dev
1846  * @buf_alloc: pointer to buffer calculation data
1847  * @return: 0 on successful calculation, negative on failure
1848  */
1849 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1850                                 struct hclge_pkt_buf_alloc *buf_alloc)
1851 {
1852         /* When DCB is not supported, rx private buffer is not allocated. */
1853         if (!hnae3_dev_dcb_supported(hdev)) {
1854                 u32 rx_all = hdev->pkt_buf_size;
1855
1856                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1857                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1858                         return -ENOMEM;
1859
1860                 return 0;
1861         }
1862
1863         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1864                 return 0;
1865
1866         /* try to decrease the buffer size */
1867         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1868                 return 0;
1869
1870         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1871                 return 0;
1872
1873         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1874                 return 0;
1875
1876         return -ENOMEM;
1877 }
1878
1879 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1880                                    struct hclge_pkt_buf_alloc *buf_alloc)
1881 {
1882         struct hclge_rx_priv_buff_cmd *req;
1883         struct hclge_desc desc;
1884         int ret;
1885         int i;
1886
1887         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1888         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1889
1890         /* Alloc private buffer TCs */
1891         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1892                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1893
1894                 req->buf_num[i] =
1895                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1896                 req->buf_num[i] |=
1897                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1898         }
1899
1900         req->shared_buf =
1901                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1902                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1903
1904         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1905         if (ret)
1906                 dev_err(&hdev->pdev->dev,
1907                         "rx private buffer alloc cmd failed %d\n", ret);
1908
1909         return ret;
1910 }
1911
1912 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1913                                    struct hclge_pkt_buf_alloc *buf_alloc)
1914 {
1915         struct hclge_rx_priv_wl_buf *req;
1916         struct hclge_priv_buf *priv;
1917         struct hclge_desc desc[2];
1918         int i, j;
1919         int ret;
1920
1921         for (i = 0; i < 2; i++) {
1922                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1923                                            false);
1924                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1925
1926                 /* The first descriptor sets the NEXT bit to 1 */
1927                 if (i == 0)
1928                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1929                 else
1930                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1931
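                /* Each descriptor carries the waterlines of
                 * HCLGE_TC_NUM_ONE_DESC TCs, so the two descriptors together
                 * cover all the TCs.
                 */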
1932                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1933                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1934
1935                         priv = &buf_alloc->priv_buf[idx];
1936                         req->tc_wl[j].high =
1937                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1938                         req->tc_wl[j].high |=
1939                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1940                         req->tc_wl[j].low =
1941                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1942                         req->tc_wl[j].low |=
1943                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1944                 }
1945         }
1946
1947         /* Send 2 descriptors at one time */
1948         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1949         if (ret)
1950                 dev_err(&hdev->pdev->dev,
1951                         "rx private waterline config cmd failed %d\n",
1952                         ret);
1953         return ret;
1954 }
1955
1956 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1957                                     struct hclge_pkt_buf_alloc *buf_alloc)
1958 {
1959         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1960         struct hclge_rx_com_thrd *req;
1961         struct hclge_desc desc[2];
1962         struct hclge_tc_thrd *tc;
1963         int i, j;
1964         int ret;
1965
1966         for (i = 0; i < 2; i++) {
1967                 hclge_cmd_setup_basic_desc(&desc[i],
1968                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1969                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1970
1971                 /* The first descriptor sets the NEXT bit to 1 */
1972                 if (i == 0)
1973                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1974                 else
1975                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1976
1977                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1978                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1979
1980                         req->com_thrd[j].high =
1981                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1982                         req->com_thrd[j].high |=
1983                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1984                         req->com_thrd[j].low =
1985                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1986                         req->com_thrd[j].low |=
1987                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1988                 }
1989         }
1990
1991         /* Send 2 descriptors at one time */
1992         ret = hclge_cmd_send(&hdev->hw, desc, 2);
1993         if (ret)
1994                 dev_err(&hdev->pdev->dev,
1995                         "common threshold config cmd failed %d\n", ret);
1996         return ret;
1997 }
1998
1999 static int hclge_common_wl_config(struct hclge_dev *hdev,
2000                                   struct hclge_pkt_buf_alloc *buf_alloc)
2001 {
2002         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2003         struct hclge_rx_com_wl *req;
2004         struct hclge_desc desc;
2005         int ret;
2006
2007         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2008
2009         req = (struct hclge_rx_com_wl *)desc.data;
2010         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2011         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2012
2013         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2014         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2015
2016         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2017         if (ret)
2018                 dev_err(&hdev->pdev->dev,
2019                         "common waterline config cmd failed %d\n", ret);
2020
2021         return ret;
2022 }
2023
2024 int hclge_buffer_alloc(struct hclge_dev *hdev)
2025 {
2026         struct hclge_pkt_buf_alloc *pkt_buf;
2027         int ret;
2028
2029         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2030         if (!pkt_buf)
2031                 return -ENOMEM;
2032
2033         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2034         if (ret) {
2035                 dev_err(&hdev->pdev->dev,
2036                         "could not calc tx buffer size for all TCs %d\n", ret);
2037                 goto out;
2038         }
2039
2040         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2041         if (ret) {
2042                 dev_err(&hdev->pdev->dev,
2043                         "could not alloc tx buffers %d\n", ret);
2044                 goto out;
2045         }
2046
2047         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2048         if (ret) {
2049                 dev_err(&hdev->pdev->dev,
2050                         "could not calc rx priv buffer size for all TCs %d\n",
2051                         ret);
2052                 goto out;
2053         }
2054
2055         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2056         if (ret) {
2057                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2058                         ret);
2059                 goto out;
2060         }
2061
2062         if (hnae3_dev_dcb_supported(hdev)) {
2063                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2064                 if (ret) {
2065                         dev_err(&hdev->pdev->dev,
2066                                 "could not configure rx private waterline %d\n",
2067                                 ret);
2068                         goto out;
2069                 }
2070
2071                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2072                 if (ret) {
2073                         dev_err(&hdev->pdev->dev,
2074                                 "could not configure common threshold %d\n",
2075                                 ret);
2076                         goto out;
2077                 }
2078         }
2079
2080         ret = hclge_common_wl_config(hdev, pkt_buf);
2081         if (ret)
2082                 dev_err(&hdev->pdev->dev,
2083                         "could not configure common waterline %d\n", ret);
2084
2085 out:
2086         kfree(pkt_buf);
2087         return ret;
2088 }
2089
2090 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2091 {
2092         struct hnae3_handle *roce = &vport->roce;
2093         struct hnae3_handle *nic = &vport->nic;
2094
2095         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2096
2097         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2098             vport->back->num_msi_left == 0)
2099                 return -EINVAL;
2100
2101         roce->rinfo.base_vector = vport->back->roce_base_vector;
2102
2103         roce->rinfo.netdev = nic->kinfo.netdev;
2104         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2105
2106         roce->pdev = nic->pdev;
2107         roce->ae_algo = nic->ae_algo;
2108         roce->numa_node_mask = nic->numa_node_mask;
2109
2110         return 0;
2111 }
2112
2113 static int hclge_init_msi(struct hclge_dev *hdev)
2114 {
2115         struct pci_dev *pdev = hdev->pdev;
2116         int vectors;
2117         int i;
2118
2119         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2120                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2121         if (vectors < 0) {
2122                 dev_err(&pdev->dev,
2123                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2124                         vectors);
2125                 return vectors;
2126         }
2127         if (vectors < hdev->num_msi)
2128                 dev_warn(&hdev->pdev->dev,
2129                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2130                          hdev->num_msi, vectors);
2131
2132         hdev->num_msi = vectors;
2133         hdev->num_msi_left = vectors;
2134         hdev->base_msi_vector = pdev->irq;
2135         hdev->roce_base_vector = hdev->base_msi_vector +
2136                                 hdev->roce_base_msix_offset;
2137
2138         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2139                                            sizeof(u16), GFP_KERNEL);
2140         if (!hdev->vector_status) {
2141                 pci_free_irq_vectors(pdev);
2142                 return -ENOMEM;
2143         }
2144
2145         for (i = 0; i < hdev->num_msi; i++)
2146                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2147
2148         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2149                                         sizeof(int), GFP_KERNEL);
2150         if (!hdev->vector_irq) {
2151                 pci_free_irq_vectors(pdev);
2152                 return -ENOMEM;
2153         }
2154
2155         return 0;
2156 }
2157
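/* Half duplex is only meaningful at 10M/100M; force full duplex for every
 * other speed.
 */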
2158 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2159 {
2160
2161         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2162                 duplex = HCLGE_MAC_FULL;
2163
2164         return duplex;
2165 }
2166
2167 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2168                                       u8 duplex)
2169 {
2170         struct hclge_config_mac_speed_dup_cmd *req;
2171         struct hclge_desc desc;
2172         int ret;
2173
2174         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2175
2176         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2177
2178         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2179
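        /* Map the MAC speed to the firmware's speed encoding used by the
         * speed/duplex config command (1G = 0, 10G = 1, ..., 10M = 6,
         * 100M = 7).
         */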
2180         switch (speed) {
2181         case HCLGE_MAC_SPEED_10M:
2182                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2183                                 HCLGE_CFG_SPEED_S, 6);
2184                 break;
2185         case HCLGE_MAC_SPEED_100M:
2186                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2187                                 HCLGE_CFG_SPEED_S, 7);
2188                 break;
2189         case HCLGE_MAC_SPEED_1G:
2190                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2191                                 HCLGE_CFG_SPEED_S, 0);
2192                 break;
2193         case HCLGE_MAC_SPEED_10G:
2194                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2195                                 HCLGE_CFG_SPEED_S, 1);
2196                 break;
2197         case HCLGE_MAC_SPEED_25G:
2198                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2199                                 HCLGE_CFG_SPEED_S, 2);
2200                 break;
2201         case HCLGE_MAC_SPEED_40G:
2202                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2203                                 HCLGE_CFG_SPEED_S, 3);
2204                 break;
2205         case HCLGE_MAC_SPEED_50G:
2206                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2207                                 HCLGE_CFG_SPEED_S, 4);
2208                 break;
2209         case HCLGE_MAC_SPEED_100G:
2210                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2211                                 HCLGE_CFG_SPEED_S, 5);
2212                 break;
2213         default:
2214                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2215                 return -EINVAL;
2216         }
2217
2218         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2219                       1);
2220
2221         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2222         if (ret) {
2223                 dev_err(&hdev->pdev->dev,
2224                         "mac speed/duplex config cmd failed %d.\n", ret);
2225                 return ret;
2226         }
2227
2228         return 0;
2229 }
2230
2231 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2232 {
2233         int ret;
2234
2235         duplex = hclge_check_speed_dup(duplex, speed);
2236         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2237                 return 0;
2238
2239         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2240         if (ret)
2241                 return ret;
2242
2243         hdev->hw.mac.speed = speed;
2244         hdev->hw.mac.duplex = duplex;
2245
2246         return 0;
2247 }
2248
2249 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2250                                      u8 duplex)
2251 {
2252         struct hclge_vport *vport = hclge_get_vport(handle);
2253         struct hclge_dev *hdev = vport->back;
2254
2255         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2256 }
2257
2258 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2259 {
2260         struct hclge_config_auto_neg_cmd *req;
2261         struct hclge_desc desc;
2262         u32 flag = 0;
2263         int ret;
2264
2265         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2266
2267         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2268         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2269         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2270
2271         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2272         if (ret)
2273                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2274                         ret);
2275
2276         return ret;
2277 }
2278
2279 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2280 {
2281         struct hclge_vport *vport = hclge_get_vport(handle);
2282         struct hclge_dev *hdev = vport->back;
2283
2284         if (!hdev->hw.mac.support_autoneg) {
2285                 if (enable) {
2286                         dev_err(&hdev->pdev->dev,
2287                                 "autoneg is not supported by current port\n");
2288                         return -EOPNOTSUPP;
2289                 } else {
2290                         return 0;
2291                 }
2292         }
2293
2294         return hclge_set_autoneg_en(hdev, enable);
2295 }
2296
2297 static int hclge_get_autoneg(struct hnae3_handle *handle)
2298 {
2299         struct hclge_vport *vport = hclge_get_vport(handle);
2300         struct hclge_dev *hdev = vport->back;
2301         struct phy_device *phydev = hdev->hw.mac.phydev;
2302
2303         if (phydev)
2304                 return phydev->autoneg;
2305
2306         return hdev->hw.mac.autoneg;
2307 }
2308
2309 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2310 {
2311         struct hclge_vport *vport = hclge_get_vport(handle);
2312         struct hclge_dev *hdev = vport->back;
2313         int ret;
2314
2315         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2316
2317         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2318         if (ret)
2319                 return ret;
2320         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2321 }
2322
2323 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2324 {
2325         struct hclge_config_fec_cmd *req;
2326         struct hclge_desc desc;
2327         int ret;
2328
2329         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2330
2331         req = (struct hclge_config_fec_cmd *)desc.data;
2332         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2333                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2334         if (fec_mode & BIT(HNAE3_FEC_RS))
2335                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2336                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2337         if (fec_mode & BIT(HNAE3_FEC_BASER))
2338                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2339                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2340
2341         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2342         if (ret)
2343                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2344
2345         return ret;
2346 }
2347
2348 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2349 {
2350         struct hclge_vport *vport = hclge_get_vport(handle);
2351         struct hclge_dev *hdev = vport->back;
2352         struct hclge_mac *mac = &hdev->hw.mac;
2353         int ret;
2354
2355         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2356                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2357                 return -EINVAL;
2358         }
2359
2360         ret = hclge_set_fec_hw(hdev, fec_mode);
2361         if (ret)
2362                 return ret;
2363
2364         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2365         return 0;
2366 }
2367
2368 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2369                           u8 *fec_mode)
2370 {
2371         struct hclge_vport *vport = hclge_get_vport(handle);
2372         struct hclge_dev *hdev = vport->back;
2373         struct hclge_mac *mac = &hdev->hw.mac;
2374
2375         if (fec_ability)
2376                 *fec_ability = mac->fec_ability;
2377         if (fec_mode)
2378                 *fec_mode = mac->fec_mode;
2379 }
2380
2381 static int hclge_mac_init(struct hclge_dev *hdev)
2382 {
2383         struct hclge_mac *mac = &hdev->hw.mac;
2384         int ret;
2385
2386         hdev->support_sfp_query = true;
2387         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2388         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2389                                          hdev->hw.mac.duplex);
2390         if (ret) {
2391                 dev_err(&hdev->pdev->dev,
2392                         "failed to config mac speed/duplex, ret = %d\n", ret);
2393                 return ret;
2394         }
2395
2396         mac->link = 0;
2397
2398         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2399                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2400                 if (ret) {
2401                         dev_err(&hdev->pdev->dev,
2402                                 "Fec mode init fail, ret = %d\n", ret);
2403                         return ret;
2404                 }
2405         }
2406
2407         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2408         if (ret) {
2409                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2410                 return ret;
2411         }
2412
2413         ret = hclge_buffer_alloc(hdev);
2414         if (ret)
2415                 dev_err(&hdev->pdev->dev,
2416                         "allocate buffer fail, ret=%d\n", ret);
2417
2418         return ret;
2419 }
2420
2421 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2422 {
2423         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2424             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2425                 schedule_work(&hdev->mbx_service_task);
2426 }
2427
2428 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2429 {
2430         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2431             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2432                 schedule_work(&hdev->rst_service_task);
2433 }
2434
2435 static void hclge_task_schedule(struct hclge_dev *hdev)
2436 {
2437         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2438             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2439             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2440                 (void)schedule_work(&hdev->service_task);
2441 }
2442
2443 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2444 {
2445         struct hclge_link_status_cmd *req;
2446         struct hclge_desc desc;
2447         int link_status;
2448         int ret;
2449
2450         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2451         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2452         if (ret) {
2453                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2454                         ret);
2455                 return ret;
2456         }
2457
2458         req = (struct hclge_link_status_cmd *)desc.data;
2459         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2460
2461         return !!link_status;
2462 }
2463
2464 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2465 {
2466         int mac_state;
2467         int link_stat;
2468
2469         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2470                 return 0;
2471
2472         mac_state = hclge_get_mac_link_status(hdev);
2473
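        /* When a PHY is attached, report link up only if both the MAC and
         * the running PHY report up; otherwise the MAC state alone decides.
         */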
2474         if (hdev->hw.mac.phydev) {
2475                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2476                         link_stat = mac_state &
2477                                 hdev->hw.mac.phydev->link;
2478                 else
2479                         link_stat = 0;
2480
2481         } else {
2482                 link_stat = mac_state;
2483         }
2484
2485         return !!link_stat;
2486 }
2487
2488 static void hclge_update_link_status(struct hclge_dev *hdev)
2489 {
2490         struct hnae3_client *rclient = hdev->roce_client;
2491         struct hnae3_client *client = hdev->nic_client;
2492         struct hnae3_handle *rhandle;
2493         struct hnae3_handle *handle;
2494         int state;
2495         int i;
2496
2497         if (!client)
2498                 return;
2499         state = hclge_get_mac_phy_link(hdev);
2500         if (state != hdev->hw.mac.link) {
2501                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2502                         handle = &hdev->vport[i].nic;
2503                         client->ops->link_status_change(handle, state);
2504                         hclge_config_mac_tnl_int(hdev, state);
2505                         rhandle = &hdev->vport[i].roce;
2506                         if (rclient && rclient->ops->link_status_change)
2507                                 rclient->ops->link_status_change(rhandle,
2508                                                                  state);
2509                 }
2510                 hdev->hw.mac.link = state;
2511         }
2512 }
2513
2514 static void hclge_update_port_capability(struct hclge_mac *mac)
2515 {
2516         /* update fec ability by speed */
2517         hclge_convert_setting_fec(mac);
2518
2519         /* firmware cannot identify the backplane type; the media type
2520          * read from the configuration helps to handle it
2521          */
2522         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2523             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2524                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2525         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2526                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2527
2528         if (mac->support_autoneg) {
2529                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2530                 linkmode_copy(mac->advertising, mac->supported);
2531         } else {
2532                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2533                                    mac->supported);
2534                 linkmode_zero(mac->advertising);
2535         }
2536 }
2537
2538 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2539 {
2540         struct hclge_sfp_info_cmd *resp = NULL;
2541         struct hclge_desc desc;
2542         int ret;
2543
2544         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2545         resp = (struct hclge_sfp_info_cmd *)desc.data;
2546         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2547         if (ret == -EOPNOTSUPP) {
2548                 dev_warn(&hdev->pdev->dev,
2549                          "IMP does not support getting SFP speed %d\n", ret);
2550                 return ret;
2551         } else if (ret) {
2552                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2553                 return ret;
2554         }
2555
2556         *speed = le32_to_cpu(resp->speed);
2557
2558         return 0;
2559 }
2560
2561 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2562 {
2563         struct hclge_sfp_info_cmd *resp;
2564         struct hclge_desc desc;
2565         int ret;
2566
2567         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2568         resp = (struct hclge_sfp_info_cmd *)desc.data;
2569
2570         resp->query_type = QUERY_ACTIVE_SPEED;
2571
2572         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2573         if (ret == -EOPNOTSUPP) {
2574                 dev_warn(&hdev->pdev->dev,
2575                          "IMP does not support getting SFP info %d\n", ret);
2576                 return ret;
2577         } else if (ret) {
2578                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2579                 return ret;
2580         }
2581
2582         mac->speed = le32_to_cpu(resp->speed);
2583         /* if resp->speed_ability is 0, it means the firmware is an old
2584          * version, so do not update these params
2585          */
2586         if (resp->speed_ability) {
2587                 mac->module_type = le32_to_cpu(resp->module_type);
2588                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2589                 mac->autoneg = resp->autoneg;
2590                 mac->support_autoneg = resp->autoneg_ability;
2591                 if (!resp->active_fec)
2592                         mac->fec_mode = 0;
2593                 else
2594                         mac->fec_mode = BIT(resp->active_fec);
2595         } else {
2596                 mac->speed_type = QUERY_SFP_SPEED;
2597         }
2598
2599         return 0;
2600 }
2601
2602 static int hclge_update_port_info(struct hclge_dev *hdev)
2603 {
2604         struct hclge_mac *mac = &hdev->hw.mac;
2605         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2606         int ret;
2607
2608         /* get the port info from SFP cmd if not copper port */
2609         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2610                 return 0;
2611
2612         /* if the IMP does not support getting SFP/qSFP info, return directly */
2613         if (!hdev->support_sfp_query)
2614                 return 0;
2615
2616         if (hdev->pdev->revision >= 0x21)
2617                 ret = hclge_get_sfp_info(hdev, mac);
2618         else
2619                 ret = hclge_get_sfp_speed(hdev, &speed);
2620
2621         if (ret == -EOPNOTSUPP) {
2622                 hdev->support_sfp_query = false;
2623                 return ret;
2624         } else if (ret) {
2625                 return ret;
2626         }
2627
2628         if (hdev->pdev->revision >= 0x21) {
2629                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2630                         hclge_update_port_capability(mac);
2631                         return 0;
2632                 }
2633                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2634                                                HCLGE_MAC_FULL);
2635         } else {
2636                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2637                         return 0; /* do nothing if no SFP */
2638
2639                 /* must config full duplex for SFP */
2640                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2641         }
2642 }
2643
2644 static int hclge_get_status(struct hnae3_handle *handle)
2645 {
2646         struct hclge_vport *vport = hclge_get_vport(handle);
2647         struct hclge_dev *hdev = vport->back;
2648
2649         hclge_update_link_status(hdev);
2650
2651         return hdev->hw.mac.link;
2652 }
2653
2654 static void hclge_service_timer(struct timer_list *t)
2655 {
2656         struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2657
2658         mod_timer(&hdev->service_timer, jiffies + HZ);
2659         hdev->hw_stats.stats_timer++;
2660         hdev->fd_arfs_expire_timer++;
2661         hclge_task_schedule(hdev);
2662 }
2663
2664 static void hclge_service_complete(struct hclge_dev *hdev)
2665 {
2666         WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2667
2668         /* Flush memory before next watchdog */
2669         smp_mb__before_atomic();
2670         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2671 }
2672
2673 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2674 {
2675         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2676
2677         /* fetch the events from their corresponding regs */
2678         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2679         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2680         msix_src_reg = hclge_read_dev(&hdev->hw,
2681                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2682
2683         /* Assumption: if reset and mailbox events are reported together,
2684          * we only process the reset event in this pass and defer the
2685          * mailbox events. Since we will not have cleared the RX CMDQ event
2686          * this time, the H/W will raise another interrupt just for the
2687          * mailbox.
2688          */
2689
2690         /* check for vector0 reset event sources */
2691         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2692                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2693                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2694                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2695                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2696                 hdev->rst_stats.imp_rst_cnt++;
2697                 return HCLGE_VECTOR0_EVENT_RST;
2698         }
2699
2700         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2701                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2702                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2703                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2704                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2705                 hdev->rst_stats.global_rst_cnt++;
2706                 return HCLGE_VECTOR0_EVENT_RST;
2707         }
2708
2709         /* check for vector0 msix event source */
2710         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2711                 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2712                         msix_src_reg);
2713                 return HCLGE_VECTOR0_EVENT_ERR;
2714         }
2715
2716         /* check for vector0 mailbox(=CMDQ RX) event source */
2717         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2718                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2719                 *clearval = cmdq_src_reg;
2720                 return HCLGE_VECTOR0_EVENT_MBX;
2721         }
2722
2723         /* print other vector0 event source */
2724         dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2725                 cmdq_src_reg, msix_src_reg);
2726         return HCLGE_VECTOR0_EVENT_OTHER;
2727 }
2728
2729 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2730                                     u32 regclr)
2731 {
2732         switch (event_type) {
2733         case HCLGE_VECTOR0_EVENT_RST:
2734                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2735                 break;
2736         case HCLGE_VECTOR0_EVENT_MBX:
2737                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2738                 break;
2739         default:
2740                 break;
2741         }
2742 }
2743
2744 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2745 {
2746         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2747                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2748                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2749                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2750         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2751 }
2752
2753 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2754 {
2755         writel(enable ? 1 : 0, vector->addr);
2756 }
2757
2758 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2759 {
2760         struct hclge_dev *hdev = data;
2761         u32 event_cause;
2762         u32 clearval;
2763
2764         hclge_enable_vector(&hdev->misc_vector, false);
2765         event_cause = hclge_check_event_cause(hdev, &clearval);
2766
2767         /* vector 0 interrupt is shared with reset and mailbox source events. */
2768         switch (event_cause) {
2769         case HCLGE_VECTOR0_EVENT_ERR:
2770                 /* we do not know what type of reset is required now. This could
2771                  * only be decided after we fetch the type of errors which
2772                  * caused this event. Therefore, we will do below for now:
2773                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2774                  *    have deferred the type of reset to be used.
2775                  * 2. Schedule the reset service task.
2776                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2777                  *    will fetch the correct type of reset.  This would be done
2778                  *    by first decoding the types of errors.
2779                  */
2780                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2781                 /* fall through */
2782         case HCLGE_VECTOR0_EVENT_RST:
2783                 hclge_reset_task_schedule(hdev);
2784                 break;
2785         case HCLGE_VECTOR0_EVENT_MBX:
2786                 /* If we are here then,
2787                  * 1. Either we are not handling any mbx task and we are not
2788                  *    scheduled as well
2789                  *                        OR
2790                  * 2. We could be handling an mbx task but nothing more is
2791                  *    scheduled.
2792                  * In both cases, we should schedule mbx task as there are more
2793                  * mbx messages reported by this interrupt.
2794                  */
2795                 hclge_mbx_task_schedule(hdev);
2796                 break;
2797         default:
2798                 dev_warn(&hdev->pdev->dev,
2799                          "received unknown or unhandled event of vector0\n");
2800                 break;
2801         }
2802
2803         /* clear the source of interrupt if it is not caused by reset */
2804         if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2805                 hclge_clear_event_cause(hdev, event_cause, clearval);
2806                 hclge_enable_vector(&hdev->misc_vector, true);
2807         }
2808
2809         return IRQ_HANDLED;
2810 }
2811
2812 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2813 {
2814         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2815                 dev_warn(&hdev->pdev->dev,
2816                          "vector(vector_id %d) has been freed.\n", vector_id);
2817                 return;
2818         }
2819
2820         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2821         hdev->num_msi_left += 1;
2822         hdev->num_msi_used -= 1;
2823 }
2824
2825 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2826 {
2827         struct hclge_misc_vector *vector = &hdev->misc_vector;
2828
2829         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2830
2831         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2832         hdev->vector_status[0] = 0;
2833
2834         hdev->num_msi_left -= 1;
2835         hdev->num_msi_used += 1;
2836 }
2837
2838 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2839 {
2840         int ret;
2841
2842         hclge_get_misc_vector(hdev);
2843
2844         /* this irq is freed explicitly in hclge_misc_irq_uninit() */
2845         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2846                           0, "hclge_misc", hdev);
2847         if (ret) {
2848                 hclge_free_vector(hdev, 0);
2849                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2850                         hdev->misc_vector.vector_irq);
2851         }
2852
2853         return ret;
2854 }
2855
2856 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2857 {
2858         free_irq(hdev->misc_vector.vector_irq, hdev);
2859         hclge_free_vector(hdev, 0);
2860 }
2861
2862 int hclge_notify_client(struct hclge_dev *hdev,
2863                         enum hnae3_reset_notify_type type)
2864 {
2865         struct hnae3_client *client = hdev->nic_client;
2866         u16 i;
2867
2868         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
2869             !client)
2870                 return 0;
2871
2872         if (!client->ops->reset_notify)
2873                 return -EOPNOTSUPP;
2874
2875         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2876                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2877                 int ret;
2878
2879                 ret = client->ops->reset_notify(handle, type);
2880                 if (ret) {
2881                         dev_err(&hdev->pdev->dev,
2882                                 "notify nic client failed %d(%d)\n", type, ret);
2883                         return ret;
2884                 }
2885         }
2886
2887         return 0;
2888 }
2889
2890 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2891                                     enum hnae3_reset_notify_type type)
2892 {
2893         struct hnae3_client *client = hdev->roce_client;
2894         int ret = 0;
2895         u16 i;
2896
2897         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
2898             !client)
2899                 return 0;
2900
2901         if (!client->ops->reset_notify)
2902                 return -EOPNOTSUPP;
2903
2904         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2905                 struct hnae3_handle *handle = &hdev->vport[i].roce;
2906
2907                 ret = client->ops->reset_notify(handle, type);
2908                 if (ret) {
2909                         dev_err(&hdev->pdev->dev,
2910                                 "notify roce client failed %d(%d)",
2911                                 type, ret);
2912                         return ret;
2913                 }
2914         }
2915
2916         return ret;
2917 }
2918
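/* Poll until the hardware signals reset completion: the relevant status bit
 * is sampled every HCLGE_RESET_WATI_MS (100 ms) for at most
 * HCLGE_RESET_WAIT_CNT (200) iterations, i.e. roughly 20 seconds in total.
 * FLR is the exception: it waits for the HNAE3_FLR_DONE bit in
 * hdev->flr_state instead of a hardware register.
 */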
2919 static int hclge_reset_wait(struct hclge_dev *hdev)
2920 {
2921 #define HCLGE_RESET_WATI_MS     100
2922 #define HCLGE_RESET_WAIT_CNT    200
2923         u32 val, reg, reg_bit;
2924         u32 cnt = 0;
2925
2926         switch (hdev->reset_type) {
2927         case HNAE3_IMP_RESET:
2928                 reg = HCLGE_GLOBAL_RESET_REG;
2929                 reg_bit = HCLGE_IMP_RESET_BIT;
2930                 break;
2931         case HNAE3_GLOBAL_RESET:
2932                 reg = HCLGE_GLOBAL_RESET_REG;
2933                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2934                 break;
2935         case HNAE3_FUNC_RESET:
2936                 reg = HCLGE_FUN_RST_ING;
2937                 reg_bit = HCLGE_FUN_RST_ING_B;
2938                 break;
2939         case HNAE3_FLR_RESET:
2940                 break;
2941         default:
2942                 dev_err(&hdev->pdev->dev,
2943                         "Wait for unsupported reset type: %d\n",
2944                         hdev->reset_type);
2945                 return -EINVAL;
2946         }
2947
2948         if (hdev->reset_type == HNAE3_FLR_RESET) {
2949                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2950                        cnt++ < HCLGE_RESET_WAIT_CNT)
2951                         msleep(HCLGE_RESET_WATI_MS);
2952
2953                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2954                         dev_err(&hdev->pdev->dev,
2955                                 "flr wait timeout: %d\n", cnt);
2956                         return -EBUSY;
2957                 }
2958
2959                 return 0;
2960         }
2961
2962         val = hclge_read_dev(&hdev->hw, reg);
2963         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2964                 msleep(HCLGE_RESET_WATI_MS);
2965                 val = hclge_read_dev(&hdev->hw, reg);
2966                 cnt++;
2967         }
2968
2969         if (cnt >= HCLGE_RESET_WAIT_CNT) {
2970                 dev_warn(&hdev->pdev->dev,
2971                          "Wait for reset timeout: %d\n", hdev->reset_type);
2972                 return -EBUSY;
2973         }
2974
2975         return 0;
2976 }
2977
2978 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2979 {
2980         struct hclge_vf_rst_cmd *req;
2981         struct hclge_desc desc;
2982
2983         req = (struct hclge_vf_rst_cmd *)desc.data;
2984         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2985         req->dest_vfid = func_id;
2986
2987         if (reset)
2988                 req->vf_rst = 0x1;
2989
2990         return hclge_cmd_send(&hdev->hw, &desc, 1);
2991 }
2992
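/* Assert or de-assert FUNC_RST_ING for every VF vport. VF vports follow the
 * PF/vmdq vports in hdev->vport[], hence the loop starts at
 * num_vmdq_vport + 1. When asserting, alive VFs are additionally informed
 * over the mailbox; a failure there is only a warning since the VF driver
 * may simply not be loaded.
 */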
2993 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2994 {
2995         int i;
2996
2997         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2998                 struct hclge_vport *vport = &hdev->vport[i];
2999                 int ret;
3000
3001                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3002                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3003                 if (ret) {
3004                         dev_err(&hdev->pdev->dev,
3005                                 "set vf(%d) rst failed %d!\n",
3006                                 vport->vport_id, ret);
3007                         return ret;
3008                 }
3009
3010                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3011                         continue;
3012
3013                 /* Inform VF to process the reset.
3014                  * hclge_inform_reset_assert_to_vf may fail if VF
3015                  * driver is not loaded.
3016                  */
3017                 ret = hclge_inform_reset_assert_to_vf(vport);
3018                 if (ret)
3019                         dev_warn(&hdev->pdev->dev,
3020                                  "inform reset to vf(%d) failed %d!\n",
3021                                  vport->vport_id, ret);
3022         }
3023
3024         return 0;
3025 }
3026
3027 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3028 {
3029         struct hclge_desc desc;
3030         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3031         int ret;
3032
3033         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3034         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3035         req->fun_reset_vfid = func_id;
3036
3037         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3038         if (ret)
3039                 dev_err(&hdev->pdev->dev,
3040                         "send function reset cmd fail, status =%d\n", ret);
3041
3042         return ret;
3043 }
3044
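/* Kick off the requested reset. A global reset is triggered directly by
 * setting HCLGE_GLOBAL_RESET_BIT in HCLGE_GLOBAL_RESET_REG; PF (function)
 * reset and FLR are only marked pending and the reset task is re-scheduled
 * so that hclge_reset() drives them. If hardware still reports a reset in
 * progress, nothing new is triggered.
 */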
3045 static void hclge_do_reset(struct hclge_dev *hdev)
3046 {
3047         struct hnae3_handle *handle = &hdev->vport[0].nic;
3048         struct pci_dev *pdev = hdev->pdev;
3049         u32 val;
3050
3051         if (hclge_get_hw_reset_stat(handle)) {
3052                 dev_info(&pdev->dev, "Hardware reset has not finished\n");
3053                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3054                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3055                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3056                 return;
3057         }
3058
3059         switch (hdev->reset_type) {
3060         case HNAE3_GLOBAL_RESET:
3061                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3062                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3063                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3064                 dev_info(&pdev->dev, "Global Reset requested\n");
3065                 break;
3066         case HNAE3_FUNC_RESET:
3067                 dev_info(&pdev->dev, "PF Reset requested\n");
3068                 /* schedule again to check later */
3069                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3070                 hclge_reset_task_schedule(hdev);
3071                 break;
3072         case HNAE3_FLR_RESET:
3073                 dev_info(&pdev->dev, "FLR requested\n");
3074                 /* schedule again to check later */
3075                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3076                 hclge_reset_task_schedule(hdev);
3077                 break;
3078         default:
3079                 dev_warn(&pdev->dev,
3080                          "Unsupported reset type: %d\n", hdev->reset_type);
3081                 break;
3082         }
3083 }
3084
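/* Pick the highest-priority reset level pending in @addr, in the order
 * IMP > GLOBAL > FUNC > FLR, clearing any lower-priority bits that the
 * chosen level already covers. An UNKNOWN request (raised from the MSI-X
 * error path) is first resolved into a concrete level via
 * hclge_handle_hw_msix_error(). If a higher-level reset is already being
 * handled, HNAE3_NONE_RESET is returned so the lower request is dropped.
 */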
3085 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3086                                                    unsigned long *addr)
3087 {
3088         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3089
3090         /* first, resolve any unknown reset type to the known type(s) */
3091         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3092                 /* we will intentionally ignore any errors from this function
3093                  *  as we will end up in *some* reset request in any case
3094                  */
3095                 hclge_handle_hw_msix_error(hdev, addr);
3096                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3097                 /* We deferred the clearing of the error event which caused
3098                  * the interrupt since it was not possible to do that in
3099                  * interrupt context (and this is the reason we introduced
3100                  * the new UNKNOWN reset type). Now that the errors have been
3101                  * handled and cleared in hardware, we can safely enable
3102                  * interrupts. This is an exception to the norm.
3103                  */
3104                 hclge_enable_vector(&hdev->misc_vector, true);
3105         }
3106
3107         /* return the highest priority reset level amongst all */
3108         if (test_bit(HNAE3_IMP_RESET, addr)) {
3109                 rst_level = HNAE3_IMP_RESET;
3110                 clear_bit(HNAE3_IMP_RESET, addr);
3111                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3112                 clear_bit(HNAE3_FUNC_RESET, addr);
3113         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3114                 rst_level = HNAE3_GLOBAL_RESET;
3115                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3116                 clear_bit(HNAE3_FUNC_RESET, addr);
3117         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3118                 rst_level = HNAE3_FUNC_RESET;
3119                 clear_bit(HNAE3_FUNC_RESET, addr);
3120         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3121                 rst_level = HNAE3_FLR_RESET;
3122                 clear_bit(HNAE3_FLR_RESET, addr);
3123         }
3124
3125         if (hdev->reset_type != HNAE3_NONE_RESET &&
3126             rst_level < hdev->reset_type)
3127                 return HNAE3_NONE_RESET;
3128
3129         return rst_level;
3130 }
3131
3132 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3133 {
3134         u32 clearval = 0;
3135
3136         switch (hdev->reset_type) {
3137         case HNAE3_IMP_RESET:
3138                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3139                 break;
3140         case HNAE3_GLOBAL_RESET:
3141                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3142                 break;
3143         default:
3144                 break;
3145         }
3146
3147         if (!clearval)
3148                 return;
3149
3150         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3151         hclge_enable_vector(&hdev->misc_vector, true);
3152 }
3153
3154 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3155 {
3156         int ret = 0;
3157
3158         switch (hdev->reset_type) {
3159         case HNAE3_FUNC_RESET:
3160                 /* fall through */
3161         case HNAE3_FLR_RESET:
3162                 ret = hclge_set_all_vf_rst(hdev, true);
3163                 break;
3164         default:
3165                 break;
3166         }
3167
3168         return ret;
3169 }
3170
3171 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3172 {
3173 #define HCLGE_RESET_SYNC_TIME 100
3174
3175         u32 reg_val;
3176         int ret = 0;
3177
3178         switch (hdev->reset_type) {
3179         case HNAE3_FUNC_RESET:
3180                 /* There is no mechanism for PF to know if VF has stopped IO;
3181                  * for now, just wait 100 ms for VF to stop IO
3182                  */
3183                 msleep(HCLGE_RESET_SYNC_TIME);
3184                 ret = hclge_func_reset_cmd(hdev, 0);
3185                 if (ret) {
3186                         dev_err(&hdev->pdev->dev,
3187                                 "asserting function reset fail %d!\n", ret);
3188                         return ret;
3189                 }
3190
3191                 /* After performing PF reset, it is not necessary to do the
3192                  * mailbox handling or send any command to firmware, because
3193                  * any mailbox handling or command to firmware is only valid
3194                  * after hclge_cmd_init is called.
3195                  */
3196                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3197                 hdev->rst_stats.pf_rst_cnt++;
3198                 break;
3199         case HNAE3_FLR_RESET:
3200                 /* There is no mechanism for PF to know if VF has stopped IO;
3201                  * for now, just wait 100 ms for VF to stop IO
3202                  */
3203                 msleep(HCLGE_RESET_SYNC_TIME);
3204                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3205                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3206                 hdev->rst_stats.flr_rst_cnt++;
3207                 break;
3208         case HNAE3_IMP_RESET:
3209                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3210                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3211                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3212                 break;
3213         default:
3214                 break;
3215         }
3216
3217         /* inform hardware that preparatory work is done */
3218         msleep(HCLGE_RESET_SYNC_TIME);
3219         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3220                         HCLGE_NIC_CMQ_ENABLE);
3221         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3222
3223         return ret;
3224 }
3225
3226 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3227 {
3228 #define MAX_RESET_FAIL_CNT 5
3229 #define RESET_UPGRADE_DELAY_SEC 10
3230
3231         if (hdev->reset_pending) {
3232                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3233                          hdev->reset_pending);
3234                 return true;
3235         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3236                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3237                     BIT(HCLGE_IMP_RESET_BIT))) {
3238                 dev_info(&hdev->pdev->dev,
3239                          "reset failed because IMP Reset is pending\n");
3240                 hclge_clear_reset_cause(hdev);
3241                 return false;
3242         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3243                 hdev->reset_fail_cnt++;
3244                 if (is_timeout) {
3245                         set_bit(hdev->reset_type, &hdev->reset_pending);
3246                         dev_info(&hdev->pdev->dev,
3247                                  "re-schedule to wait for hw reset done\n");
3248                         return true;
3249                 }
3250
3251                 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3252                 hclge_clear_reset_cause(hdev);
3253                 mod_timer(&hdev->reset_timer,
3254                           jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3255
3256                 return false;
3257         }
3258
3259         hclge_clear_reset_cause(hdev);
3260         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3261         return false;
3262 }
3263
3264 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3265 {
3266         int ret = 0;
3267
3268         switch (hdev->reset_type) {
3269         case HNAE3_FUNC_RESET:
3270                 /* fall through */
3271         case HNAE3_FLR_RESET:
3272                 ret = hclge_set_all_vf_rst(hdev, false);
3273                 break;
3274         default:
3275                 break;
3276         }
3277
3278         return ret;
3279 }
3280
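/* Core reset sequence. In order: notify the roce client DOWN, prepare the
 * VFs (prepare_down), notify the nic client DOWN under rtnl, assert the
 * reset and wait for hardware (prepare_wait + reset_wait), then UNINIT the
 * roce and nic clients, re-initialise the ae device, INIT/RESTORE the nic
 * client, clear the reset cause, de-assert the VF resets (prepare_up), bring
 * the nic client UP, and finally INIT/UP the roce client. Any failure falls
 * through to hclge_reset_err_handle(), which may re-schedule the reset.
 */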
3281 static void hclge_reset(struct hclge_dev *hdev)
3282 {
3283         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3284         bool is_timeout = false;
3285         int ret;
3286
3287         /* Initialize ae_dev reset status as well, in case enet layer wants to
3288          * know if device is undergoing reset
3289          */
3290         ae_dev->reset_type = hdev->reset_type;
3291         hdev->rst_stats.reset_cnt++;
3292         /* perform reset of the stack & ae device for a client */
3293         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3294         if (ret)
3295                 goto err_reset;
3296
3297         ret = hclge_reset_prepare_down(hdev);
3298         if (ret)
3299                 goto err_reset;
3300
3301         rtnl_lock();
3302         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3303         if (ret)
3304                 goto err_reset_lock;
3305
3306         rtnl_unlock();
3307
3308         ret = hclge_reset_prepare_wait(hdev);
3309         if (ret)
3310                 goto err_reset;
3311
3312         if (hclge_reset_wait(hdev)) {
3313                 is_timeout = true;
3314                 goto err_reset;
3315         }
3316
3317         hdev->rst_stats.hw_reset_done_cnt++;
3318
3319         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3320         if (ret)
3321                 goto err_reset;
3322
3323         rtnl_lock();
3324         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3325         if (ret)
3326                 goto err_reset_lock;
3327
3328         ret = hclge_reset_ae_dev(hdev->ae_dev);
3329         if (ret)
3330                 goto err_reset_lock;
3331
3332         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3333         if (ret)
3334                 goto err_reset_lock;
3335
3336         ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3337         if (ret)
3338                 goto err_reset_lock;
3339
3340         hclge_clear_reset_cause(hdev);
3341
3342         ret = hclge_reset_prepare_up(hdev);
3343         if (ret)
3344                 goto err_reset_lock;
3345
3346         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3347         if (ret)
3348                 goto err_reset_lock;
3349
3350         rtnl_unlock();
3351
3352         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3353         if (ret)
3354                 goto err_reset;
3355
3356         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3357         if (ret)
3358                 goto err_reset;
3359
3360         hdev->last_reset_time = jiffies;
3361         hdev->reset_fail_cnt = 0;
3362         hdev->rst_stats.reset_done_cnt++;
3363         ae_dev->reset_type = HNAE3_NONE_RESET;
3364         del_timer(&hdev->reset_timer);
3365
3366         return;
3367
3368 err_reset_lock:
3369         rtnl_unlock();
3370 err_reset:
3371         if (hclge_reset_err_handle(hdev, is_timeout))
3372                 hclge_reset_task_schedule(hdev);
3373 }
3374
3375 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3376 {
3377         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3378         struct hclge_dev *hdev = ae_dev->priv;
3379
3380         /* We might end up getting called broadly because of the 2 cases below:
3381          * 1. Recoverable error was conveyed through APEI and only way to bring
3382          *    normalcy is to reset.
3383          * 2. A new reset request from the stack due to timeout
3384          *
3385          * For the first case, the error event might not have an ae handle available.
3386          * Check if this is a new reset request and we are not here just because
3387          * last reset attempt did not succeed and watchdog hit us again. We will
3388          * know this if last reset request did not occur very recently (watchdog
3389          * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ).
3390          * In case of a new request we reset the "reset level" to PF reset.
3391          * And if it is a repeat reset request of the most recent one then we
3392          * want to make sure we throttle the reset request. Therefore, we will
3393          * not allow it again before 3*HZ jiffies have passed.
3394          */
3395         if (!handle)
3396                 handle = &hdev->vport[0].nic;
3397
3398         if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3399                 return;
3400         else if (hdev->default_reset_request)
3401                 hdev->reset_level =
3402                         hclge_get_reset_level(hdev,
3403                                               &hdev->default_reset_request);
3404         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3405                 hdev->reset_level = HNAE3_FUNC_RESET;
3406
3407         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3408                  hdev->reset_level);
3409
3410         /* request reset & schedule reset task */
3411         set_bit(hdev->reset_level, &hdev->reset_request);
3412         hclge_reset_task_schedule(hdev);
3413
3414         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3415                 hdev->reset_level++;
3416 }
3417
3418 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3419                                         enum hnae3_reset_type rst_type)
3420 {
3421         struct hclge_dev *hdev = ae_dev->priv;
3422
3423         set_bit(rst_type, &hdev->default_reset_request);
3424 }
3425
3426 static void hclge_reset_timer(struct timer_list *t)
3427 {
3428         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3429
3430         dev_info(&hdev->pdev->dev,
3431                  "triggering global reset in reset timer\n");
3432         set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3433         hclge_reset_event(hdev->pdev, NULL);
3434 }
3435
3436 static void hclge_reset_subtask(struct hclge_dev *hdev)
3437 {
3438         /* check if there is any ongoing reset in the hardware. This status can
3439          * be checked from reset_pending. If there is, then we need to wait for
3440          * hardware to complete reset.
3441          *    a. If we are able to figure out in reasonable time that hardware
3442          *       has fully reset, then we can proceed with driver, client
3443          *       reset.
3444          *    b. else, we can come back later to check this status so re-sched
3445          *       now.
3446          */
3447         hdev->last_reset_time = jiffies;
3448         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3449         if (hdev->reset_type != HNAE3_NONE_RESET)
3450                 hclge_reset(hdev);
3451
3452         /* check if we got any *new* reset requests to be honored */
3453         hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3454         if (hdev->reset_type != HNAE3_NONE_RESET)
3455                 hclge_do_reset(hdev);
3456
3457         hdev->reset_type = HNAE3_NONE_RESET;
3458 }
3459
3460 static void hclge_reset_service_task(struct work_struct *work)
3461 {
3462         struct hclge_dev *hdev =
3463                 container_of(work, struct hclge_dev, rst_service_task);
3464
3465         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3466                 return;
3467
3468         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3469
3470         hclge_reset_subtask(hdev);
3471
3472         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3473 }
3474
3475 static void hclge_mailbox_service_task(struct work_struct *work)
3476 {
3477         struct hclge_dev *hdev =
3478                 container_of(work, struct hclge_dev, mbx_service_task);
3479
3480         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3481                 return;
3482
3483         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3484
3485         hclge_mbx_handler(hdev);
3486
3487         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3488 }
3489
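/* A VF vport is considered dead once 8 * HZ has passed since its last
 * keepalive (last_active_jiffies); its MPS is then reverted to the default
 * frame size.
 */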
3490 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3491 {
3492         int i;
3493
3494         /* start from vport 1 because the PF (vport 0) is always alive */
3495         for (i = 1; i < hdev->num_alloc_vport; i++) {
3496                 struct hclge_vport *vport = &hdev->vport[i];
3497
3498                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3499                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3500
3501                 /* If vf is not alive, set to default value */
3502                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3503                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3504         }
3505 }
3506
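/* Periodic service work: refresh hardware statistics once the stats timer
 * interval has elapsed, update port info and link status, age out inactive
 * VF vports, and expire stale aRFS flow-director rules on their own timer.
 */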
3507 static void hclge_service_task(struct work_struct *work)
3508 {
3509         struct hclge_dev *hdev =
3510                 container_of(work, struct hclge_dev, service_task);
3511
3512         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3513                 hclge_update_stats_for_all(hdev);
3514                 hdev->hw_stats.stats_timer = 0;
3515         }
3516
3517         hclge_update_port_info(hdev);
3518         hclge_update_link_status(hdev);
3519         hclge_update_vport_alive(hdev);
3520         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3521                 hclge_rfs_filter_expire(hdev);
3522                 hdev->fd_arfs_expire_timer = 0;
3523         }
3524         hclge_service_complete(hdev);
3525 }
3526
3527 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3528 {
3529         /* VF handle has no client */
3530         if (!handle->client)
3531                 return container_of(handle, struct hclge_vport, nic);
3532         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3533                 return container_of(handle, struct hclge_vport, roce);
3534         else
3535                 return container_of(handle, struct hclge_vport, nic);
3536 }
3537
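/* Allocate up to @vector_num unused MSI-X vectors for a vport. Index 0 is
 * skipped because it is reserved for the misc (vector0) interrupt. For each
 * allocated vector the interrupt-control register address is derived from
 * HCLGE_VECTOR_REG_BASE plus a per-vector and a per-vport offset, as
 * computed below. Returns the number of vectors actually allocated.
 */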
3538 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3539                             struct hnae3_vector_info *vector_info)
3540 {
3541         struct hclge_vport *vport = hclge_get_vport(handle);
3542         struct hnae3_vector_info *vector = vector_info;
3543         struct hclge_dev *hdev = vport->back;
3544         int alloc = 0;
3545         int i, j;
3546
3547         vector_num = min(hdev->num_msi_left, vector_num);
3548
3549         for (j = 0; j < vector_num; j++) {
3550                 for (i = 1; i < hdev->num_msi; i++) {
3551                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3552                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3553                                 vector->io_addr = hdev->hw.io_base +
3554                                         HCLGE_VECTOR_REG_BASE +
3555                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3556                                         vport->vport_id *
3557                                         HCLGE_VECTOR_VF_OFFSET;
3558                                 hdev->vector_status[i] = vport->vport_id;
3559                                 hdev->vector_irq[i] = vector->vector;
3560
3561                                 vector++;
3562                                 alloc++;
3563
3564                                 break;
3565                         }
3566                 }
3567         }
3568         hdev->num_msi_left -= alloc;
3569         hdev->num_msi_used += alloc;
3570
3571         return alloc;
3572 }
3573
3574 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3575 {
3576         int i;
3577
3578         for (i = 0; i < hdev->num_msi; i++)
3579                 if (vector == hdev->vector_irq[i])
3580                         return i;
3581
3582         return -EINVAL;
3583 }
3584
3585 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3586 {
3587         struct hclge_vport *vport = hclge_get_vport(handle);
3588         struct hclge_dev *hdev = vport->back;
3589         int vector_id;
3590
3591         vector_id = hclge_get_vector_index(hdev, vector);
3592         if (vector_id < 0) {
3593                 dev_err(&hdev->pdev->dev,
3594                         "Get vector index fail. vector_id =%d\n", vector_id);
3595                 return vector_id;
3596         }
3597
3598         hclge_free_vector(hdev, vector_id);
3599
3600         return 0;
3601 }
3602
3603 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3604 {
3605         return HCLGE_RSS_KEY_SIZE;
3606 }
3607
3608 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3609 {
3610         return HCLGE_RSS_IND_TBL_SIZE;
3611 }
3612
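/* Program the RSS hash algorithm and key. The key is written in three
 * descriptors (key_offset 0..2): the first two carry HCLGE_RSS_HASH_KEY_NUM
 * bytes each and the last carries the remainder
 * (HCLGE_RSS_KEY_SIZE - 2 * HCLGE_RSS_HASH_KEY_NUM). For illustration,
 * assuming the usual 40-byte Toeplitz key and 16-byte chunks, the three
 * writes would cover 16 + 16 + 8 bytes.
 */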
3613 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3614                                   const u8 hfunc, const u8 *key)
3615 {
3616         struct hclge_rss_config_cmd *req;
3617         struct hclge_desc desc;
3618         int key_offset;
3619         int key_size;
3620         int ret;
3621
3622         req = (struct hclge_rss_config_cmd *)desc.data;
3623
3624         for (key_offset = 0; key_offset < 3; key_offset++) {
3625                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3626                                            false);
3627
3628                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3629                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3630
3631                 if (key_offset == 2)
3632                         key_size =
3633                         HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3634                 else
3635                         key_size = HCLGE_RSS_HASH_KEY_NUM;
3636
3637                 memcpy(req->hash_key,
3638                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3639
3640                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3641                 if (ret) {
3642                         dev_err(&hdev->pdev->dev,
3643                                 "Configure RSS config fail, status = %d\n",
3644                                 ret);
3645                         return ret;
3646                 }
3647         }
3648         return 0;
3649 }
3650
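/* Write the RSS indirection table to hardware. The HCLGE_RSS_IND_TBL_SIZE
 * entries are split across HCLGE_RSS_CFG_TBL_NUM commands, each carrying
 * HCLGE_RSS_CFG_TBL_SIZE consecutive queue indices starting at
 * start_table_index.
 */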
3651 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3652 {
3653         struct hclge_rss_indirection_table_cmd *req;
3654         struct hclge_desc desc;
3655         int i, j;
3656         int ret;
3657
3658         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3659
3660         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3661                 hclge_cmd_setup_basic_desc
3662                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3663
3664                 req->start_table_index =
3665                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3666                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3667
3668                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3669                         req->rss_result[j] =
3670                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3671
3672                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3673                 if (ret) {
3674                         dev_err(&hdev->pdev->dev,
3675                                 "Configure rss indir table fail, status = %d\n",
3676                                 ret);
3677                         return ret;
3678                 }
3679         }
3680         return 0;
3681 }
3682
3683 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3684                                  u16 *tc_size, u16 *tc_offset)
3685 {
3686         struct hclge_rss_tc_mode_cmd *req;
3687         struct hclge_desc desc;
3688         int ret;
3689         int i;
3690
3691         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3692         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3693
3694         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3695                 u16 mode = 0;
3696
3697                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3698                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3699                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3700                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3701                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3702
3703                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3704         }
3705
3706         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3707         if (ret)
3708                 dev_err(&hdev->pdev->dev,
3709                         "Configure rss tc mode fail, status = %d\n", ret);
3710
3711         return ret;
3712 }
3713
3714 static void hclge_get_rss_type(struct hclge_vport *vport)
3715 {
3716         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3717             vport->rss_tuple_sets.ipv4_udp_en ||
3718             vport->rss_tuple_sets.ipv4_sctp_en ||
3719             vport->rss_tuple_sets.ipv6_tcp_en ||
3720             vport->rss_tuple_sets.ipv6_udp_en ||
3721             vport->rss_tuple_sets.ipv6_sctp_en)
3722                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3723         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3724                  vport->rss_tuple_sets.ipv6_fragment_en)
3725                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3726         else
3727                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3728 }
3729
3730 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3731 {
3732         struct hclge_rss_input_tuple_cmd *req;
3733         struct hclge_desc desc;
3734         int ret;
3735
3736         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3737
3738         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3739
3740         /* Get the tuple cfg from pf */
3741         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3742         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3743         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3744         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3745         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3746         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3747         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3748         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3749         hclge_get_rss_type(&hdev->vport[0]);
3750         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3751         if (ret)
3752                 dev_err(&hdev->pdev->dev,
3753                         "Configure rss input fail, status = %d\n", ret);
3754         return ret;
3755 }
3756
3757 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3758                          u8 *key, u8 *hfunc)
3759 {
3760         struct hclge_vport *vport = hclge_get_vport(handle);
3761         int i;
3762
3763         /* Get hash algorithm */
3764         if (hfunc) {
3765                 switch (vport->rss_algo) {
3766                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3767                         *hfunc = ETH_RSS_HASH_TOP;
3768                         break;
3769                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3770                         *hfunc = ETH_RSS_HASH_XOR;
3771                         break;
3772                 default:
3773                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3774                         break;
3775                 }
3776         }
3777
3778         /* Get the RSS Key required by the user */
3779         if (key)
3780                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3781
3782         /* Get indirect table */
3783         if (indir)
3784                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3785                         indir[i] =  vport->rss_indirection_tbl[i];
3786
3787         return 0;
3788 }
3789
3790 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3791                          const  u8 *key, const  u8 hfunc)
3792 {
3793         struct hclge_vport *vport = hclge_get_vport(handle);
3794         struct hclge_dev *hdev = vport->back;
3795         u8 hash_algo;
3796         int ret, i;
3797
3798         /* Set the RSS Hash Key if specified by the user */
3799         if (key) {
3800                 switch (hfunc) {
3801                 case ETH_RSS_HASH_TOP:
3802                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3803                         break;
3804                 case ETH_RSS_HASH_XOR:
3805                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3806                         break;
3807                 case ETH_RSS_HASH_NO_CHANGE:
3808                         hash_algo = vport->rss_algo;
3809                         break;
3810                 default:
3811                         return -EINVAL;
3812                 }
3813
3814                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3815                 if (ret)
3816                         return ret;
3817
3818                 /* Update the shadow RSS key with the user specified key */
3819                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3820                 vport->rss_algo = hash_algo;
3821         }
3822
3823         /* Update the shadow RSS table with user specified qids */
3824         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3825                 vport->rss_indirection_tbl[i] = indir[i];
3826
3827         /* Update the hardware */
3828         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3829 }
3830
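/* Translate the ethtool RXH_* flags of an rxnfc request into the hardware
 * tuple bits: L4 source/destination port, IP source/destination address,
 * plus the verification-tag bit for SCTP flows.
 */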
3831 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3832 {
3833         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3834
3835         if (nfc->data & RXH_L4_B_2_3)
3836                 hash_sets |= HCLGE_D_PORT_BIT;
3837         else
3838                 hash_sets &= ~HCLGE_D_PORT_BIT;
3839
3840         if (nfc->data & RXH_IP_SRC)
3841                 hash_sets |= HCLGE_S_IP_BIT;
3842         else
3843                 hash_sets &= ~HCLGE_S_IP_BIT;
3844
3845         if (nfc->data & RXH_IP_DST)
3846                 hash_sets |= HCLGE_D_IP_BIT;
3847         else
3848                 hash_sets &= ~HCLGE_D_IP_BIT;
3849
3850         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3851                 hash_sets |= HCLGE_V_TAG_BIT;
3852
3853         return hash_sets;
3854 }
3855
3856 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3857                                struct ethtool_rxnfc *nfc)
3858 {
3859         struct hclge_vport *vport = hclge_get_vport(handle);
3860         struct hclge_dev *hdev = vport->back;
3861         struct hclge_rss_input_tuple_cmd *req;
3862         struct hclge_desc desc;
3863         u8 tuple_sets;
3864         int ret;
3865
3866         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3867                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3868                 return -EINVAL;
3869
3870         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3871         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3872
3873         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3874         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3875         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3876         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3877         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3878         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3879         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3880         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3881
3882         tuple_sets = hclge_get_rss_hash_bits(nfc);
3883         switch (nfc->flow_type) {
3884         case TCP_V4_FLOW:
3885                 req->ipv4_tcp_en = tuple_sets;
3886                 break;
3887         case TCP_V6_FLOW:
3888                 req->ipv6_tcp_en = tuple_sets;
3889                 break;
3890         case UDP_V4_FLOW:
3891                 req->ipv4_udp_en = tuple_sets;
3892                 break;
3893         case UDP_V6_FLOW:
3894                 req->ipv6_udp_en = tuple_sets;
3895                 break;
3896         case SCTP_V4_FLOW:
3897                 req->ipv4_sctp_en = tuple_sets;
3898                 break;
3899         case SCTP_V6_FLOW:
3900                 if ((nfc->data & RXH_L4_B_0_1) ||
3901                     (nfc->data & RXH_L4_B_2_3))
3902                         return -EINVAL;
3903
3904                 req->ipv6_sctp_en = tuple_sets;
3905                 break;
3906         case IPV4_FLOW:
3907                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3908                 break;
3909         case IPV6_FLOW:
3910                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3911                 break;
3912         default:
3913                 return -EINVAL;
3914         }
3915
3916         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3917         if (ret) {
3918                 dev_err(&hdev->pdev->dev,
3919                         "Set rss tuple fail, status = %d\n", ret);
3920                 return ret;
3921         }
3922
3923         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3924         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3925         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3926         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3927         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3928         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3929         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3930         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3931         hclge_get_rss_type(vport);
3932         return 0;
3933 }
3934
3935 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3936                                struct ethtool_rxnfc *nfc)
3937 {
3938         struct hclge_vport *vport = hclge_get_vport(handle);
3939         u8 tuple_sets;
3940
3941         nfc->data = 0;
3942
3943         switch (nfc->flow_type) {
3944         case TCP_V4_FLOW:
3945                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3946                 break;
3947         case UDP_V4_FLOW:
3948                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3949                 break;
3950         case TCP_V6_FLOW:
3951                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3952                 break;
3953         case UDP_V6_FLOW:
3954                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3955                 break;
3956         case SCTP_V4_FLOW:
3957                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3958                 break;
3959         case SCTP_V6_FLOW:
3960                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3961                 break;
3962         case IPV4_FLOW:
3963         case IPV6_FLOW:
3964                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3965                 break;
3966         default:
3967                 return -EINVAL;
3968         }
3969
3970         if (!tuple_sets)
3971                 return 0;
3972
3973         if (tuple_sets & HCLGE_D_PORT_BIT)
3974                 nfc->data |= RXH_L4_B_2_3;
3975         if (tuple_sets & HCLGE_S_PORT_BIT)
3976                 nfc->data |= RXH_L4_B_0_1;
3977         if (tuple_sets & HCLGE_D_IP_BIT)
3978                 nfc->data |= RXH_IP_DST;
3979         if (tuple_sets & HCLGE_S_IP_BIT)
3980                 nfc->data |= RXH_IP_SRC;
3981
3982         return 0;
3983 }
3984
3985 static int hclge_get_tc_size(struct hnae3_handle *handle)
3986 {
3987         struct hclge_vport *vport = hclge_get_vport(handle);
3988         struct hclge_dev *hdev = vport->back;
3989
3990         return hdev->rss_size_max;
3991 }
3992
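/* Apply the vport 0 RSS configuration (indirection table, hash key, input
 * tuples, TC mode) to hardware. tc_size is the log2 of rss_size rounded up
 * to a power of two: for example, an rss_size of 12 rounds up to 16 and is
 * programmed as tc_size = 4, with tc_offset = rss_size * tc for each
 * enabled TC.
 */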
3993 int hclge_rss_init_hw(struct hclge_dev *hdev)
3994 {
3995         struct hclge_vport *vport = hdev->vport;
3996         u8 *rss_indir = vport[0].rss_indirection_tbl;
3997         u16 rss_size = vport[0].alloc_rss_size;
3998         u8 *key = vport[0].rss_hash_key;
3999         u8 hfunc = vport[0].rss_algo;
4000         u16 tc_offset[HCLGE_MAX_TC_NUM];
4001         u16 tc_valid[HCLGE_MAX_TC_NUM];
4002         u16 tc_size[HCLGE_MAX_TC_NUM];
4003         u16 roundup_size;
4004         int i, ret;
4005
4006         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4007         if (ret)
4008                 return ret;
4009
4010         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4011         if (ret)
4012                 return ret;
4013
4014         ret = hclge_set_rss_input_tuple(hdev);
4015         if (ret)
4016                 return ret;
4017
4018         /* Each TC has the same queue size, and the tc_size set to hardware is
4019          * the log2 of the roundup power of two of rss_size; the actual queue
4020          * size is limited by the indirection table.
4021          */
4022         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4023                 dev_err(&hdev->pdev->dev,
4024                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4025                         rss_size);
4026                 return -EINVAL;
4027         }
4028
4029         roundup_size = roundup_pow_of_two(rss_size);
4030         roundup_size = ilog2(roundup_size);
4031
4032         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4033                 tc_valid[i] = 0;
4034
4035                 if (!(hdev->hw_tc_map & BIT(i)))
4036                         continue;
4037
4038                 tc_valid[i] = 1;
4039                 tc_size[i] = roundup_size;
4040                 tc_offset[i] = rss_size * i;
4041         }
4042
4043         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4044 }
4045
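/* Fill each vport's shadow indirection table with the default round-robin
 * mapping, entry i -> queue (i % alloc_rss_size). For example, with an
 * alloc_rss_size of 8 the table simply repeats queues 0..7.
 */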
4046 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4047 {
4048         struct hclge_vport *vport = hdev->vport;
4049         int i, j;
4050
4051         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4052                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4053                         vport[j].rss_indirection_tbl[i] =
4054                                 i % vport[j].alloc_rss_size;
4055         }
4056 }
4057
4058 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4059 {
4060         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4061         struct hclge_vport *vport = hdev->vport;
4062
4063         if (hdev->pdev->revision >= 0x21)
4064                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4065
4066         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4067                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4068                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4069                 vport[i].rss_tuple_sets.ipv4_udp_en =
4070                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4071                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4072                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4073                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4074                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4075                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4076                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4077                 vport[i].rss_tuple_sets.ipv6_udp_en =
4078                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4079                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4080                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4081                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4082                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4083
4084                 vport[i].rss_algo = rss_algo;
4085
4086                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4087                        HCLGE_RSS_KEY_SIZE);
4088         }
4089
4090         hclge_rss_indir_init_cfg(hdev);
4091 }
4092
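/* Map (en == true) or unmap (en == false) a chain of rings to/from an
 * interrupt vector. The chain is walked and each node's ring type, TQP id
 * and GL index are packed into the command buffer; a command is sent to
 * firmware whenever HCLGE_VECTOR_ELEMENTS_PER_CMD entries have accumulated,
 * and once more at the end for any remaining entries.
 */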
4093 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4094                                 int vector_id, bool en,
4095                                 struct hnae3_ring_chain_node *ring_chain)
4096 {
4097         struct hclge_dev *hdev = vport->back;
4098         struct hnae3_ring_chain_node *node;
4099         struct hclge_desc desc;
4100         struct hclge_ctrl_vector_chain_cmd *req
4101                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4102         enum hclge_cmd_status status;
4103         enum hclge_opcode_type op;
4104         u16 tqp_type_and_id;
4105         int i;
4106
4107         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4108         hclge_cmd_setup_basic_desc(&desc, op, false);
4109         req->int_vector_id = vector_id;
4110
4111         i = 0;
4112         for (node = ring_chain; node; node = node->next) {
4113                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4114                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4115                                 HCLGE_INT_TYPE_S,
4116                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4117                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4118                                 HCLGE_TQP_ID_S, node->tqp_index);
4119                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4120                                 HCLGE_INT_GL_IDX_S,
4121                                 hnae3_get_field(node->int_gl_idx,
4122                                                 HNAE3_RING_GL_IDX_M,
4123                                                 HNAE3_RING_GL_IDX_S));
4124                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4125                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4126                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4127                         req->vfid = vport->vport_id;
4128
4129                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4130                         if (status) {
4131                                 dev_err(&hdev->pdev->dev,
4132                                         "Map TQP fail, status is %d.\n",
4133                                         status);
4134                                 return -EIO;
4135                         }
4136                         i = 0;
4137
4138                         hclge_cmd_setup_basic_desc(&desc,
4139                                                    op,
4140                                                    false);
4141                         req->int_vector_id = vector_id;
4142                 }
4143         }
4144
4145         if (i > 0) {
4146                 req->int_cause_num = i;
4147                 req->vfid = vport->vport_id;
4148                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4149                 if (status) {
4150                         dev_err(&hdev->pdev->dev,
4151                                 "Map TQP fail, status is %d.\n", status);
4152                         return -EIO;
4153                 }
4154         }
4155
4156         return 0;
4157 }
4158
4159 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4160                                     int vector,
4161                                     struct hnae3_ring_chain_node *ring_chain)
4162 {
4163         struct hclge_vport *vport = hclge_get_vport(handle);
4164         struct hclge_dev *hdev = vport->back;
4165         int vector_id;
4166
4167         vector_id = hclge_get_vector_index(hdev, vector);
4168         if (vector_id < 0) {
4169                 dev_err(&hdev->pdev->dev,
4170                         "Get vector index fail. vector_id =%d\n", vector_id);
4171                 return vector_id;
4172         }
4173
4174         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4175 }
4176
4177 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4178                                        int vector,
4179                                        struct hnae3_ring_chain_node *ring_chain)
4180 {
4181         struct hclge_vport *vport = hclge_get_vport(handle);
4182         struct hclge_dev *hdev = vport->back;
4183         int vector_id, ret;
4184
4185         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4186                 return 0;
4187
4188         vector_id = hclge_get_vector_index(hdev, vector);
4189         if (vector_id < 0) {
4190                 dev_err(&handle->pdev->dev,
4191                         "Get vector index fail. ret = %d\n", vector_id);
4192                 return vector_id;
4193         }
4194
4195         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4196         if (ret)
4197                 dev_err(&handle->pdev->dev,
4198                         "Unmap ring from vector fail. vector_id = %d, ret = %d\n",
4199                         vector_id,
4200                         ret);
4201
4202         return ret;
4203 }
4204
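/* Send the promiscuous mode configuration for one function (vf_id) to the
 * firmware. The unicast/multicast/broadcast enable bits are taken from the
 * hclge_promisc_param prepared by hclge_promisc_param_init().
 */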
4205 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4206                                struct hclge_promisc_param *param)
4207 {
4208         struct hclge_promisc_cfg_cmd *req;
4209         struct hclge_desc desc;
4210         int ret;
4211
4212         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4213
4214         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4215         req->vf_id = param->vf_id;
4216
4217         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4218          * pdev revision(0x20); newer revisions support them. Setting these two
4219          * fields does not cause an error when the driver sends the command to
4220          * the firmware on revision(0x20).
4221          */
4222         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4223                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4224
4225         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4226         if (ret)
4227                 dev_err(&hdev->pdev->dev,
4228                         "Set promisc mode fail, status is %d.\n", ret);
4229
4230         return ret;
4231 }
4232
4233 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4234                               bool en_mc, bool en_bc, int vport_id)
4235 {
4236         if (!param)
4237                 return;
4238
4239         memset(param, 0, sizeof(struct hclge_promisc_param));
4240         if (en_uc)
4241                 param->enable = HCLGE_PROMISC_EN_UC;
4242         if (en_mc)
4243                 param->enable |= HCLGE_PROMISC_EN_MC;
4244         if (en_bc)
4245                 param->enable |= HCLGE_PROMISC_EN_BC;
4246         param->vf_id = vport_id;
4247 }
4248
4249 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4250                                   bool en_mc_pmc)
4251 {
4252         struct hclge_vport *vport = hclge_get_vport(handle);
4253         struct hclge_dev *hdev = vport->back;
4254         struct hclge_promisc_param param;
4255         bool en_bc_pmc = true;
4256
4257         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4258          * is always bypassed. So broadcast promisc should be disabled until
4259          * the user enables promisc mode.
4260          */
4261         if (handle->pdev->revision == 0x20)
4262                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4263
4264         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4265                                  vport->vport_id);
4266         return hclge_cmd_set_promisc_mode(hdev, &param);
4267 }
4268
4269 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4270 {
4271         struct hclge_get_fd_mode_cmd *req;
4272         struct hclge_desc desc;
4273         int ret;
4274
4275         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4276
4277         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4278
4279         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4280         if (ret) {
4281                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4282                 return ret;
4283         }
4284
4285         *fd_mode = req->mode;
4286
4287         return ret;
4288 }
4289
4290 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4291                                    u32 *stage1_entry_num,
4292                                    u32 *stage2_entry_num,
4293                                    u16 *stage1_counter_num,
4294                                    u16 *stage2_counter_num)
4295 {
4296         struct hclge_get_fd_allocation_cmd *req;
4297         struct hclge_desc desc;
4298         int ret;
4299
4300         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4301
4302         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4303
4304         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4305         if (ret) {
4306                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4307                         ret);
4308                 return ret;
4309         }
4310
4311         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4312         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4313         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4314         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4315
4316         return ret;
4317 }
4318
4319 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4320 {
4321         struct hclge_set_fd_key_config_cmd *req;
4322         struct hclge_fd_key_cfg *stage;
4323         struct hclge_desc desc;
4324         int ret;
4325
4326         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4327
4328         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4329         stage = &hdev->fd_cfg.key_cfg[stage_num];
4330         req->stage = stage_num;
4331         req->key_select = stage->key_sel;
4332         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4333         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4334         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4335         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4336         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4337         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4338
4339         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4340         if (ret)
4341                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4342
4343         return ret;
4344 }
4345
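/* Query the flow director mode and the entry/counter allocation from the
 * firmware, then set up the stage 1 key configuration (active tuples and
 * meta data). Returns 0 directly when the device has no flow director
 * support.
 */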
4346 static int hclge_init_fd_config(struct hclge_dev *hdev)
4347 {
4348 #define LOW_2_WORDS             0x03
4349         struct hclge_fd_key_cfg *key_cfg;
4350         int ret;
4351
4352         if (!hnae3_dev_fd_supported(hdev))
4353                 return 0;
4354
4355         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4356         if (ret)
4357                 return ret;
4358
4359         switch (hdev->fd_cfg.fd_mode) {
4360         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4361                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4362                 break;
4363         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4364                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4365                 break;
4366         default:
4367                 dev_err(&hdev->pdev->dev,
4368                         "Unsupported flow director mode %d\n",
4369                         hdev->fd_cfg.fd_mode);
4370                 return -EOPNOTSUPP;
4371         }
4372
4373         hdev->fd_cfg.proto_support =
4374                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4375                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4376         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4377         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4378         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4379         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4380         key_cfg->outer_sipv6_word_en = 0;
4381         key_cfg->outer_dipv6_word_en = 0;
4382
4383         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4384                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4385                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4386                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4387
4388         /* If using the max 400-bit key, we can support tuples for ether type */
4389         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4390                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4391                 key_cfg->tuple_active |=
4392                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4393         }
4394
4395         /* roce_type is used to filter roce frames
4396          * dst_vport is used to specify the rule
4397          */
4398         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4399
4400         ret = hclge_get_fd_allocation(hdev,
4401                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4402                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4403                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4404                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4405         if (ret)
4406                 return ret;
4407
4408         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4409 }
4410
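/* Write one flow director TCAM entry. The operation always uses three
 * chained descriptors; when a key is given, its bytes are split across the
 * tcam_data areas of the three descriptors. 'sel_x' selects whether the x
 * or y half of the TCAM key is programmed, and 'is_add' marks the entry
 * valid or invalid.
 */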
4411 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4412                                 int loc, u8 *key, bool is_add)
4413 {
4414         struct hclge_fd_tcam_config_1_cmd *req1;
4415         struct hclge_fd_tcam_config_2_cmd *req2;
4416         struct hclge_fd_tcam_config_3_cmd *req3;
4417         struct hclge_desc desc[3];
4418         int ret;
4419
4420         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4421         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4422         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4423         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4424         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4425
4426         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4427         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4428         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4429
4430         req1->stage = stage;
4431         req1->xy_sel = sel_x ? 1 : 0;
4432         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4433         req1->index = cpu_to_le32(loc);
4434         req1->entry_vld = sel_x ? is_add : 0;
4435
4436         if (key) {
4437                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4438                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4439                        sizeof(req2->tcam_data));
4440                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4441                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4442         }
4443
4444         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4445         if (ret)
4446                 dev_err(&hdev->pdev->dev,
4447                         "config tcam key fail, ret=%d\n",
4448                         ret);
4449
4450         return ret;
4451 }
4452
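/* Program the action data (AD) entry of a rule. The 64-bit ad_data word is
 * built in two halves: the rule-id write-back fields are set first and then
 * shifted into the upper 32 bits, after which the drop/queue/counter/next-
 * stage action fields are set in the lower 32 bits.
 */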
4453 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4454                               struct hclge_fd_ad_data *action)
4455 {
4456         struct hclge_fd_ad_config_cmd *req;
4457         struct hclge_desc desc;
4458         u64 ad_data = 0;
4459         int ret;
4460
4461         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4462
4463         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4464         req->index = cpu_to_le32(loc);
4465         req->stage = stage;
4466
4467         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4468                       action->write_rule_id_to_bd);
4469         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4470                         action->rule_id);
4471         ad_data <<= 32;
4472         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4473         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4474                       action->forward_to_direct_queue);
4475         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4476                         action->queue_id);
4477         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4478         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4479                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4480         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4481         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4482                         action->next_input_key);
4483
4484         req->ad_data = cpu_to_le64(ad_data);
4485         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4486         if (ret)
4487                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4488
4489         return ret;
4490 }
4491
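/* Convert one tuple of a rule into its TCAM x/y representation at the
 * current key position, using the calc_x()/calc_y() helpers on the tuple
 * value and its mask. Returns true when the tuple occupies key space (so
 * the caller advances the key cursor), false when this tuple is not active
 * in the key format.
 */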
4492 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4493                                    struct hclge_fd_rule *rule)
4494 {
4495         u16 tmp_x_s, tmp_y_s;
4496         u32 tmp_x_l, tmp_y_l;
4497         int i;
4498
4499         if (rule->unused_tuple & tuple_bit)
4500                 return true;
4501
4502         switch (tuple_bit) {
4503         case 0:
4504                 return false;
4505         case BIT(INNER_DST_MAC):
4506                 for (i = 0; i < 6; i++) {
4507                         calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4508                                rule->tuples_mask.dst_mac[i]);
4509                         calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4510                                rule->tuples_mask.dst_mac[i]);
4511                 }
4512
4513                 return true;
4514         case BIT(INNER_SRC_MAC):
4515                 for (i = 0; i < 6; i++) {
4516                         calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4517                                rule->tuples_mask.src_mac[i]);
4518                         calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4519                                rule->tuples_mask.src_mac[i]);
4520                 }
4521
4522                 return true;
4523         case BIT(INNER_VLAN_TAG_FST):
4524                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4525                        rule->tuples_mask.vlan_tag1);
4526                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4527                        rule->tuples_mask.vlan_tag1);
4528                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4529                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4530
4531                 return true;
4532         case BIT(INNER_ETH_TYPE):
4533                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4534                        rule->tuples_mask.ether_proto);
4535                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4536                        rule->tuples_mask.ether_proto);
4537                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4538                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4539
4540                 return true;
4541         case BIT(INNER_IP_TOS):
4542                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4543                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4544
4545                 return true;
4546         case BIT(INNER_IP_PROTO):
4547                 calc_x(*key_x, rule->tuples.ip_proto,
4548                        rule->tuples_mask.ip_proto);
4549                 calc_y(*key_y, rule->tuples.ip_proto,
4550                        rule->tuples_mask.ip_proto);
4551
4552                 return true;
4553         case BIT(INNER_SRC_IP):
4554                 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4555                        rule->tuples_mask.src_ip[3]);
4556                 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4557                        rule->tuples_mask.src_ip[3]);
4558                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4559                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4560
4561                 return true;
4562         case BIT(INNER_DST_IP):
4563                 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4564                        rule->tuples_mask.dst_ip[3]);
4565                 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4566                        rule->tuples_mask.dst_ip[3]);
4567                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4568                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4569
4570                 return true;
4571         case BIT(INNER_SRC_PORT):
4572                 calc_x(tmp_x_s, rule->tuples.src_port,
4573                        rule->tuples_mask.src_port);
4574                 calc_y(tmp_y_s, rule->tuples.src_port,
4575                        rule->tuples_mask.src_port);
4576                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4577                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4578
4579                 return true;
4580         case BIT(INNER_DST_PORT):
4581                 calc_x(tmp_x_s, rule->tuples.dst_port,
4582                        rule->tuples_mask.dst_port);
4583                 calc_y(tmp_y_s, rule->tuples.dst_port,
4584                        rule->tuples_mask.dst_port);
4585                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4586                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4587
4588                 return true;
4589         default:
4590                 return false;
4591         }
4592 }
4593
4594 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4595                                  u8 vf_id, u8 network_port_id)
4596 {
4597         u32 port_number = 0;
4598
4599         if (port_type == HOST_PORT) {
4600                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4601                                 pf_id);
4602                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4603                                 vf_id);
4604                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4605         } else {
4606                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4607                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4608                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4609         }
4610
4611         return port_number;
4612 }
4613
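/* Pack the active meta data fields (ROCE_TYPE and DST_VPORT) into a 32-bit
 * meta data word, convert it to TCAM x/y form and shift the used bits up to
 * the most significant end of the word, matching the key layout described
 * below.
 */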
4614 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4615                                        __le32 *key_x, __le32 *key_y,
4616                                        struct hclge_fd_rule *rule)
4617 {
4618         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4619         u8 cur_pos = 0, tuple_size, shift_bits;
4620         int i;
4621
4622         for (i = 0; i < MAX_META_DATA; i++) {
4623                 tuple_size = meta_data_key_info[i].key_length;
4624                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4625
4626                 switch (tuple_bit) {
4627                 case BIT(ROCE_TYPE):
4628                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4629                         cur_pos += tuple_size;
4630                         break;
4631                 case BIT(DST_VPORT):
4632                         port_number = hclge_get_port_number(HOST_PORT, 0,
4633                                                             rule->vf_id, 0);
4634                         hnae3_set_field(meta_data,
4635                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
4636                                         cur_pos, port_number);
4637                         cur_pos += tuple_size;
4638                         break;
4639                 default:
4640                         break;
4641                 }
4642         }
4643
4644         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4645         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4646         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4647
4648         *key_x = cpu_to_le32(tmp_x << shift_bits);
4649         *key_y = cpu_to_le32(tmp_y << shift_bits);
4650 }
4651
4652 /* A complete key consists of a meta data key and a tuple key.
4653  * The meta data key is stored in the MSB region and the tuple key in the
4654  * LSB region; unused bits are filled with 0.
4655  */
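/* A rough sketch of the key byte layout used by this function:
 *
 *   byte 0 .............. meta_data_region ....... max_key_length / 8 - 1
 *   |     tuple key     | zero padding |       meta data key            |
 *
 * where meta_data_region = max_key_length / 8 - MAX_META_DATA_LENGTH / 8.
 */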
4656 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4657                             struct hclge_fd_rule *rule)
4658 {
4659         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4660         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4661         u8 *cur_key_x, *cur_key_y;
4662         int i, ret, tuple_size;
4663         u8 meta_data_region;
4664
4665         memset(key_x, 0, sizeof(key_x));
4666         memset(key_y, 0, sizeof(key_y));
4667         cur_key_x = key_x;
4668         cur_key_y = key_y;
4669
4670         for (i = 0; i < MAX_TUPLE; i++) {
4671                 bool tuple_valid;
4672                 u32 check_tuple;
4673
4674                 tuple_size = tuple_key_info[i].key_length / 8;
4675                 check_tuple = key_cfg->tuple_active & BIT(i);
4676
4677                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4678                                                      cur_key_y, rule);
4679                 if (tuple_valid) {
4680                         cur_key_x += tuple_size;
4681                         cur_key_y += tuple_size;
4682                 }
4683         }
4684
4685         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4686                         MAX_META_DATA_LENGTH / 8;
4687
4688         hclge_fd_convert_meta_data(key_cfg,
4689                                    (__le32 *)(key_x + meta_data_region),
4690                                    (__le32 *)(key_y + meta_data_region),
4691                                    rule);
4692
4693         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4694                                    true);
4695         if (ret) {
4696                 dev_err(&hdev->pdev->dev,
4697                         "fd key_y config fail, loc=%d, ret=%d\n",
4698                         rule->location, ret);
4699                 return ret;
4700         }
4701
4702         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4703                                    true);
4704         if (ret)
4705                 dev_err(&hdev->pdev->dev,
4706                         "fd key_x config fail, loc=%d, ret=%d\n",
4707                         rule->location, ret);
4708         return ret;
4709 }
4710
4711 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4712                                struct hclge_fd_rule *rule)
4713 {
4714         struct hclge_fd_ad_data ad_data;
4715
4716         ad_data.ad_id = rule->location;
4717
4718         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4719                 ad_data.drop_packet = true;
4720                 ad_data.forward_to_direct_queue = false;
4721                 ad_data.queue_id = 0;
4722         } else {
4723                 ad_data.drop_packet = false;
4724                 ad_data.forward_to_direct_queue = true;
4725                 ad_data.queue_id = rule->queue_id;
4726         }
4727
4728         ad_data.use_counter = false;
4729         ad_data.counter_id = 0;
4730
4731         ad_data.use_next_stage = false;
4732         ad_data.next_input_key = 0;
4733
4734         ad_data.write_rule_id_to_bd = true;
4735         ad_data.rule_id = rule->location;
4736
4737         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4738 }
4739
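/* Validate an ethtool flow spec against what the flow director supports and
 * build a bitmap of tuples that this rule leaves unused (so they can be
 * wildcarded in the TCAM key). Returns -EOPNOTSUPP for flow types or fields
 * the hardware cannot match on.
 */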
4740 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4741                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4742 {
4743         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4744         struct ethtool_usrip4_spec *usr_ip4_spec;
4745         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4746         struct ethtool_usrip6_spec *usr_ip6_spec;
4747         struct ethhdr *ether_spec;
4748
4749         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4750                 return -EINVAL;
4751
4752         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4753                 return -EOPNOTSUPP;
4754
4755         if ((fs->flow_type & FLOW_EXT) &&
4756             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4757                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4758                 return -EOPNOTSUPP;
4759         }
4760
4761         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4762         case SCTP_V4_FLOW:
4763         case TCP_V4_FLOW:
4764         case UDP_V4_FLOW:
4765                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4766                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4767
4768                 if (!tcp_ip4_spec->ip4src)
4769                         *unused |= BIT(INNER_SRC_IP);
4770
4771                 if (!tcp_ip4_spec->ip4dst)
4772                         *unused |= BIT(INNER_DST_IP);
4773
4774                 if (!tcp_ip4_spec->psrc)
4775                         *unused |= BIT(INNER_SRC_PORT);
4776
4777                 if (!tcp_ip4_spec->pdst)
4778                         *unused |= BIT(INNER_DST_PORT);
4779
4780                 if (!tcp_ip4_spec->tos)
4781                         *unused |= BIT(INNER_IP_TOS);
4782
4783                 break;
4784         case IP_USER_FLOW:
4785                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4786                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4787                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4788
4789                 if (!usr_ip4_spec->ip4src)
4790                         *unused |= BIT(INNER_SRC_IP);
4791
4792                 if (!usr_ip4_spec->ip4dst)
4793                         *unused |= BIT(INNER_DST_IP);
4794
4795                 if (!usr_ip4_spec->tos)
4796                         *unused |= BIT(INNER_IP_TOS);
4797
4798                 if (!usr_ip4_spec->proto)
4799                         *unused |= BIT(INNER_IP_PROTO);
4800
4801                 if (usr_ip4_spec->l4_4_bytes)
4802                         return -EOPNOTSUPP;
4803
4804                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4805                         return -EOPNOTSUPP;
4806
4807                 break;
4808         case SCTP_V6_FLOW:
4809         case TCP_V6_FLOW:
4810         case UDP_V6_FLOW:
4811                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4812                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4813                         BIT(INNER_IP_TOS);
4814
4815                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4816                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4817                         *unused |= BIT(INNER_SRC_IP);
4818
4819                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4820                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4821                         *unused |= BIT(INNER_DST_IP);
4822
4823                 if (!tcp_ip6_spec->psrc)
4824                         *unused |= BIT(INNER_SRC_PORT);
4825
4826                 if (!tcp_ip6_spec->pdst)
4827                         *unused |= BIT(INNER_DST_PORT);
4828
4829                 if (tcp_ip6_spec->tclass)
4830                         return -EOPNOTSUPP;
4831
4832                 break;
4833         case IPV6_USER_FLOW:
4834                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4835                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4836                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4837                         BIT(INNER_DST_PORT);
4838
4839                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4840                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4841                         *unused |= BIT(INNER_SRC_IP);
4842
4843                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4844                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4845                         *unused |= BIT(INNER_DST_IP);
4846
4847                 if (!usr_ip6_spec->l4_proto)
4848                         *unused |= BIT(INNER_IP_PROTO);
4849
4850                 if (usr_ip6_spec->tclass)
4851                         return -EOPNOTSUPP;
4852
4853                 if (usr_ip6_spec->l4_4_bytes)
4854                         return -EOPNOTSUPP;
4855
4856                 break;
4857         case ETHER_FLOW:
4858                 ether_spec = &fs->h_u.ether_spec;
4859                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4860                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4861                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4862
4863                 if (is_zero_ether_addr(ether_spec->h_source))
4864                         *unused |= BIT(INNER_SRC_MAC);
4865
4866                 if (is_zero_ether_addr(ether_spec->h_dest))
4867                         *unused |= BIT(INNER_DST_MAC);
4868
4869                 if (!ether_spec->h_proto)
4870                         *unused |= BIT(INNER_ETH_TYPE);
4871
4872                 break;
4873         default:
4874                 return -EOPNOTSUPP;
4875         }
4876
4877         if ((fs->flow_type & FLOW_EXT)) {
4878                 if (fs->h_ext.vlan_etype)
4879                         return -EOPNOTSUPP;
4880                 if (!fs->h_ext.vlan_tci)
4881                         *unused |= BIT(INNER_VLAN_TAG_FST);
4882
4883                 if (fs->m_ext.vlan_tci) {
4884                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4885                                 return -EINVAL;
4886                 }
4887         } else {
4888                 *unused |= BIT(INNER_VLAN_TAG_FST);
4889         }
4890
4891         if (fs->flow_type & FLOW_MAC_EXT) {
4892                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4893                         return -EOPNOTSUPP;
4894
4895                 if (is_zero_ether_addr(fs->h_ext.h_dest))
4896                         *unused |= BIT(INNER_DST_MAC);
4897                 else
4898                         *unused &= ~(BIT(INNER_DST_MAC));
4899         }
4900
4901         return 0;
4902 }
4903
4904 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4905 {
4906         struct hclge_fd_rule *rule = NULL;
4907         struct hlist_node *node2;
4908
4909         spin_lock_bh(&hdev->fd_rule_lock);
4910         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4911                 if (rule->location >= location)
4912                         break;
4913         }
4914
4915         spin_unlock_bh(&hdev->fd_rule_lock);
4916
4917         return rule && rule->location == location;
4918 }
4919
4920 /* The caller must hold fd_rule_lock */
4921 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4922                                      struct hclge_fd_rule *new_rule,
4923                                      u16 location,
4924                                      bool is_add)
4925 {
4926         struct hclge_fd_rule *rule = NULL, *parent = NULL;
4927         struct hlist_node *node2;
4928
4929         if (is_add && !new_rule)
4930                 return -EINVAL;
4931
4932         hlist_for_each_entry_safe(rule, node2,
4933                                   &hdev->fd_rule_list, rule_node) {
4934                 if (rule->location >= location)
4935                         break;
4936                 parent = rule;
4937         }
4938
4939         if (rule && rule->location == location) {
4940                 hlist_del(&rule->rule_node);
4941                 kfree(rule);
4942                 hdev->hclge_fd_rule_num--;
4943
4944                 if (!is_add) {
4945                         if (!hdev->hclge_fd_rule_num)
4946                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4947                         clear_bit(location, hdev->fd_bmap);
4948
4949                         return 0;
4950                 }
4951         } else if (!is_add) {
4952                 dev_err(&hdev->pdev->dev,
4953                         "delete failed, rule %d does not exist\n",
4954                         location);
4955                 return -EINVAL;
4956         }
4957
4958         INIT_HLIST_NODE(&new_rule->rule_node);
4959
4960         if (parent)
4961                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4962         else
4963                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4964
4965         set_bit(location, hdev->fd_bmap);
4966         hdev->hclge_fd_rule_num++;
4967         hdev->fd_active_type = new_rule->rule_type;
4968
4969         return 0;
4970 }
4971
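/* Copy the match fields of an ethtool flow spec into the driver's
 * hclge_fd_rule tuples/masks, converting big-endian ethtool values to CPU
 * order and filling in the ip_proto implied by TCP/UDP/SCTP flow types.
 */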
4972 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4973                               struct ethtool_rx_flow_spec *fs,
4974                               struct hclge_fd_rule *rule)
4975 {
4976         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4977
4978         switch (flow_type) {
4979         case SCTP_V4_FLOW:
4980         case TCP_V4_FLOW:
4981         case UDP_V4_FLOW:
4982                 rule->tuples.src_ip[3] =
4983                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4984                 rule->tuples_mask.src_ip[3] =
4985                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4986
4987                 rule->tuples.dst_ip[3] =
4988                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4989                 rule->tuples_mask.dst_ip[3] =
4990                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4991
4992                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4993                 rule->tuples_mask.src_port =
4994                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4995
4996                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4997                 rule->tuples_mask.dst_port =
4998                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4999
5000                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5001                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5002
5003                 rule->tuples.ether_proto = ETH_P_IP;
5004                 rule->tuples_mask.ether_proto = 0xFFFF;
5005
5006                 break;
5007         case IP_USER_FLOW:
5008                 rule->tuples.src_ip[3] =
5009                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5010                 rule->tuples_mask.src_ip[3] =
5011                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5012
5013                 rule->tuples.dst_ip[3] =
5014                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5015                 rule->tuples_mask.dst_ip[3] =
5016                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5017
5018                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5019                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5020
5021                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5022                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5023
5024                 rule->tuples.ether_proto = ETH_P_IP;
5025                 rule->tuples_mask.ether_proto = 0xFFFF;
5026
5027                 break;
5028         case SCTP_V6_FLOW:
5029         case TCP_V6_FLOW:
5030         case UDP_V6_FLOW:
5031                 be32_to_cpu_array(rule->tuples.src_ip,
5032                                   fs->h_u.tcp_ip6_spec.ip6src, 4);
5033                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5034                                   fs->m_u.tcp_ip6_spec.ip6src, 4);
5035
5036                 be32_to_cpu_array(rule->tuples.dst_ip,
5037                                   fs->h_u.tcp_ip6_spec.ip6dst, 4);
5038                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5039                                   fs->m_u.tcp_ip6_spec.ip6dst, 4);
5040
5041                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5042                 rule->tuples_mask.src_port =
5043                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5044
5045                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5046                 rule->tuples_mask.dst_port =
5047                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5048
5049                 rule->tuples.ether_proto = ETH_P_IPV6;
5050                 rule->tuples_mask.ether_proto = 0xFFFF;
5051
5052                 break;
5053         case IPV6_USER_FLOW:
5054                 be32_to_cpu_array(rule->tuples.src_ip,
5055                                   fs->h_u.usr_ip6_spec.ip6src, 4);
5056                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5057                                   fs->m_u.usr_ip6_spec.ip6src, 4);
5058
5059                 be32_to_cpu_array(rule->tuples.dst_ip,
5060                                   fs->h_u.usr_ip6_spec.ip6dst, 4);
5061                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5062                                   fs->m_u.usr_ip6_spec.ip6dst, 4);
5063
5064                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5065                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5066
5067                 rule->tuples.ether_proto = ETH_P_IPV6;
5068                 rule->tuples_mask.ether_proto = 0xFFFF;
5069
5070                 break;
5071         case ETHER_FLOW:
5072                 ether_addr_copy(rule->tuples.src_mac,
5073                                 fs->h_u.ether_spec.h_source);
5074                 ether_addr_copy(rule->tuples_mask.src_mac,
5075                                 fs->m_u.ether_spec.h_source);
5076
5077                 ether_addr_copy(rule->tuples.dst_mac,
5078                                 fs->h_u.ether_spec.h_dest);
5079                 ether_addr_copy(rule->tuples_mask.dst_mac,
5080                                 fs->m_u.ether_spec.h_dest);
5081
5082                 rule->tuples.ether_proto =
5083                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5084                 rule->tuples_mask.ether_proto =
5085                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5086
5087                 break;
5088         default:
5089                 return -EOPNOTSUPP;
5090         }
5091
5092         switch (flow_type) {
5093         case SCTP_V4_FLOW:
5094         case SCTP_V6_FLOW:
5095                 rule->tuples.ip_proto = IPPROTO_SCTP;
5096                 rule->tuples_mask.ip_proto = 0xFF;
5097                 break;
5098         case TCP_V4_FLOW:
5099         case TCP_V6_FLOW:
5100                 rule->tuples.ip_proto = IPPROTO_TCP;
5101                 rule->tuples_mask.ip_proto = 0xFF;
5102                 break;
5103         case UDP_V4_FLOW:
5104         case UDP_V6_FLOW:
5105                 rule->tuples.ip_proto = IPPROTO_UDP;
5106                 rule->tuples_mask.ip_proto = 0xFF;
5107                 break;
5108         default:
5109                 break;
5110         }
5111
5112         if ((fs->flow_type & FLOW_EXT)) {
5113                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5114                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5115         }
5116
5117         if (fs->flow_type & FLOW_MAC_EXT) {
5118                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5119                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5120         }
5121
5122         return 0;
5123 }
5124
5125 /* The caller must hold fd_rule_lock */
5126 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5127                                 struct hclge_fd_rule *rule)
5128 {
5129         int ret;
5130
5131         if (!rule) {
5132                 dev_err(&hdev->pdev->dev,
5133                         "The flow director rule is NULL\n");
5134                 return -EINVAL;
5135         }
5136
5137         /* this never fails here, so there is no need to check the return value */
5138         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5139
5140         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5141         if (ret)
5142                 goto clear_rule;
5143
5144         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5145         if (ret)
5146                 goto clear_rule;
5147
5148         return 0;
5149
5150 clear_rule:
5151         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5152         return ret;
5153 }
5154
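/* Entry point for adding a flow director rule via ethtool, e.g. (purely
 * illustrative command, device name and values are arbitrary):
 *
 *   ethtool -U eth0 flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 \
 *           action 3 loc 5
 *
 * The spec is validated, the destination vport/queue is resolved (a VF id
 * may be encoded in the ring cookie), existing aRFS rules are cleared to
 * avoid conflicts, and the rule is written to the TCAM and AD tables.
 */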
5155 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5156                               struct ethtool_rxnfc *cmd)
5157 {
5158         struct hclge_vport *vport = hclge_get_vport(handle);
5159         struct hclge_dev *hdev = vport->back;
5160         u16 dst_vport_id = 0, q_index = 0;
5161         struct ethtool_rx_flow_spec *fs;
5162         struct hclge_fd_rule *rule;
5163         u32 unused = 0;
5164         u8 action;
5165         int ret;
5166
5167         if (!hnae3_dev_fd_supported(hdev))
5168                 return -EOPNOTSUPP;
5169
5170         if (!hdev->fd_en) {
5171                 dev_warn(&hdev->pdev->dev,
5172                          "Please enable flow director first\n");
5173                 return -EOPNOTSUPP;
5174         }
5175
5176         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5177
5178         ret = hclge_fd_check_spec(hdev, fs, &unused);
5179         if (ret) {
5180                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5181                 return ret;
5182         }
5183
5184         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5185                 action = HCLGE_FD_ACTION_DROP_PACKET;
5186         } else {
5187                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5188                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5189                 u16 tqps;
5190
5191                 if (vf > hdev->num_req_vfs) {
5192                         dev_err(&hdev->pdev->dev,
5193                                 "Error: vf id (%d) > max vf num (%d)\n",
5194                                 vf, hdev->num_req_vfs);
5195                         return -EINVAL;
5196                 }
5197
5198                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5199                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5200
5201                 if (ring >= tqps) {
5202                         dev_err(&hdev->pdev->dev,
5203                                 "Error: queue id (%d) > max tqp num (%d)\n",
5204                                 ring, tqps - 1);
5205                         return -EINVAL;
5206                 }
5207
5208                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5209                 q_index = ring;
5210         }
5211
5212         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5213         if (!rule)
5214                 return -ENOMEM;
5215
5216         ret = hclge_fd_get_tuple(hdev, fs, rule);
5217         if (ret) {
5218                 kfree(rule);
5219                 return ret;
5220         }
5221
5222         rule->flow_type = fs->flow_type;
5223
5224         rule->location = fs->location;
5225         rule->unused_tuple = unused;
5226         rule->vf_id = dst_vport_id;
5227         rule->queue_id = q_index;
5228         rule->action = action;
5229         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5230
5231         /* To avoid rule conflicts, we need to clear all aRFS rules when the
5232          * user configures a rule via ethtool.
5233          */
5234         hclge_clear_arfs_rules(handle);
5235
5236         spin_lock_bh(&hdev->fd_rule_lock);
5237         ret = hclge_fd_config_rule(hdev, rule);
5238
5239         spin_unlock_bh(&hdev->fd_rule_lock);
5240
5241         return ret;
5242 }
5243
5244 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5245                               struct ethtool_rxnfc *cmd)
5246 {
5247         struct hclge_vport *vport = hclge_get_vport(handle);
5248         struct hclge_dev *hdev = vport->back;
5249         struct ethtool_rx_flow_spec *fs;
5250         int ret;
5251
5252         if (!hnae3_dev_fd_supported(hdev))
5253                 return -EOPNOTSUPP;
5254
5255         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5256
5257         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5258                 return -EINVAL;
5259
5260         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5261                 dev_err(&hdev->pdev->dev,
5262                         "Delete failed, rule %d does not exist\n",
5263                         fs->location);
5264                 return -ENOENT;
5265         }
5266
5267         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5268                                    fs->location, NULL, false);
5269         if (ret)
5270                 return ret;
5271
5272         spin_lock_bh(&hdev->fd_rule_lock);
5273         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5274
5275         spin_unlock_bh(&hdev->fd_rule_lock);
5276
5277         return ret;
5278 }
5279
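/* Invalidate every TCAM entry that is set in fd_bmap; when clear_list is
 * true, also free the software rule list and reset the rule counters.
 */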
5280 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5281                                      bool clear_list)
5282 {
5283         struct hclge_vport *vport = hclge_get_vport(handle);
5284         struct hclge_dev *hdev = vport->back;
5285         struct hclge_fd_rule *rule;
5286         struct hlist_node *node;
5287         u16 location;
5288
5289         if (!hnae3_dev_fd_supported(hdev))
5290                 return;
5291
5292         spin_lock_bh(&hdev->fd_rule_lock);
5293         for_each_set_bit(location, hdev->fd_bmap,
5294                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5295                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5296                                      NULL, false);
5297
5298         if (clear_list) {
5299                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5300                                           rule_node) {
5301                         hlist_del(&rule->rule_node);
5302                         kfree(rule);
5303                 }
5304                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5305                 hdev->hclge_fd_rule_num = 0;
5306                 bitmap_zero(hdev->fd_bmap,
5307                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5308         }
5309
5310         spin_unlock_bh(&hdev->fd_rule_lock);
5311 }
5312
5313 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5314 {
5315         struct hclge_vport *vport = hclge_get_vport(handle);
5316         struct hclge_dev *hdev = vport->back;
5317         struct hclge_fd_rule *rule;
5318         struct hlist_node *node;
5319         int ret;
5320
5321         /* Return ok here, because reset error handling will check this
5322          * return value. If error is returned here, the reset process will
5323          * fail.
5324          */
5325         if (!hnae3_dev_fd_supported(hdev))
5326                 return 0;
5327
5328         /* if fd is disabled, it should not be restored during reset */
5329         if (!hdev->fd_en)
5330                 return 0;
5331
5332         spin_lock_bh(&hdev->fd_rule_lock);
5333         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5334                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5335                 if (!ret)
5336                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5337
5338                 if (ret) {
5339                         dev_warn(&hdev->pdev->dev,
5340                                  "Restore rule %d failed, remove it\n",
5341                                  rule->location);
5342                         clear_bit(rule->location, hdev->fd_bmap);
5343                         hlist_del(&rule->rule_node);
5344                         kfree(rule);
5345                         hdev->hclge_fd_rule_num--;
5346                 }
5347         }
5348
5349         if (hdev->hclge_fd_rule_num)
5350                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5351
5352         spin_unlock_bh(&hdev->fd_rule_lock);
5353
5354         return 0;
5355 }
5356
5357 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5358                                  struct ethtool_rxnfc *cmd)
5359 {
5360         struct hclge_vport *vport = hclge_get_vport(handle);
5361         struct hclge_dev *hdev = vport->back;
5362
5363         if (!hnae3_dev_fd_supported(hdev))
5364                 return -EOPNOTSUPP;
5365
5366         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5367         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5368
5369         return 0;
5370 }
5371
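/* Look up the rule at fs->location under fd_rule_lock and translate its
 * tuples and masks back into the ethtool flow spec format, including the
 * configured action (drop, or destination queue/VF).
 */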
5372 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5373                                   struct ethtool_rxnfc *cmd)
5374 {
5375         struct hclge_vport *vport = hclge_get_vport(handle);
5376         struct hclge_fd_rule *rule = NULL;
5377         struct hclge_dev *hdev = vport->back;
5378         struct ethtool_rx_flow_spec *fs;
5379         struct hlist_node *node2;
5380
5381         if (!hnae3_dev_fd_supported(hdev))
5382                 return -EOPNOTSUPP;
5383
5384         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5385
5386         spin_lock_bh(&hdev->fd_rule_lock);
5387
5388         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5389                 if (rule->location >= fs->location)
5390                         break;
5391         }
5392
5393         if (!rule || fs->location != rule->location) {
5394                 spin_unlock_bh(&hdev->fd_rule_lock);
5395
5396                 return -ENOENT;
5397         }
5398
5399         fs->flow_type = rule->flow_type;
5400         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5401         case SCTP_V4_FLOW:
5402         case TCP_V4_FLOW:
5403         case UDP_V4_FLOW:
5404                 fs->h_u.tcp_ip4_spec.ip4src =
5405                                 cpu_to_be32(rule->tuples.src_ip[3]);
5406                 fs->m_u.tcp_ip4_spec.ip4src =
5407                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5408                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5409
5410                 fs->h_u.tcp_ip4_spec.ip4dst =
5411                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5412                 fs->m_u.tcp_ip4_spec.ip4dst =
5413                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5414                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5415
5416                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5417                 fs->m_u.tcp_ip4_spec.psrc =
5418                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5419                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5420
5421                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5422                 fs->m_u.tcp_ip4_spec.pdst =
5423                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5424                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5425
5426                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5427                 fs->m_u.tcp_ip4_spec.tos =
5428                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5429                                 0 : rule->tuples_mask.ip_tos;
5430
5431                 break;
5432         case IP_USER_FLOW:
5433                 fs->h_u.usr_ip4_spec.ip4src =
5434                                 cpu_to_be32(rule->tuples.src_ip[3]);
5435                 fs->m_u.usr_ip4_spec.ip4src =
5436                                 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5437                                 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5438
5439                 fs->h_u.usr_ip4_spec.ip4dst =
5440                                 cpu_to_be32(rule->tuples.dst_ip[3]);
5441                 fs->m_u.usr_ip4_spec.ip4dst =
5442                                 rule->unused_tuple & BIT(INNER_DST_IP) ?
5443                                 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5444
5445                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5446                 fs->m_u.usr_ip4_spec.tos =
5447                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5448                                 0 : rule->tuples_mask.ip_tos;
5449
5450                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5451                 fs->m_u.usr_ip4_spec.proto =
5452                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5453                                 0 : rule->tuples_mask.ip_proto;
5454
5455                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5456
5457                 break;
5458         case SCTP_V6_FLOW:
5459         case TCP_V6_FLOW:
5460         case UDP_V6_FLOW:
5461                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5462                                   rule->tuples.src_ip, 4);
5463                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5464                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5465                 else
5466                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5467                                           rule->tuples_mask.src_ip, 4);
5468
5469                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5470                                   rule->tuples.dst_ip, 4);
5471                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5472                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5473                 else
5474                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5475                                           rule->tuples_mask.dst_ip, 4);
5476
5477                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5478                 fs->m_u.tcp_ip6_spec.psrc =
5479                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5480                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5481
5482                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5483                 fs->m_u.tcp_ip6_spec.pdst =
5484                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5485                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5486
5487                 break;
5488         case IPV6_USER_FLOW:
5489                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5490                                   rule->tuples.src_ip, 4);
5491                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5492                         memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5493                 else
5494                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5495                                           rule->tuples_mask.src_ip, 4);
5496
5497                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5498                                   rule->tuples.dst_ip, 4);
5499                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5500                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5501                 else
5502                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5503                                           rule->tuples_mask.dst_ip, 4);
5504
5505                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5506                 fs->m_u.usr_ip6_spec.l4_proto =
5507                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5508                                 0 : rule->tuples_mask.ip_proto;
5509
5510                 break;
5511         case ETHER_FLOW:
5512                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5513                                 rule->tuples.src_mac);
5514                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5515                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5516                 else
5517                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5518                                         rule->tuples_mask.src_mac);
5519
5520                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5521                                 rule->tuples.dst_mac);
5522                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5523                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5524                 else
5525                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5526                                         rule->tuples_mask.dst_mac);
5527
5528                 fs->h_u.ether_spec.h_proto =
5529                                 cpu_to_be16(rule->tuples.ether_proto);
5530                 fs->m_u.ether_spec.h_proto =
5531                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5532                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5533
5534                 break;
5535         default:
5536                 spin_unlock_bh(&hdev->fd_rule_lock);
5537                 return -EOPNOTSUPP;
5538         }
5539
5540         if (fs->flow_type & FLOW_EXT) {
5541                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5542                 fs->m_ext.vlan_tci =
5543                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5544                                 cpu_to_be16(VLAN_VID_MASK) :
5545                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5546         }
5547
5548         if (fs->flow_type & FLOW_MAC_EXT) {
5549                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5550                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5551                         eth_zero_addr(fs->m_ext.h_dest);
5552                 else
5553                         ether_addr_copy(fs->m_ext.h_dest,
5554                                         rule->tuples_mask.dst_mac);
5555         }
5556
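        /* Worked example (for illustration): a rule steering to queue 5 of
         * VF 2 is reported to ethtool as
         *
         *   fs->ring_cookie = (2ULL << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | 5;
         *
         * while drop rules report RX_CLS_FLOW_DISC instead.
         */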
5557         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5558                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5559         } else {
5560                 u64 vf_id;
5561
5562                 fs->ring_cookie = rule->queue_id;
5563                 vf_id = rule->vf_id;
5564                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5565                 fs->ring_cookie |= vf_id;
5566         }
5567
5568         spin_unlock_bh(&hdev->fd_rule_lock);
5569
5570         return 0;
5571 }
5572
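/* Report the locations of all configured flow director rules, serving the
 * ETHTOOL_GRXCLSRLALL request (typically exercised via "ethtool -n <dev>";
 * this is a note on the usual caller, not something enforced here).
 * cmd->data is set to the stage-1 rule capacity, and the walk over the rule
 * list is bounded by cmd->rule_cnt and done under fd_rule_lock.
 */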
5573 static int hclge_get_all_rules(struct hnae3_handle *handle,
5574                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5575 {
5576         struct hclge_vport *vport = hclge_get_vport(handle);
5577         struct hclge_dev *hdev = vport->back;
5578         struct hclge_fd_rule *rule;
5579         struct hlist_node *node2;
5580         int cnt = 0;
5581
5582         if (!hnae3_dev_fd_supported(hdev))
5583                 return -EOPNOTSUPP;
5584
5585         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5586
5587         spin_lock_bh(&hdev->fd_rule_lock);
5588         hlist_for_each_entry_safe(rule, node2,
5589                                   &hdev->fd_rule_list, rule_node) {
5590                 if (cnt == cmd->rule_cnt) {
5591                         spin_unlock_bh(&hdev->fd_rule_lock);
5592                         return -EMSGSIZE;
5593                 }
5594
5595                 rule_locs[cnt] = rule->location;
5596                 cnt++;
5597         }
5598
5599         spin_unlock_bh(&hdev->fd_rule_lock);
5600
5601         cmd->rule_cnt = cnt;
5602
5603         return 0;
5604 }
5605
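/* Extract the flow director match tuples from a dissected flow. For IPv4 only
 * the last u32 of the 4-word address arrays is filled (src_ip[3]/dst_ip[3]);
 * for IPv6 all four words are copied. Only the destination port is captured,
 * matching the aRFS rule template built below.
 */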
5606 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5607                                      struct hclge_fd_rule_tuples *tuples)
5608 {
5609         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5610         tuples->ip_proto = fkeys->basic.ip_proto;
5611         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5612
5613         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5614                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5615                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5616         } else {
5617                 memcpy(tuples->src_ip,
5618                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5619                        sizeof(tuples->src_ip));
5620                 memcpy(tuples->dst_ip,
5621                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5622                        sizeof(tuples->dst_ip));
5623         }
5624 }
5625
5626 /* traverse all rules and check whether an existing rule has the same tuples */
5627 static struct hclge_fd_rule *
5628 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5629                           const struct hclge_fd_rule_tuples *tuples)
5630 {
5631         struct hclge_fd_rule *rule = NULL;
5632         struct hlist_node *node;
5633
5634         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5635                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5636                         return rule;
5637         }
5638
5639         return NULL;
5640 }
5641
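/* Build the rule installed on behalf of aRFS from the parsed flow tuples: the
 * tuple mask is set to all ones (exact match) and the MAC, VLAN tag, IP TOS
 * and source-port tuples are marked unused, so only the addresses, the
 * destination port and the protocol actually steer the flow.
 */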
5642 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5643                                      struct hclge_fd_rule *rule)
5644 {
5645         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5646                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5647                              BIT(INNER_SRC_PORT);
5648         rule->action = 0;
5649         rule->vf_id = 0;
5650         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5651         if (tuples->ether_proto == ETH_P_IP) {
5652                 if (tuples->ip_proto == IPPROTO_TCP)
5653                         rule->flow_type = TCP_V4_FLOW;
5654                 else
5655                         rule->flow_type = UDP_V4_FLOW;
5656         } else {
5657                 if (tuples->ip_proto == IPPROTO_TCP)
5658                         rule->flow_type = TCP_V6_FLOW;
5659                 else
5660                         rule->flow_type = UDP_V6_FLOW;
5661         }
5662         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5663         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5664 }
5665
5666 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5667                                       u16 flow_id, struct flow_keys *fkeys)
5668 {
5669         struct hclge_vport *vport = hclge_get_vport(handle);
5670         struct hclge_fd_rule_tuples new_tuples;
5671         struct hclge_dev *hdev = vport->back;
5672         struct hclge_fd_rule *rule;
5673         u16 tmp_queue_id;
5674         u16 bit_id;
5675         int ret;
5676
5677         if (!hnae3_dev_fd_supported(hdev))
5678                 return -EOPNOTSUPP;
5679
5680         memset(&new_tuples, 0, sizeof(new_tuples));
5681         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5682
5683         spin_lock_bh(&hdev->fd_rule_lock);
5684
5685         /* when there is already an fd rule added by the user,
5686          * arfs should not work
5687          */
5688         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5689                 spin_unlock_bh(&hdev->fd_rule_lock);
5690
5691                 return -EOPNOTSUPP;
5692         }
5693
5694         /* check whether a flow director filter already exists for this flow:
5695          * if not, create a new filter for it;
5696          * if a filter exists with a different queue id, modify the filter;
5697          * if a filter exists with the same queue id, do nothing
5698          */
5699         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5700         if (!rule) {
5701                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5702                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5703                         spin_unlock_bh(&hdev->fd_rule_lock);
5704
5705                         return -ENOSPC;
5706                 }
5707
5708                 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5709                 if (!rule) {
5710                         spin_unlock_bh(&hdev->fd_rule_lock);
5711
5712                         return -ENOMEM;
5713                 }
5714
5715                 set_bit(bit_id, hdev->fd_bmap);
5716                 rule->location = bit_id;
5717                 rule->flow_id = flow_id;
5718                 rule->queue_id = queue_id;
5719                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5720                 ret = hclge_fd_config_rule(hdev, rule);
5721
5722                 spin_unlock_bh(&hdev->fd_rule_lock);
5723
5724                 if (ret)
5725                         return ret;
5726
5727                 return rule->location;
5728         }
5729
5730         spin_unlock_bh(&hdev->fd_rule_lock);
5731
5732         if (rule->queue_id == queue_id)
5733                 return rule->location;
5734
5735         tmp_queue_id = rule->queue_id;
5736         rule->queue_id = queue_id;
5737         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5738         if (ret) {
5739                 rule->queue_id = tmp_queue_id;
5740                 return ret;
5741         }
5742
5743         return rule->location;
5744 }
5745
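/* Periodic aRFS garbage collection: under fd_rule_lock, rules whose flows
 * rps_may_expire_flow() reports as idle are moved onto a private list; the
 * lock is then dropped and their TCAM entries cleared, so the firmware
 * commands are issued without the spinlock held.
 */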
5746 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5747 {
5748 #ifdef CONFIG_RFS_ACCEL
5749         struct hnae3_handle *handle = &hdev->vport[0].nic;
5750         struct hclge_fd_rule *rule;
5751         struct hlist_node *node;
5752         HLIST_HEAD(del_list);
5753
5754         spin_lock_bh(&hdev->fd_rule_lock);
5755         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5756                 spin_unlock_bh(&hdev->fd_rule_lock);
5757                 return;
5758         }
5759         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5760                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5761                                         rule->flow_id, rule->location)) {
5762                         hlist_del_init(&rule->rule_node);
5763                         hlist_add_head(&rule->rule_node, &del_list);
5764                         hdev->hclge_fd_rule_num--;
5765                         clear_bit(rule->location, hdev->fd_bmap);
5766                 }
5767         }
5768         spin_unlock_bh(&hdev->fd_rule_lock);
5769
5770         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5771                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5772                                      rule->location, NULL, false);
5773                 kfree(rule);
5774         }
5775 #endif
5776 }
5777
5778 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5779 {
5780 #ifdef CONFIG_RFS_ACCEL
5781         struct hclge_vport *vport = hclge_get_vport(handle);
5782         struct hclge_dev *hdev = vport->back;
5783
5784         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5785                 hclge_del_all_fd_entries(handle, true);
5786 #endif
5787 }
5788
5789 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5790 {
5791         struct hclge_vport *vport = hclge_get_vport(handle);
5792         struct hclge_dev *hdev = vport->back;
5793
5794         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5795                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5796 }
5797
5798 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5799 {
5800         struct hclge_vport *vport = hclge_get_vport(handle);
5801         struct hclge_dev *hdev = vport->back;
5802
5803         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5804 }
5805
5806 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5807 {
5808         struct hclge_vport *vport = hclge_get_vport(handle);
5809         struct hclge_dev *hdev = vport->back;
5810
5811         return hdev->rst_stats.hw_reset_done_cnt;
5812 }
5813
5814 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5815 {
5816         struct hclge_vport *vport = hclge_get_vport(handle);
5817         struct hclge_dev *hdev = vport->back;
5818         bool clear;
5819
5820         hdev->fd_en = enable;
5821         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5822         if (!enable)
5823                 hclge_del_all_fd_entries(handle, clear);
5824         else
5825                 hclge_restore_fd_entries(handle);
5826 }
5827
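/* Program the MAC working mode in a single command: "enable" toggles TX/RX
 * enable, padding, FCS handling and oversize/undersize truncation together,
 * while the 1588 timestamping and loopback bits are always left cleared here
 * (app loopback is flipped separately in hclge_set_app_loopback()).
 */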
5828 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5829 {
5830         struct hclge_desc desc;
5831         struct hclge_config_mac_mode_cmd *req =
5832                 (struct hclge_config_mac_mode_cmd *)desc.data;
5833         u32 loop_en = 0;
5834         int ret;
5835
5836         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5837         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5838         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5839         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5840         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5841         hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5842         hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5843         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5844         hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5845         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5846         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5847         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5848         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5849         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5850         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5851         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5852
5853         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5854         if (ret)
5855                 dev_err(&hdev->pdev->dev,
5856                         "mac enable fail, ret =%d.\n", ret);
5857 }
5858
5859 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5860 {
5861         struct hclge_config_mac_mode_cmd *req;
5862         struct hclge_desc desc;
5863         u32 loop_en;
5864         int ret;
5865
5866         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5867         /* 1 Read out the MAC mode config at first */
5868         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5869         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5870         if (ret) {
5871                 dev_err(&hdev->pdev->dev,
5872                         "mac loopback get fail, ret =%d.\n", ret);
5873                 return ret;
5874         }
5875
5876         /* 2 Then setup the loopback flag */
5877         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5878         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5879         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5880         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5881
5882         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5883
5884         /* 3 Config mac work mode with loopback flag
5885          * and its original configure parameters
5886          */
5887         hclge_cmd_reuse_desc(&desc, false);
5888         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5889         if (ret)
5890                 dev_err(&hdev->pdev->dev,
5891                         "mac loopback set fail, ret =%d.\n", ret);
5892         return ret;
5893 }
5894
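/* Enable or disable SerDes loopback: the command is sent, then polled for the
 * DONE bit every HCLGE_SERDES_RETRY_MS up to HCLGE_SERDES_RETRY_NUM times
 * (100 * 10 ms, roughly one second), and the same budget is spent waiting for
 * the MAC link state to match the requested state before returning -EBUSY.
 */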
5895 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5896                                      enum hnae3_loop loop_mode)
5897 {
5898 #define HCLGE_SERDES_RETRY_MS   10
5899 #define HCLGE_SERDES_RETRY_NUM  100
5900
5901 #define HCLGE_MAC_LINK_STATUS_MS   10
5902 #define HCLGE_MAC_LINK_STATUS_NUM  100
5903 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5904 #define HCLGE_MAC_LINK_STATUS_UP   1
5905
5906         struct hclge_serdes_lb_cmd *req;
5907         struct hclge_desc desc;
5908         int mac_link_ret = 0;
5909         int ret, i = 0;
5910         u8 loop_mode_b;
5911
5912         req = (struct hclge_serdes_lb_cmd *)desc.data;
5913         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5914
5915         switch (loop_mode) {
5916         case HNAE3_LOOP_SERIAL_SERDES:
5917                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5918                 break;
5919         case HNAE3_LOOP_PARALLEL_SERDES:
5920                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5921                 break;
5922         default:
5923                 dev_err(&hdev->pdev->dev,
5924                         "unsupported serdes loopback mode %d\n", loop_mode);
5925                 return -ENOTSUPP;
5926         }
5927
5928         if (en) {
5929                 req->enable = loop_mode_b;
5930                 req->mask = loop_mode_b;
5931                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5932         } else {
5933                 req->mask = loop_mode_b;
5934                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5935         }
5936
5937         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5938         if (ret) {
5939                 dev_err(&hdev->pdev->dev,
5940                         "serdes loopback set fail, ret = %d\n", ret);
5941                 return ret;
5942         }
5943
5944         do {
5945                 msleep(HCLGE_SERDES_RETRY_MS);
5946                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5947                                            true);
5948                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5949                 if (ret) {
5950                         dev_err(&hdev->pdev->dev,
5951                                 "serdes loopback get fail, ret = %d\n", ret);
5952                         return ret;
5953                 }
5954         } while (++i < HCLGE_SERDES_RETRY_NUM &&
5955                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
5956
5957         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5958                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5959                 return -EBUSY;
5960         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5961                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5962                 return -EIO;
5963         }
5964
5965         hclge_cfg_mac_mode(hdev, en);
5966
5967         i = 0;
5968         do {
5969                 /* serdes internal loopback, independent of the network cable. */
5970                 msleep(HCLGE_MAC_LINK_STATUS_MS);
5971                 ret = hclge_get_mac_link_status(hdev);
5972                 if (ret == mac_link_ret)
5973                         return 0;
5974         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5975
5976         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5977
5978         return -EBUSY;
5979 }
5980
5981 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5982                             int stream_id, bool enable)
5983 {
5984         struct hclge_desc desc;
5985         struct hclge_cfg_com_tqp_queue_cmd *req =
5986                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5987         int ret;
5988
5989         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5990         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5991         req->stream_id = cpu_to_le16(stream_id);
5992         req->enable |= enable << HCLGE_TQP_ENABLE_B;
5993
5994         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5995         if (ret)
5996                 dev_err(&hdev->pdev->dev,
5997                         "Tqp enable fail, status =%d.\n", ret);
5998         return ret;
5999 }
6000
6001 static int hclge_set_loopback(struct hnae3_handle *handle,
6002                               enum hnae3_loop loop_mode, bool en)
6003 {
6004         struct hclge_vport *vport = hclge_get_vport(handle);
6005         struct hnae3_knic_private_info *kinfo;
6006         struct hclge_dev *hdev = vport->back;
6007         int i, ret;
6008
6009         switch (loop_mode) {
6010         case HNAE3_LOOP_APP:
6011                 ret = hclge_set_app_loopback(hdev, en);
6012                 break;
6013         case HNAE3_LOOP_SERIAL_SERDES:
6014         case HNAE3_LOOP_PARALLEL_SERDES:
6015                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6016                 break;
6017         default:
6018                 ret = -ENOTSUPP;
6019                 dev_err(&hdev->pdev->dev,
6020                         "loop_mode %d is not supported\n", loop_mode);
6021                 break;
6022         }
6023
6024         if (ret)
6025                 return ret;
6026
6027         kinfo = &vport->nic.kinfo;
6028         for (i = 0; i < kinfo->num_tqps; i++) {
6029                 ret = hclge_tqp_enable(hdev, i, 0, en);
6030                 if (ret)
6031                         return ret;
6032         }
6033
6034         return 0;
6035 }
6036
6037 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6038 {
6039         struct hclge_vport *vport = hclge_get_vport(handle);
6040         struct hnae3_knic_private_info *kinfo;
6041         struct hnae3_queue *queue;
6042         struct hclge_tqp *tqp;
6043         int i;
6044
6045         kinfo = &vport->nic.kinfo;
6046         for (i = 0; i < kinfo->num_tqps; i++) {
6047                 queue = handle->kinfo.tqp[i];
6048                 tqp = container_of(queue, struct hclge_tqp, q);
6049                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6050         }
6051 }
6052
6053 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6054 {
6055         struct hclge_vport *vport = hclge_get_vport(handle);
6056         struct hclge_dev *hdev = vport->back;
6057
6058         if (enable) {
6059                 mod_timer(&hdev->service_timer, jiffies + HZ);
6060         } else {
6061                 del_timer_sync(&hdev->service_timer);
6062                 cancel_work_sync(&hdev->service_task);
6063                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6064         }
6065 }
6066
6067 static int hclge_ae_start(struct hnae3_handle *handle)
6068 {
6069         struct hclge_vport *vport = hclge_get_vport(handle);
6070         struct hclge_dev *hdev = vport->back;
6071
6072         /* mac enable */
6073         hclge_cfg_mac_mode(hdev, true);
6074         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6075         hdev->hw.mac.link = 0;
6076
6077         /* reset tqp stats */
6078         hclge_reset_tqp_stats(handle);
6079
6080         hclge_mac_start_phy(hdev);
6081
6082         return 0;
6083 }
6084
6085 static void hclge_ae_stop(struct hnae3_handle *handle)
6086 {
6087         struct hclge_vport *vport = hclge_get_vport(handle);
6088         struct hclge_dev *hdev = vport->back;
6089         int i;
6090
6091         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6092
6093         hclge_clear_arfs_rules(handle);
6094
6095         /* If it is not a PF reset, the firmware will disable the MAC,
6096          * so we only need to stop the phy here.
6097          */
6098         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6099             hdev->reset_type != HNAE3_FUNC_RESET) {
6100                 hclge_mac_stop_phy(hdev);
6101                 return;
6102         }
6103
6104         for (i = 0; i < handle->kinfo.num_tqps; i++)
6105                 hclge_reset_tqp(handle, i);
6106
6107         /* Mac disable */
6108         hclge_cfg_mac_mode(hdev, false);
6109
6110         hclge_mac_stop_phy(hdev);
6111
6112         /* reset tqp stats */
6113         hclge_reset_tqp_stats(handle);
6114         hclge_update_link_status(hdev);
6115 }
6116
6117 int hclge_vport_start(struct hclge_vport *vport)
6118 {
6119         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6120         vport->last_active_jiffies = jiffies;
6121         return 0;
6122 }
6123
6124 void hclge_vport_stop(struct hclge_vport *vport)
6125 {
6126         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6127 }
6128
6129 static int hclge_client_start(struct hnae3_handle *handle)
6130 {
6131         struct hclge_vport *vport = hclge_get_vport(handle);
6132
6133         return hclge_vport_start(vport);
6134 }
6135
6136 static void hclge_client_stop(struct hnae3_handle *handle)
6137 {
6138         struct hclge_vport *vport = hclge_get_vport(handle);
6139
6140         hclge_vport_stop(vport);
6141 }
6142
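/* Translate the firmware response of a MAC_VLAN table command into an errno:
 * for ADD, resp_code 0/1 means success and 2/3 mean unicast/multicast table
 * overflow (-ENOSPC); for REMOVE and LOOKUP, resp_code 1 means the entry was
 * not found (-ENOENT). Undefined response codes fall back to -EIO and an
 * unknown opcode to -EINVAL.
 */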
6143 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6144                                          u16 cmdq_resp, u8  resp_code,
6145                                          enum hclge_mac_vlan_tbl_opcode op)
6146 {
6147         struct hclge_dev *hdev = vport->back;
6148         int return_status = -EIO;
6149
6150         if (cmdq_resp) {
6151                 dev_err(&hdev->pdev->dev,
6152                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6153                         cmdq_resp);
6154                 return -EIO;
6155         }
6156
6157         if (op == HCLGE_MAC_VLAN_ADD) {
6158                 if ((!resp_code) || (resp_code == 1)) {
6159                         return_status = 0;
6160                 } else if (resp_code == 2) {
6161                         return_status = -ENOSPC;
6162                         dev_err(&hdev->pdev->dev,
6163                                 "add mac addr failed for uc_overflow.\n");
6164                 } else if (resp_code == 3) {
6165                         return_status = -ENOSPC;
6166                         dev_err(&hdev->pdev->dev,
6167                                 "add mac addr failed for mc_overflow.\n");
6168                 } else {
6169                         dev_err(&hdev->pdev->dev,
6170                                 "add mac addr failed for undefined, code=%d.\n",
6171                                 resp_code);
6172                 }
6173         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6174                 if (!resp_code) {
6175                         return_status = 0;
6176                 } else if (resp_code == 1) {
6177                         return_status = -ENOENT;
6178                         dev_dbg(&hdev->pdev->dev,
6179                                 "remove mac addr failed for miss.\n");
6180                 } else {
6181                         dev_err(&hdev->pdev->dev,
6182                                 "remove mac addr failed for undefined, code=%d.\n",
6183                                 resp_code);
6184                 }
6185         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6186                 if (!resp_code) {
6187                         return_status = 0;
6188                 } else if (resp_code == 1) {
6189                         return_status = -ENOENT;
6190                         dev_dbg(&hdev->pdev->dev,
6191                                 "lookup mac addr failed for miss.\n");
6192                 } else {
6193                         dev_err(&hdev->pdev->dev,
6194                                 "lookup mac addr failed for undefined, code=%d.\n",
6195                                 resp_code);
6196                 }
6197         } else {
6198                 return_status = -EINVAL;
6199                 dev_err(&hdev->pdev->dev,
6200                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6201                         op);
6202         }
6203
6204         return return_status;
6205 }
6206
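/* Set or clear one function's bit in the VFID bitmap of a 3-descriptor
 * MAC_VLAN entry: vfid 0..191 lives in desc[1] and vfid 192..255 in desc[2],
 * addressed as word = vfid / 32, bit = vfid % 32. Worked example (for
 * illustration): vfid 200 maps to desc[2], word (200 - 192) / 32 = 0,
 * bit 200 % 32 = 8.
 */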
6207 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6208 {
6209         int word_num;
6210         int bit_num;
6211
6212         if (vfid > 255 || vfid < 0)
6213                 return -EIO;
6214
6215         if (vfid >= 0 && vfid <= 191) {
6216                 word_num = vfid / 32;
6217                 bit_num  = vfid % 32;
6218                 if (clr)
6219                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6220                 else
6221                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6222         } else {
6223                 word_num = (vfid - 192) / 32;
6224                 bit_num  = vfid % 32;
6225                 if (clr)
6226                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6227                 else
6228                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6229         }
6230
6231         return 0;
6232 }
6233
6234 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6235 {
6236 #define HCLGE_DESC_NUMBER 3
6237 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6238         int i, j;
6239
6240         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6241                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6242                         if (desc[i].data[j])
6243                                 return false;
6244
6245         return true;
6246 }
6247
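/* Pack a 6-byte MAC address into the table entry layout: bytes 0..3 form
 * mac_addr_hi32 with byte 0 in the least significant byte, and bytes 4..5
 * form mac_addr_lo16. Worked example (for illustration): 00:11:22:33:44:55
 * becomes hi32 = 0x33221100 and lo16 = 0x5544. Multicast entries additionally
 * get the MC entry-type and mc_mac_en bits set.
 */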
6248 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6249                                    const u8 *addr, bool is_mc)
6250 {
6251         const unsigned char *mac_addr = addr;
6252         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6253                        (mac_addr[0]) | (mac_addr[1] << 8);
6254         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6255
6256         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6257         if (is_mc) {
6258                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6259                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6260         }
6261
6262         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6263         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6264 }
6265
6266 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6267                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6268 {
6269         struct hclge_dev *hdev = vport->back;
6270         struct hclge_desc desc;
6271         u8 resp_code;
6272         u16 retval;
6273         int ret;
6274
6275         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6276
6277         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6278
6279         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6280         if (ret) {
6281                 dev_err(&hdev->pdev->dev,
6282                         "del mac addr failed for cmd_send, ret =%d.\n",
6283                         ret);
6284                 return ret;
6285         }
6286         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6287         retval = le16_to_cpu(desc.retval);
6288
6289         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6290                                              HCLGE_MAC_VLAN_REMOVE);
6291 }
6292
6293 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6294                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6295                                      struct hclge_desc *desc,
6296                                      bool is_mc)
6297 {
6298         struct hclge_dev *hdev = vport->back;
6299         u8 resp_code;
6300         u16 retval;
6301         int ret;
6302
6303         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6304         if (is_mc) {
6305                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6306                 memcpy(desc[0].data,
6307                        req,
6308                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6309                 hclge_cmd_setup_basic_desc(&desc[1],
6310                                            HCLGE_OPC_MAC_VLAN_ADD,
6311                                            true);
6312                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6313                 hclge_cmd_setup_basic_desc(&desc[2],
6314                                            HCLGE_OPC_MAC_VLAN_ADD,
6315                                            true);
6316                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6317         } else {
6318                 memcpy(desc[0].data,
6319                        req,
6320                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6321                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6322         }
6323         if (ret) {
6324                 dev_err(&hdev->pdev->dev,
6325                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6326                         ret);
6327                 return ret;
6328         }
6329         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6330         retval = le16_to_cpu(desc[0].retval);
6331
6332         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6333                                              HCLGE_MAC_VLAN_LKUP);
6334 }
6335
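/* Write a MAC_VLAN table entry: unicast entries fit in a single descriptor,
 * while multicast entries reuse the three lookup descriptors (which carry the
 * VFID bitmap) and are sent as a chained command, with the NEXT flag set on
 * the first two descriptors and cleared on the last.
 */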
6336 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6337                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6338                                   struct hclge_desc *mc_desc)
6339 {
6340         struct hclge_dev *hdev = vport->back;
6341         int cfg_status;
6342         u8 resp_code;
6343         u16 retval;
6344         int ret;
6345
6346         if (!mc_desc) {
6347                 struct hclge_desc desc;
6348
6349                 hclge_cmd_setup_basic_desc(&desc,
6350                                            HCLGE_OPC_MAC_VLAN_ADD,
6351                                            false);
6352                 memcpy(desc.data, req,
6353                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6354                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6355                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6356                 retval = le16_to_cpu(desc.retval);
6357
6358                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6359                                                            resp_code,
6360                                                            HCLGE_MAC_VLAN_ADD);
6361         } else {
6362                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6363                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6364                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6365                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6366                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6367                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6368                 memcpy(mc_desc[0].data, req,
6369                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6370                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6371                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6372                 retval = le16_to_cpu(mc_desc[0].retval);
6373
6374                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6375                                                            resp_code,
6376                                                            HCLGE_MAC_VLAN_ADD);
6377         }
6378
6379         if (ret) {
6380                 dev_err(&hdev->pdev->dev,
6381                         "add mac addr failed for cmd_send, ret =%d.\n",
6382                         ret);
6383                 return ret;
6384         }
6385
6386         return cfg_status;
6387 }
6388
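/* Carve up the unicast MAC (UMV) space granted by the firmware: the private
 * per-function quota is max_umv_size / (num_req_vfs + 2) entries and the
 * shared pool is that quota plus the division remainder. Worked example (for
 * illustration): 100 entries with 6 requested VFs gives a private quota of
 * 100 / 8 = 12 and a shared pool of 12 + 100 % 8 = 16.
 */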
6389 static int hclge_init_umv_space(struct hclge_dev *hdev)
6390 {
6391         u16 allocated_size = 0;
6392         int ret;
6393
6394         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6395                                   true);
6396         if (ret)
6397                 return ret;
6398
6399         if (allocated_size < hdev->wanted_umv_size)
6400                 dev_warn(&hdev->pdev->dev,
6401                          "Alloc umv space failed, want %d, get %d\n",
6402                          hdev->wanted_umv_size, allocated_size);
6403
6404         mutex_init(&hdev->umv_mutex);
6405         hdev->max_umv_size = allocated_size;
6406         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6407         hdev->share_umv_size = hdev->priv_umv_size +
6408                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6409
6410         return 0;
6411 }
6412
6413 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6414 {
6415         int ret;
6416
6417         if (hdev->max_umv_size > 0) {
6418                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6419                                           false);
6420                 if (ret)
6421                         return ret;
6422                 hdev->max_umv_size = 0;
6423         }
6424         mutex_destroy(&hdev->umv_mutex);
6425
6426         return 0;
6427 }
6428
6429 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6430                                u16 *allocated_size, bool is_alloc)
6431 {
6432         struct hclge_umv_spc_alc_cmd *req;
6433         struct hclge_desc desc;
6434         int ret;
6435
6436         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6437         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6438         hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6439         req->space_size = cpu_to_le32(space_size);
6440
6441         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6442         if (ret) {
6443                 dev_err(&hdev->pdev->dev,
6444                         "%s umv space failed for cmd_send, ret =%d\n",
6445                         is_alloc ? "allocate" : "free", ret);
6446                 return ret;
6447         }
6448
6449         if (is_alloc && allocated_size)
6450                 *allocated_size = le32_to_cpu(desc.data[1]);
6451
6452         return 0;
6453 }
6454
6455 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6456 {
6457         struct hclge_vport *vport;
6458         int i;
6459
6460         for (i = 0; i < hdev->num_alloc_vport; i++) {
6461                 vport = &hdev->vport[i];
6462                 vport->used_umv_num = 0;
6463         }
6464
6465         mutex_lock(&hdev->umv_mutex);
6466         hdev->share_umv_size = hdev->priv_umv_size +
6467                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6468         mutex_unlock(&hdev->umv_mutex);
6469 }
6470
6471 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6472 {
6473         struct hclge_dev *hdev = vport->back;
6474         bool is_full;
6475
6476         mutex_lock(&hdev->umv_mutex);
6477         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6478                    hdev->share_umv_size == 0);
6479         mutex_unlock(&hdev->umv_mutex);
6480
6481         return is_full;
6482 }
6483
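/* Account one unicast entry against the UMV quotas: when adding, a vport
 * consumes the shared pool only once its private quota is exhausted; when
 * freeing, an entry is returned to the shared pool only while the vport is
 * still above its private quota.
 */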
6484 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6485 {
6486         struct hclge_dev *hdev = vport->back;
6487
6488         mutex_lock(&hdev->umv_mutex);
6489         if (is_free) {
6490                 if (vport->used_umv_num > hdev->priv_umv_size)
6491                         hdev->share_umv_size++;
6492
6493                 if (vport->used_umv_num > 0)
6494                         vport->used_umv_num--;
6495         } else {
6496                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6497                     hdev->share_umv_size > 0)
6498                         hdev->share_umv_size--;
6499                 vport->used_umv_num++;
6500         }
6501         mutex_unlock(&hdev->umv_mutex);
6502 }
6503
6504 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6505                              const unsigned char *addr)
6506 {
6507         struct hclge_vport *vport = hclge_get_vport(handle);
6508
6509         return hclge_add_uc_addr_common(vport, addr);
6510 }
6511
6512 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6513                              const unsigned char *addr)
6514 {
6515         struct hclge_dev *hdev = vport->back;
6516         struct hclge_mac_vlan_tbl_entry_cmd req;
6517         struct hclge_desc desc;
6518         u16 egress_port = 0;
6519         int ret;
6520
6521         /* mac addr check */
6522         if (is_zero_ether_addr(addr) ||
6523             is_broadcast_ether_addr(addr) ||
6524             is_multicast_ether_addr(addr)) {
6525                 dev_err(&hdev->pdev->dev,
6526                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6527                          addr,
6528                          is_zero_ether_addr(addr),
6529                          is_broadcast_ether_addr(addr),
6530                          is_multicast_ether_addr(addr));
6531                 return -EINVAL;
6532         }
6533
6534         memset(&req, 0, sizeof(req));
6535
6536         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6537                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6538
6539         req.egress_port = cpu_to_le16(egress_port);
6540
6541         hclge_prepare_mac_addr(&req, addr, false);
6542
6543         /* Lookup the mac address in the mac_vlan table, and add
6544          * it if the entry does not exist. Duplicate unicast entries
6545          * are not allowed in the mac vlan table.
6546          */
6547         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6548         if (ret == -ENOENT) {
6549                 if (!hclge_is_umv_space_full(vport)) {
6550                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6551                         if (!ret)
6552                                 hclge_update_umv_space(vport, false);
6553                         return ret;
6554                 }
6555
6556                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6557                         hdev->priv_umv_size);
6558
6559                 return -ENOSPC;
6560         }
6561
6562         /* check if we just hit the duplicate */
6563         if (!ret) {
6564                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6565                          vport->vport_id, addr);
6566                 return 0;
6567         }
6568
6569         dev_err(&hdev->pdev->dev,
6570                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6571                 addr);
6572
6573         return ret;
6574 }
6575
6576 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6577                             const unsigned char *addr)
6578 {
6579         struct hclge_vport *vport = hclge_get_vport(handle);
6580
6581         return hclge_rm_uc_addr_common(vport, addr);
6582 }
6583
6584 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6585                             const unsigned char *addr)
6586 {
6587         struct hclge_dev *hdev = vport->back;
6588         struct hclge_mac_vlan_tbl_entry_cmd req;
6589         int ret;
6590
6591         /* mac addr check */
6592         if (is_zero_ether_addr(addr) ||
6593             is_broadcast_ether_addr(addr) ||
6594             is_multicast_ether_addr(addr)) {
6595                 dev_dbg(&hdev->pdev->dev,
6596                         "Remove mac err! invalid mac:%pM.\n",
6597                          addr);
6598                 return -EINVAL;
6599         }
6600
6601         memset(&req, 0, sizeof(req));
6602         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6603         hclge_prepare_mac_addr(&req, addr, false);
6604         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6605         if (!ret)
6606                 hclge_update_umv_space(vport, true);
6607
6608         return ret;
6609 }
6610
6611 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6612                              const unsigned char *addr)
6613 {
6614         struct hclge_vport *vport = hclge_get_vport(handle);
6615
6616         return hclge_add_mc_addr_common(vport, addr);
6617 }
6618
6619 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6620                              const unsigned char *addr)
6621 {
6622         struct hclge_dev *hdev = vport->back;
6623         struct hclge_mac_vlan_tbl_entry_cmd req;
6624         struct hclge_desc desc[3];
6625         int status;
6626
6627         /* mac addr check */
6628         if (!is_multicast_ether_addr(addr)) {
6629                 dev_err(&hdev->pdev->dev,
6630                         "Add mc mac err! invalid mac:%pM.\n",
6631                          addr);
6632                 return -EINVAL;
6633         }
6634         memset(&req, 0, sizeof(req));
6635         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6636         hclge_prepare_mac_addr(&req, addr, true);
6637         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6638         if (!status) {
6639                 /* This mac addr exists, update the VFID for it */
6640                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6641                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6642         } else {
6643                 /* This mac addr does not exist, add a new entry for it */
6644                 memset(desc[0].data, 0, sizeof(desc[0].data));
6645                 memset(desc[1].data, 0, sizeof(desc[0].data));
6646                 memset(desc[2].data, 0, sizeof(desc[0].data));
6647                 hclge_update_desc_vfid(desc, vport->vport_id, false);
6648                 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6649         }
6650
6651         if (status == -ENOSPC)
6652                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6653
6654         return status;
6655 }
6656
6657 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6658                             const unsigned char *addr)
6659 {
6660         struct hclge_vport *vport = hclge_get_vport(handle);
6661
6662         return hclge_rm_mc_addr_common(vport, addr);
6663 }
6664
6665 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6666                             const unsigned char *addr)
6667 {
6668         struct hclge_dev *hdev = vport->back;
6669         struct hclge_mac_vlan_tbl_entry_cmd req;
6670         enum hclge_cmd_status status;
6671         struct hclge_desc desc[3];
6672
6673         /* mac addr check */
6674         if (!is_multicast_ether_addr(addr)) {
6675                 dev_dbg(&hdev->pdev->dev,
6676                         "Remove mc mac err! invalid mac:%pM.\n",
6677                          addr);
6678                 return -EINVAL;
6679         }
6680
6681         memset(&req, 0, sizeof(req));
6682         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6683         hclge_prepare_mac_addr(&req, addr, true);
6684         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6685         if (!status) {
6686                 /* This mac addr exists, remove this handle's VFID for it */
6687                 hclge_update_desc_vfid(desc, vport->vport_id, true);
6688
6689                 if (hclge_is_all_function_id_zero(desc))
6690                         /* All the vfids are zero, so this entry needs to be deleted */
6691                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6692                 else
6693                         /* Not all the vfids are zero, update the vfid */
6694                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6695
6696         } else {
6697                 /* Maybe this mac address is in the mta table, but it cannot be
6698                  * deleted here because an mta entry represents an address
6699                  * range rather than a specific address. The delete action for
6700                  * all entries will take effect in update_mta_status called by
6701                  * hns3_nic_set_rx_mode.
6702                  */
6703                 status = 0;
6704         }
6705
6706         return status;
6707 }
6708
6709 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6710                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6711 {
6712         struct hclge_vport_mac_addr_cfg *mac_cfg;
6713         struct list_head *list;
6714
6715         if (!vport->vport_id)
6716                 return;
6717
6718         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6719         if (!mac_cfg)
6720                 return;
6721
6722         mac_cfg->hd_tbl_status = true;
6723         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6724
6725         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6726                &vport->uc_mac_list : &vport->mc_mac_list;
6727
6728         list_add_tail(&mac_cfg->node, list);
6729 }
6730
6731 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6732                               bool is_write_tbl,
6733                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6734 {
6735         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6736         struct list_head *list;
6737         bool uc_flag, mc_flag;
6738
6739         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6740                &vport->uc_mac_list : &vport->mc_mac_list;
6741
6742         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6743         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6744
6745         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6746                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6747                         if (uc_flag && mac_cfg->hd_tbl_status)
6748                                 hclge_rm_uc_addr_common(vport, mac_addr);
6749
6750                         if (mc_flag && mac_cfg->hd_tbl_status)
6751                                 hclge_rm_mc_addr_common(vport, mac_addr);
6752
6753                         list_del(&mac_cfg->node);
6754                         kfree(mac_cfg);
6755                         break;
6756                 }
6757         }
6758 }
6759
6760 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6761                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6762 {
6763         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6764         struct list_head *list;
6765
6766         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6767                &vport->uc_mac_list : &vport->mc_mac_list;
6768
6769         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6770                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6771                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6772
6773                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6774                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6775
6776                 mac_cfg->hd_tbl_status = false;
6777                 if (is_del_list) {
6778                         list_del(&mac_cfg->node);
6779                         kfree(mac_cfg);
6780                 }
6781         }
6782 }
6783
6784 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6785 {
6786         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6787         struct hclge_vport *vport;
6788         int i;
6789
6790         mutex_lock(&hdev->vport_cfg_mutex);
6791         for (i = 0; i < hdev->num_alloc_vport; i++) {
6792                 vport = &hdev->vport[i];
6793                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6794                         list_del(&mac->node);
6795                         kfree(mac);
6796                 }
6797
6798                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6799                         list_del(&mac->node);
6800                         kfree(mac);
6801                 }
6802         }
6803         mutex_unlock(&hdev->vport_cfg_mutex);
6804 }
6805
6806 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6807                                               u16 cmdq_resp, u8 resp_code)
6808 {
6809 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6810 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6811 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6812 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6813
6814         int return_status;
6815
6816         if (cmdq_resp) {
6817                 dev_err(&hdev->pdev->dev,
6818                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6819                         cmdq_resp);
6820                 return -EIO;
6821         }
6822
6823         switch (resp_code) {
6824         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6825         case HCLGE_ETHERTYPE_ALREADY_ADD:
6826                 return_status = 0;
6827                 break;
6828         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6829                 dev_err(&hdev->pdev->dev,
6830                         "add mac ethertype failed for manager table overflow.\n");
6831                 return_status = -EIO;
6832                 break;
6833         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6834                 dev_err(&hdev->pdev->dev,
6835                         "add mac ethertype failed for key conflict.\n");
6836                 return_status = -EIO;
6837                 break;
6838         default:
6839                 dev_err(&hdev->pdev->dev,
6840                         "add mac ethertype failed for undefined, code=%d.\n",
6841                         resp_code);
6842                 return_status = -EIO;
6843         }
6844
6845         return return_status;
6846 }
6847
6848 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6849                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6850 {
6851         struct hclge_desc desc;
6852         u8 resp_code;
6853         u16 retval;
6854         int ret;
6855
6856         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6857         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6858
6859         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6860         if (ret) {
6861                 dev_err(&hdev->pdev->dev,
6862                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6863                         ret);
6864                 return ret;
6865         }
6866
6867         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6868         retval = le16_to_cpu(desc.retval);
6869
6870         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6871 }
6872
6873 static int init_mgr_tbl(struct hclge_dev *hdev)
6874 {
6875         int ret;
6876         int i;
6877
6878         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6879                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6880                 if (ret) {
6881                         dev_err(&hdev->pdev->dev,
6882                                 "add mac ethertype failed, ret =%d.\n",
6883                                 ret);
6884                         return ret;
6885                 }
6886         }
6887
6888         return 0;
6889 }
6890
6891 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6892 {
6893         struct hclge_vport *vport = hclge_get_vport(handle);
6894         struct hclge_dev *hdev = vport->back;
6895
6896         ether_addr_copy(p, hdev->hw.mac.mac_addr);
6897 }
6898
6899 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6900                               bool is_first)
6901 {
6902         const unsigned char *new_addr = (const unsigned char *)p;
6903         struct hclge_vport *vport = hclge_get_vport(handle);
6904         struct hclge_dev *hdev = vport->back;
6905         int ret;
6906
6907         /* mac addr check */
6908         if (is_zero_ether_addr(new_addr) ||
6909             is_broadcast_ether_addr(new_addr) ||
6910             is_multicast_ether_addr(new_addr)) {
6911                 dev_err(&hdev->pdev->dev,
6912                         "Change uc mac err! invalid mac:%pM.\n",
6913                          new_addr);
6914                 return -EINVAL;
6915         }
6916
6917         if ((!is_first || is_kdump_kernel()) &&
6918             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6919                 dev_warn(&hdev->pdev->dev,
6920                          "remove old uc mac address fail.\n");
6921
6922         ret = hclge_add_uc_addr(handle, new_addr);
6923         if (ret) {
6924                 dev_err(&hdev->pdev->dev,
6925                         "add uc mac address fail, ret =%d.\n",
6926                         ret);
6927
6928                 if (!is_first &&
6929                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6930                         dev_err(&hdev->pdev->dev,
6931                                 "restore uc mac address fail.\n");
6932
6933                 return -EIO;
6934         }
6935
6936         ret = hclge_pause_addr_cfg(hdev, new_addr);
6937         if (ret) {
6938                 dev_err(&hdev->pdev->dev,
6939                         "configure mac pause address fail, ret =%d.\n",
6940                         ret);
6941                 return -EIO;
6942         }
6943
6944         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6945
6946         return 0;
6947 }
6948
6949 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6950                           int cmd)
6951 {
6952         struct hclge_vport *vport = hclge_get_vport(handle);
6953         struct hclge_dev *hdev = vport->back;
6954
6955         if (!hdev->hw.mac.phydev)
6956                 return -EOPNOTSUPP;
6957
6958         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6959 }
6960
6961 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6962                                       u8 fe_type, bool filter_en, u8 vf_id)
6963 {
6964         struct hclge_vlan_filter_ctrl_cmd *req;
6965         struct hclge_desc desc;
6966         int ret;
6967
6968         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6969
6970         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6971         req->vlan_type = vlan_type;
6972         req->vlan_fe = filter_en ? fe_type : 0;
6973         req->vf_id = vf_id;
6974
6975         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6976         if (ret)
6977                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6978                         ret);
6979
6980         return ret;
6981 }
6982
6983 #define HCLGE_FILTER_TYPE_VF            0
6984 #define HCLGE_FILTER_TYPE_PORT          1
6985 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
6986 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
6987 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
6988 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
6989 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
6990 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
6991                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6992 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
6993                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6994
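     /* Two generations of VLAN filter layout exist: revision 0x20 hardware
      * only exposes a single egress enable bit (HCLGE_FILTER_FE_EGRESS_V1_B),
      * while revision 0x21 and later split the enable bits per direction and
      * per function type, so HCLGE_FILTER_FE_EGRESS covers NIC + RoCE egress
      * (BIT(1) | BIT(3)) and HCLGE_FILTER_FE_INGRESS covers NIC + RoCE
      * ingress (BIT(0) | BIT(2)).
      */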
6995 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6996 {
6997         struct hclge_vport *vport = hclge_get_vport(handle);
6998         struct hclge_dev *hdev = vport->back;
6999
7000         if (hdev->pdev->revision >= 0x21) {
7001                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7002                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7003                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7004                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7005         } else {
7006                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7007                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7008                                            0);
7009         }
7010         if (enable)
7011                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7012         else
7013                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7014 }
7015
7016 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
7017                                     bool is_kill, u16 vlan, u8 qos,
7018                                     __be16 proto)
7019 {
7020 #define HCLGE_MAX_VF_BYTES  16
7021         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7022         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7023         struct hclge_desc desc[2];
7024         u8 vf_byte_val;
7025         u8 vf_byte_off;
7026         int ret;
7027
7028         /* if vf vlan table is full, firmware will close vf vlan filter, so
7029          * it is useless and unnecessary to add new vlan ids to it
7030          */
7031         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7032                 return 0;
7033
7034         hclge_cmd_setup_basic_desc(&desc[0],
7035                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7036         hclge_cmd_setup_basic_desc(&desc[1],
7037                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7038
7039         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7040
7041         vf_byte_off = vfid / 8;
7042         vf_byte_val = 1 << (vfid % 8);
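             /* The VF bitmap spans two descriptors of HCLGE_MAX_VF_BYTES (16)
              * bytes each, so vfid 0-127 land in desc[0] and higher vfids in
              * desc[1]. For example, vfid 10 selects byte 1 with bit value
              * 1 << 2 = 0x04.
              */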
7043
7044         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7045         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7046
7047         req0->vlan_id  = cpu_to_le16(vlan);
7048         req0->vlan_cfg = is_kill;
7049
7050         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7051                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7052         else
7053                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7054
7055         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7056         if (ret) {
7057                 dev_err(&hdev->pdev->dev,
7058                         "Send vf vlan command fail, ret =%d.\n",
7059                         ret);
7060                 return ret;
7061         }
7062
7063         if (!is_kill) {
7064 #define HCLGE_VF_VLAN_NO_ENTRY  2
7065                 if (!req0->resp_code || req0->resp_code == 1)
7066                         return 0;
7067
7068                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7069                         set_bit(vfid, hdev->vf_vlan_full);
7070                         dev_warn(&hdev->pdev->dev,
7071                                  "vf vlan table is full, vf vlan filter is disabled\n");
7072                         return 0;
7073                 }
7074
7075                 dev_err(&hdev->pdev->dev,
7076                         "Add vf vlan filter fail, ret =%d.\n",
7077                         req0->resp_code);
7078         } else {
7079 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7080                 if (!req0->resp_code)
7081                         return 0;
7082
7083                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
7084                         dev_warn(&hdev->pdev->dev,
7085                                  "vlan %d filter is not in vf vlan table\n",
7086                                  vlan);
7087                         return 0;
7088                 }
7089
7090                 dev_err(&hdev->pdev->dev,
7091                         "Kill vf vlan filter fail, ret =%d.\n",
7092                         req0->resp_code);
7093         }
7094
7095         return -EIO;
7096 }
7097
7098 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7099                                       u16 vlan_id, bool is_kill)
7100 {
7101         struct hclge_vlan_filter_pf_cfg_cmd *req;
7102         struct hclge_desc desc;
7103         u8 vlan_offset_byte_val;
7104         u8 vlan_offset_byte;
7105         u8 vlan_offset_160;
7106         int ret;
7107
7108         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7109
7110         vlan_offset_160 = vlan_id / 160;
7111         vlan_offset_byte = (vlan_id % 160) / 8;
7112         vlan_offset_byte_val = 1 << (vlan_id % 8);
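             /* The 4K VLAN id space is handled in blocks of 160 ids; the
              * command carries one block's bitmap at a time. For example,
              * vlan_id 170 gives vlan_offset_160 = 1, vlan_offset_byte = 1
              * and a bit value of 1 << 2 = 0x04.
              */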
7113
7114         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7115         req->vlan_offset = vlan_offset_160;
7116         req->vlan_cfg = is_kill;
7117         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7118
7119         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7120         if (ret)
7121                 dev_err(&hdev->pdev->dev,
7122                         "port vlan command, send fail, ret =%d.\n", ret);
7123         return ret;
7124 }
7125
7126 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7127                                     u16 vport_id, u16 vlan_id, u8 qos,
7128                                     bool is_kill)
7129 {
7130         u16 vport_idx, vport_num = 0;
7131         int ret;
7132
7133         if (is_kill && !vlan_id)
7134                 return 0;
7135
7136         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7137                                        0, proto);
7138         if (ret) {
7139                 dev_err(&hdev->pdev->dev,
7140                         "Set %d vport vlan filter config fail, ret =%d.\n",
7141                         vport_id, ret);
7142                 return ret;
7143         }
7144
7145         /* vlan 0 may be added twice when 8021q module is enabled */
7146         if (!is_kill && !vlan_id &&
7147             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7148                 return 0;
7149
7150         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7151                 dev_err(&hdev->pdev->dev,
7152                         "Add port vlan failed, vport %d is already in vlan %d\n",
7153                         vport_id, vlan_id);
7154                 return -EINVAL;
7155         }
7156
7157         if (is_kill &&
7158             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7159                 dev_err(&hdev->pdev->dev,
7160                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7161                         vport_id, vlan_id);
7162                 return -EINVAL;
7163         }
7164
7165         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7166                 vport_num++;
7167
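             /* Only program the shared port (PF) VLAN filter entry when the
              * first vport joins this vlan or the last vport leaves it;
              * otherwise the per-vlan vport bitmap updated above is enough.
              */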
7168         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7169                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7170                                                  is_kill);
7171
7172         return ret;
7173 }
7174
7175 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7176 {
7177         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7178         struct hclge_vport_vtag_tx_cfg_cmd *req;
7179         struct hclge_dev *hdev = vport->back;
7180         struct hclge_desc desc;
7181         int status;
7182
7183         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7184
7185         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7186         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7187         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7188         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7189                       vcfg->accept_tag1 ? 1 : 0);
7190         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7191                       vcfg->accept_untag1 ? 1 : 0);
7192         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7193                       vcfg->accept_tag2 ? 1 : 0);
7194         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7195                       vcfg->accept_untag2 ? 1 : 0);
7196         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7197                       vcfg->insert_tag1_en ? 1 : 0);
7198         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7199                       vcfg->insert_tag2_en ? 1 : 0);
7200         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7201
7202         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7203         req->vf_bitmap[req->vf_offset] =
7204                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7205
7206         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7207         if (status)
7208                 dev_err(&hdev->pdev->dev,
7209                         "Send port txvlan cfg command fail, ret =%d\n",
7210                         status);
7211
7212         return status;
7213 }
7214
7215 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7216 {
7217         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7218         struct hclge_vport_vtag_rx_cfg_cmd *req;
7219         struct hclge_dev *hdev = vport->back;
7220         struct hclge_desc desc;
7221         int status;
7222
7223         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7224
7225         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7226         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7227                       vcfg->strip_tag1_en ? 1 : 0);
7228         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7229                       vcfg->strip_tag2_en ? 1 : 0);
7230         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7231                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7232         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7233                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7234
7235         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7236         req->vf_bitmap[req->vf_offset] =
7237                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7238
7239         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7240         if (status)
7241                 dev_err(&hdev->pdev->dev,
7242                         "Send port rxvlan cfg command fail, ret =%d\n",
7243                         status);
7244
7245         return status;
7246 }
7247
7248 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7249                                   u16 port_base_vlan_state,
7250                                   u16 vlan_tag)
7251 {
7252         int ret;
7253
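             /* Tag1 carries the port based VLAN: when it is enabled, the
              * hardware inserts the configured default tag1 on transmit and
              * tag1-tagged frames from the stack are not accepted; when it is
              * disabled, tag1 from the stack passes through unchanged.
              */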
7254         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7255                 vport->txvlan_cfg.accept_tag1 = true;
7256                 vport->txvlan_cfg.insert_tag1_en = false;
7257                 vport->txvlan_cfg.default_tag1 = 0;
7258         } else {
7259                 vport->txvlan_cfg.accept_tag1 = false;
7260                 vport->txvlan_cfg.insert_tag1_en = true;
7261                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7262         }
7263
7264         vport->txvlan_cfg.accept_untag1 = true;
7265
7266         /* accept_tag2 and accept_untag2 are not supported on
7267          * pdev revision(0x20); newer revisions support them, but
7268          * these two fields can not be configured by the user.
7269          */
7270         vport->txvlan_cfg.accept_tag2 = true;
7271         vport->txvlan_cfg.accept_untag2 = true;
7272         vport->txvlan_cfg.insert_tag2_en = false;
7273         vport->txvlan_cfg.default_tag2 = 0;
7274
7275         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7276                 vport->rxvlan_cfg.strip_tag1_en = false;
7277                 vport->rxvlan_cfg.strip_tag2_en =
7278                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7279         } else {
7280                 vport->rxvlan_cfg.strip_tag1_en =
7281                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7282                 vport->rxvlan_cfg.strip_tag2_en = true;
7283         }
7284         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7285         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7286
7287         ret = hclge_set_vlan_tx_offload_cfg(vport);
7288         if (ret)
7289                 return ret;
7290
7291         return hclge_set_vlan_rx_offload_cfg(vport);
7292 }
7293
7294 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7295 {
7296         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7297         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7298         struct hclge_desc desc;
7299         int status;
7300
7301         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7302         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7303         rx_req->ot_fst_vlan_type =
7304                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7305         rx_req->ot_sec_vlan_type =
7306                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7307         rx_req->in_fst_vlan_type =
7308                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7309         rx_req->in_sec_vlan_type =
7310                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7311
7312         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7313         if (status) {
7314                 dev_err(&hdev->pdev->dev,
7315                         "Send rxvlan protocol type command fail, ret =%d\n",
7316                         status);
7317                 return status;
7318         }
7319
7320         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7321
7322         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7323         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7324         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7325
7326         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7327         if (status)
7328                 dev_err(&hdev->pdev->dev,
7329                         "Send txvlan protocol type command fail, ret =%d\n",
7330                         status);
7331
7332         return status;
7333 }
7334
7335 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7336 {
7337 #define HCLGE_DEF_VLAN_TYPE             0x8100
7338
7339         struct hnae3_handle *handle = &hdev->vport[0].nic;
7340         struct hclge_vport *vport;
7341         int ret;
7342         int i;
7343
7344         if (hdev->pdev->revision >= 0x21) {
7345                 /* for revision 0x21, vf vlan filter is per function */
7346                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7347                         vport = &hdev->vport[i];
7348                         ret = hclge_set_vlan_filter_ctrl(hdev,
7349                                                          HCLGE_FILTER_TYPE_VF,
7350                                                          HCLGE_FILTER_FE_EGRESS,
7351                                                          true,
7352                                                          vport->vport_id);
7353                         if (ret)
7354                                 return ret;
7355                 }
7356
7357                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7358                                                  HCLGE_FILTER_FE_INGRESS, true,
7359                                                  0);
7360                 if (ret)
7361                         return ret;
7362         } else {
7363                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7364                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7365                                                  true, 0);
7366                 if (ret)
7367                         return ret;
7368         }
7369
7370         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7371
7372         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7373         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7374         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7375         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7376         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7377         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7378
7379         ret = hclge_set_vlan_protocol_type(hdev);
7380         if (ret)
7381                 return ret;
7382
7383         for (i = 0; i < hdev->num_alloc_vport; i++) {
7384                 u16 vlan_tag;
7385
7386                 vport = &hdev->vport[i];
7387                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7388
7389                 ret = hclge_vlan_offload_cfg(vport,
7390                                              vport->port_base_vlan_cfg.state,
7391                                              vlan_tag);
7392                 if (ret)
7393                         return ret;
7394         }
7395
7396         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7397 }
7398
7399 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7400                                        bool writen_to_tbl)
7401 {
7402         struct hclge_vport_vlan_cfg *vlan;
7403
7404         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7405         if (!vlan)
7406                 return;
7407
7408         vlan->hd_tbl_status = writen_to_tbl;
7409         vlan->vlan_id = vlan_id;
7410
7411         list_add_tail(&vlan->node, &vport->vlan_list);
7412 }
7413
7414 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7415 {
7416         struct hclge_vport_vlan_cfg *vlan, *tmp;
7417         struct hclge_dev *hdev = vport->back;
7418         int ret;
7419
7420         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7421                 if (!vlan->hd_tbl_status) {
7422                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7423                                                        vport->vport_id,
7424                                                        vlan->vlan_id, 0, false);
7425                         if (ret) {
7426                                 dev_err(&hdev->pdev->dev,
7427                                         "restore vport vlan list failed, ret=%d\n",
7428                                         ret);
7429                                 return ret;
7430                         }
7431                 }
7432                 vlan->hd_tbl_status = true;
7433         }
7434
7435         return 0;
7436 }
7437
7438 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7439                                       bool is_write_tbl)
7440 {
7441         struct hclge_vport_vlan_cfg *vlan, *tmp;
7442         struct hclge_dev *hdev = vport->back;
7443
7444         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7445                 if (vlan->vlan_id == vlan_id) {
7446                         if (is_write_tbl && vlan->hd_tbl_status)
7447                                 hclge_set_vlan_filter_hw(hdev,
7448                                                          htons(ETH_P_8021Q),
7449                                                          vport->vport_id,
7450                                                          vlan_id, 0,
7451                                                          true);
7452
7453                         list_del(&vlan->node);
7454                         kfree(vlan);
7455                         break;
7456                 }
7457         }
7458 }
7459
7460 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7461 {
7462         struct hclge_vport_vlan_cfg *vlan, *tmp;
7463         struct hclge_dev *hdev = vport->back;
7464
7465         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7466                 if (vlan->hd_tbl_status)
7467                         hclge_set_vlan_filter_hw(hdev,
7468                                                  htons(ETH_P_8021Q),
7469                                                  vport->vport_id,
7470                                                  vlan->vlan_id, 0,
7471                                                  true);
7472
7473                 vlan->hd_tbl_status = false;
7474                 if (is_del_list) {
7475                         list_del(&vlan->node);
7476                         kfree(vlan);
7477                 }
7478         }
7479 }
7480
7481 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7482 {
7483         struct hclge_vport_vlan_cfg *vlan, *tmp;
7484         struct hclge_vport *vport;
7485         int i;
7486
7487         mutex_lock(&hdev->vport_cfg_mutex);
7488         for (i = 0; i < hdev->num_alloc_vport; i++) {
7489                 vport = &hdev->vport[i];
7490                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7491                         list_del(&vlan->node);
7492                         kfree(vlan);
7493                 }
7494         }
7495         mutex_unlock(&hdev->vport_cfg_mutex);
7496 }
7497
7498 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7499 {
7500         struct hclge_vport *vport = hclge_get_vport(handle);
7501         struct hclge_vport_vlan_cfg *vlan, *tmp;
7502         struct hclge_dev *hdev = vport->back;
7503         u16 vlan_proto, qos;
7504         u16 state, vlan_id;
7505         int i;
7506
7507         mutex_lock(&hdev->vport_cfg_mutex);
7508         for (i = 0; i < hdev->num_alloc_vport; i++) {
7509                 vport = &hdev->vport[i];
7510                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7511                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7512                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7513                 state = vport->port_base_vlan_cfg.state;
7514
7515                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7516                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7517                                                  vport->vport_id, vlan_id, qos,
7518                                                  false);
7519                         continue;
7520                 }
7521
7522                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7523                         if (vlan->hd_tbl_status)
7524                                 hclge_set_vlan_filter_hw(hdev,
7525                                                          htons(ETH_P_8021Q),
7526                                                          vport->vport_id,
7527                                                          vlan->vlan_id, 0,
7528                                                          false);
7529                 }
7530         }
7531
7532         mutex_unlock(&hdev->vport_cfg_mutex);
7533 }
7534
7535 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7536 {
7537         struct hclge_vport *vport = hclge_get_vport(handle);
7538
7539         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7540                 vport->rxvlan_cfg.strip_tag1_en = false;
7541                 vport->rxvlan_cfg.strip_tag2_en = enable;
7542         } else {
7543                 vport->rxvlan_cfg.strip_tag1_en = enable;
7544                 vport->rxvlan_cfg.strip_tag2_en = true;
7545         }
7546         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7547         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7548         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7549
7550         return hclge_set_vlan_rx_offload_cfg(vport);
7551 }
7552
7553 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7554                                             u16 port_base_vlan_state,
7555                                             struct hclge_vlan_info *new_info,
7556                                             struct hclge_vlan_info *old_info)
7557 {
7558         struct hclge_dev *hdev = vport->back;
7559         int ret;
7560
7561         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7562                 hclge_rm_vport_all_vlan_table(vport, false);
7563                 return hclge_set_vlan_filter_hw(hdev,
7564                                                  htons(new_info->vlan_proto),
7565                                                  vport->vport_id,
7566                                                  new_info->vlan_tag,
7567                                                  new_info->qos, false);
7568         }
7569
7570         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7571                                        vport->vport_id, old_info->vlan_tag,
7572                                        old_info->qos, true);
7573         if (ret)
7574                 return ret;
7575
7576         return hclge_add_vport_all_vlan_table(vport);
7577 }
7578
7579 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7580                                     struct hclge_vlan_info *vlan_info)
7581 {
7582         struct hnae3_handle *nic = &vport->nic;
7583         struct hclge_vlan_info *old_vlan_info;
7584         struct hclge_dev *hdev = vport->back;
7585         int ret;
7586
7587         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7588
7589         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7590         if (ret)
7591                 return ret;
7592
7593         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7594                 /* add new VLAN tag */
7595                 ret = hclge_set_vlan_filter_hw(hdev,
7596                                                htons(vlan_info->vlan_proto),
7597                                                vport->vport_id,
7598                                                vlan_info->vlan_tag,
7599                                                vlan_info->qos, false);
7600                 if (ret)
7601                         return ret;
7602
7603                 /* remove old VLAN tag */
7604                 ret = hclge_set_vlan_filter_hw(hdev,
7605                                                htons(old_vlan_info->vlan_proto),
7606                                                vport->vport_id,
7607                                                old_vlan_info->vlan_tag,
7608                                                old_vlan_info->qos, true);
7609                 if (ret)
7610                         return ret;
7611
7612                 goto update;
7613         }
7614
7615         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7616                                                old_vlan_info);
7617         if (ret)
7618                 return ret;
7619
7620         /* update state only when disabling/enabling port based VLAN */
7621         vport->port_base_vlan_cfg.state = state;
7622         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7623                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7624         else
7625                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7626
7627 update:
7628         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7629         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7630         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7631
7632         return 0;
7633 }
7634
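     /* Decide how a new port based VLAN request relates to the current state:
      * currently disabled + vlan 0        -> NOCHANGE
      * currently disabled + non-zero vlan -> ENABLE
      * currently enabled  + vlan 0        -> DISABLE
      * currently enabled  + same tag      -> NOCHANGE
      * currently enabled  + different tag -> MODIFY
      */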
7635 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7636                                           enum hnae3_port_base_vlan_state state,
7637                                           u16 vlan)
7638 {
7639         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7640                 if (!vlan)
7641                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7642                 else
7643                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7644         } else {
7645                 if (!vlan)
7646                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7647                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7648                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7649                 else
7650                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7651         }
7652 }
7653
7654 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7655                                     u16 vlan, u8 qos, __be16 proto)
7656 {
7657         struct hclge_vport *vport = hclge_get_vport(handle);
7658         struct hclge_dev *hdev = vport->back;
7659         struct hclge_vlan_info vlan_info;
7660         u16 state;
7661         int ret;
7662
7663         if (hdev->pdev->revision == 0x20)
7664                 return -EOPNOTSUPP;
7665
7666         /* qos is a 3-bit value, so it can not be bigger than 7 */
7667         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7668                 return -EINVAL;
7669         if (proto != htons(ETH_P_8021Q))
7670                 return -EPROTONOSUPPORT;
7671
7672         vport = &hdev->vport[vfid];
7673         state = hclge_get_port_base_vlan_state(vport,
7674                                                vport->port_base_vlan_cfg.state,
7675                                                vlan);
7676         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7677                 return 0;
7678
7679         vlan_info.vlan_tag = vlan;
7680         vlan_info.qos = qos;
7681         vlan_info.vlan_proto = ntohs(proto);
7682
7683         /* update port based VLAN for PF */
7684         if (!vfid) {
7685                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7686                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7687                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7688
7689                 return ret;
7690         }
7691
7692         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7693                 return hclge_update_port_base_vlan_cfg(vport, state,
7694                                                        &vlan_info);
7695         } else {
7696                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7697                                                         (u8)vfid, state,
7698                                                         vlan, qos,
7699                                                         ntohs(proto));
7700                 return ret;
7701         }
7702 }
7703
7704 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7705                           u16 vlan_id, bool is_kill)
7706 {
7707         struct hclge_vport *vport = hclge_get_vport(handle);
7708         struct hclge_dev *hdev = vport->back;
7709         bool writen_to_tbl = false;
7710         int ret = 0;
7711
7712         /* when port based VLAN is enabled, we use the port based VLAN as the
7713          * VLAN filter entry. In this case, we don't update the VLAN filter
7714          * table when the user adds a new VLAN or removes an existing one, we
7715          * just update the vport VLAN list. VLAN ids in that list won't be
7716          * written into the VLAN filter table until port based VLAN is disabled
7717          */
7718         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7719                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7720                                                vlan_id, 0, is_kill);
7721                 writen_to_tbl = true;
7722         }
7723
7724         if (ret)
7725                 return ret;
7726
7727         if (is_kill)
7728                 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7729         else
7730                 hclge_add_vport_vlan_table(vport, vlan_id,
7731                                            writen_to_tbl);
7732
7733         return 0;
7734 }
7735
7736 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7737 {
7738         struct hclge_config_max_frm_size_cmd *req;
7739         struct hclge_desc desc;
7740
7741         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7742
7743         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7744         req->max_frm_size = cpu_to_le16(new_mps);
7745         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7746
7747         return hclge_cmd_send(&hdev->hw, &desc, 1);
7748 }
7749
7750 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7751 {
7752         struct hclge_vport *vport = hclge_get_vport(handle);
7753
7754         return hclge_set_vport_mtu(vport, new_mtu);
7755 }
7756
7757 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7758 {
7759         struct hclge_dev *hdev = vport->back;
7760         int i, max_frm_size, ret = 0;
7761
7762         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
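             /* The hardware works on whole frames, so the MTU is converted to
              * a max frame size including the Ethernet header, FCS and room
              * for two VLAN tags, e.g. an MTU of 1500 gives
              * 1500 + 14 + 4 + 2 * 4 = 1526 bytes.
              */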
7763         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7764             max_frm_size > HCLGE_MAC_MAX_FRAME)
7765                 return -EINVAL;
7766
7767         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7768         mutex_lock(&hdev->vport_lock);
7769         /* VF's mps must fit within hdev->mps */
7770         if (vport->vport_id && max_frm_size > hdev->mps) {
7771                 mutex_unlock(&hdev->vport_lock);
7772                 return -EINVAL;
7773         } else if (vport->vport_id) {
7774                 vport->mps = max_frm_size;
7775                 mutex_unlock(&hdev->vport_lock);
7776                 return 0;
7777         }
7778
7779         /* PF's mps must be greater than VF's mps */
7780         for (i = 1; i < hdev->num_alloc_vport; i++)
7781                 if (max_frm_size < hdev->vport[i].mps) {
7782                         mutex_unlock(&hdev->vport_lock);
7783                         return -EINVAL;
7784                 }
7785
7786         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7787
7788         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7789         if (ret) {
7790                 dev_err(&hdev->pdev->dev,
7791                         "Change mtu fail, ret =%d\n", ret);
7792                 goto out;
7793         }
7794
7795         hdev->mps = max_frm_size;
7796         vport->mps = max_frm_size;
7797
7798         ret = hclge_buffer_alloc(hdev);
7799         if (ret)
7800                 dev_err(&hdev->pdev->dev,
7801                         "Allocate buffer fail, ret =%d\n", ret);
7802
7803 out:
7804         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7805         mutex_unlock(&hdev->vport_lock);
7806         return ret;
7807 }
7808
7809 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7810                                     bool enable)
7811 {
7812         struct hclge_reset_tqp_queue_cmd *req;
7813         struct hclge_desc desc;
7814         int ret;
7815
7816         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7817
7818         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7819         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7820         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7821
7822         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7823         if (ret) {
7824                 dev_err(&hdev->pdev->dev,
7825                         "Send tqp reset cmd error, status =%d\n", ret);
7826                 return ret;
7827         }
7828
7829         return 0;
7830 }
7831
7832 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7833 {
7834         struct hclge_reset_tqp_queue_cmd *req;
7835         struct hclge_desc desc;
7836         int ret;
7837
7838         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7839
7840         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7841         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7842
7843         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7844         if (ret) {
7845                 dev_err(&hdev->pdev->dev,
7846                         "Get reset status error, status =%d\n", ret);
7847                 return ret;
7848         }
7849
7850         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7851 }
7852
7853 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7854 {
7855         struct hnae3_queue *queue;
7856         struct hclge_tqp *tqp;
7857
7858         queue = handle->kinfo.tqp[queue_id];
7859         tqp = container_of(queue, struct hclge_tqp, q);
7860
7861         return tqp->index;
7862 }
7863
7864 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7865 {
7866         struct hclge_vport *vport = hclge_get_vport(handle);
7867         struct hclge_dev *hdev = vport->back;
7868         int reset_try_times = 0;
7869         int reset_status;
7870         u16 queue_gid;
7871         int ret = 0;
7872
7873         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7874
7875         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7876         if (ret) {
7877                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7878                 return ret;
7879         }
7880
7881         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7882         if (ret) {
7883                 dev_err(&hdev->pdev->dev,
7884                         "Send reset tqp cmd fail, ret = %d\n", ret);
7885                 return ret;
7886         }
7887
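             /* Poll for completion of the queue reset: each try sleeps 20ms,
              * so the total wait is bounded by HCLGE_TQP_RESET_TRY_TIMES * 20ms
              * (the retry count is defined in the driver's header, not shown
              * here).
              */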
7888         reset_try_times = 0;
7889         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7890                 /* Wait for tqp hw reset */
7891                 msleep(20);
7892                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7893                 if (reset_status)
7894                         break;
7895         }
7896
7897         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7898                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7899                 return -ETIME;
7900         }
7901
7902         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7903         if (ret)
7904                 dev_err(&hdev->pdev->dev,
7905                         "Deassert the soft reset fail, ret = %d\n", ret);
7906
7907         return ret;
7908 }
7909
7910 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7911 {
7912         struct hclge_dev *hdev = vport->back;
7913         int reset_try_times = 0;
7914         int reset_status;
7915         u16 queue_gid;
7916         int ret;
7917
7918         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7919
7920         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7921         if (ret) {
7922                 dev_warn(&hdev->pdev->dev,
7923                          "Send reset tqp cmd fail, ret = %d\n", ret);
7924                 return;
7925         }
7926
7927         reset_try_times = 0;
7928         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7929                 /* Wait for tqp hw reset */
7930                 msleep(20);
7931                 reset_status = hclge_get_reset_status(hdev, queue_gid);
7932                 if (reset_status)
7933                         break;
7934         }
7935
7936         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7937                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7938                 return;
7939         }
7940
7941         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7942         if (ret)
7943                 dev_warn(&hdev->pdev->dev,
7944                          "Deassert the soft reset fail, ret = %d\n", ret);
7945 }
7946
7947 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7948 {
7949         struct hclge_vport *vport = hclge_get_vport(handle);
7950         struct hclge_dev *hdev = vport->back;
7951
7952         return hdev->fw_version;
7953 }
7954
7955 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7956 {
7957         struct phy_device *phydev = hdev->hw.mac.phydev;
7958
7959         if (!phydev)
7960                 return;
7961
7962         phy_set_asym_pause(phydev, rx_en, tx_en);
7963 }
7964
7965 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7966 {
7967         int ret;
7968
7969         if (rx_en && tx_en)
7970                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7971         else if (rx_en && !tx_en)
7972                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7973         else if (!rx_en && tx_en)
7974                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7975         else
7976                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7977
7978         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7979                 return 0;
7980
7981         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7982         if (ret) {
7983                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7984                         ret);
7985                 return ret;
7986         }
7987
7988         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7989
7990         return 0;
7991 }
7992
7993 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7994 {
7995         struct phy_device *phydev = hdev->hw.mac.phydev;
7996         u16 remote_advertising = 0;
7997         u16 local_advertising = 0;
7998         u32 rx_pause, tx_pause;
7999         u8 flowctl;
8000
8001         if (!phydev->link || !phydev->autoneg)
8002                 return 0;
8003
8004         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8005
8006         if (phydev->pause)
8007                 remote_advertising = LPA_PAUSE_CAP;
8008
8009         if (phydev->asym_pause)
8010                 remote_advertising |= LPA_PAUSE_ASYM;
8011
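             /* Resolve the pause configuration from what we advertise and
              * what the link partner advertises, per the standard 802.3 pause
              * resolution; for example, if both sides advertise symmetric
              * pause, the result enables pause in both directions
              * (FLOW_CTRL_TX | FLOW_CTRL_RX).
              */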
8012         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8013                                            remote_advertising);
8014         tx_pause = flowctl & FLOW_CTRL_TX;
8015         rx_pause = flowctl & FLOW_CTRL_RX;
8016
8017         if (phydev->duplex == HCLGE_MAC_HALF) {
8018                 tx_pause = 0;
8019                 rx_pause = 0;
8020         }
8021
8022         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8023 }
8024
8025 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8026                                  u32 *rx_en, u32 *tx_en)
8027 {
8028         struct hclge_vport *vport = hclge_get_vport(handle);
8029         struct hclge_dev *hdev = vport->back;
8030
8031         *auto_neg = hclge_get_autoneg(handle);
8032
8033         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8034                 *rx_en = 0;
8035                 *tx_en = 0;
8036                 return;
8037         }
8038
8039         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8040                 *rx_en = 1;
8041                 *tx_en = 0;
8042         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8043                 *tx_en = 1;
8044                 *rx_en = 0;
8045         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8046                 *rx_en = 1;
8047                 *tx_en = 1;
8048         } else {
8049                 *rx_en = 0;
8050                 *tx_en = 0;
8051         }
8052 }
8053
8054 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8055                                 u32 rx_en, u32 tx_en)
8056 {
8057         struct hclge_vport *vport = hclge_get_vport(handle);
8058         struct hclge_dev *hdev = vport->back;
8059         struct phy_device *phydev = hdev->hw.mac.phydev;
8060         u32 fc_autoneg;
8061
8062         fc_autoneg = hclge_get_autoneg(handle);
8063         if (auto_neg != fc_autoneg) {
8064                 dev_info(&hdev->pdev->dev,
8065                          "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8066                 return -EOPNOTSUPP;
8067         }
8068
8069         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8070                 dev_info(&hdev->pdev->dev,
8071                          "Priority flow control enabled. Cannot set link flow control.\n");
8072                 return -EOPNOTSUPP;
8073         }
8074
8075         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8076
8077         if (!fc_autoneg)
8078                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8079
8080         if (phydev)
8081                 return phy_start_aneg(phydev);
8082
8083         if (hdev->pdev->revision == 0x20)
8084                 return -EOPNOTSUPP;
8085
8086         return hclge_restart_autoneg(handle);
8087 }
8088
8089 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8090                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8091 {
8092         struct hclge_vport *vport = hclge_get_vport(handle);
8093         struct hclge_dev *hdev = vport->back;
8094
8095         if (speed)
8096                 *speed = hdev->hw.mac.speed;
8097         if (duplex)
8098                 *duplex = hdev->hw.mac.duplex;
8099         if (auto_neg)
8100                 *auto_neg = hdev->hw.mac.autoneg;
8101 }
8102
8103 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8104                                  u8 *module_type)
8105 {
8106         struct hclge_vport *vport = hclge_get_vport(handle);
8107         struct hclge_dev *hdev = vport->back;
8108
8109         if (media_type)
8110                 *media_type = hdev->hw.mac.media_type;
8111
8112         if (module_type)
8113                 *module_type = hdev->hw.mac.module_type;
8114 }
8115
8116 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8117                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8118 {
8119         struct hclge_vport *vport = hclge_get_vport(handle);
8120         struct hclge_dev *hdev = vport->back;
8121         struct phy_device *phydev = hdev->hw.mac.phydev;
8122         int mdix_ctrl, mdix, retval, is_resolved;
8123
8124         if (!phydev) {
8125                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8126                 *tp_mdix = ETH_TP_MDI_INVALID;
8127                 return;
8128         }
8129
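             /* The MDI/MDI-X control and status bits live on a separate page
              * of this PHY's register space, so switch to that page, read the
              * control and status registers, then restore the copper page.
              */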
8130         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8131
8132         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8133         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8134                                     HCLGE_PHY_MDIX_CTRL_S);
8135
8136         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8137         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8138         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8139
8140         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8141
8142         switch (mdix_ctrl) {
8143         case 0x0:
8144                 *tp_mdix_ctrl = ETH_TP_MDI;
8145                 break;
8146         case 0x1:
8147                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8148                 break;
8149         case 0x3:
8150                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8151                 break;
8152         default:
8153                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8154                 break;
8155         }
8156
8157         if (!is_resolved)
8158                 *tp_mdix = ETH_TP_MDI_INVALID;
8159         else if (mdix)
8160                 *tp_mdix = ETH_TP_MDI_X;
8161         else
8162                 *tp_mdix = ETH_TP_MDI;
8163 }
8164
8165 static void hclge_info_show(struct hclge_dev *hdev)
8166 {
8167         struct device *dev = &hdev->pdev->dev;
8168
8169         dev_info(dev, "PF info begin:\n");
8170
8171         dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
8172         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8173         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8174         dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
8175         dev_info(dev, "Number of vmdq vports: %d\n", hdev->num_vmdq_vport);
8176         dev_info(dev, "Number of VFs for this PF: %d\n", hdev->num_req_vfs);
8177         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8178         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8179         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8180         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8181         dev_info(dev, "This is %s PF\n",
8182                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8183         dev_info(dev, "DCB %s\n",
8184                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8185         dev_info(dev, "MQPRIO %s\n",
8186                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8187
8188         dev_info(dev, "PF info end.\n");
8189 }
8190
8191 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8192                                           struct hclge_vport *vport)
8193 {
8194         struct hnae3_client *client = vport->nic.client;
8195         struct hclge_dev *hdev = ae_dev->priv;
8196         int ret;
8197
8198         ret = client->ops->init_instance(&vport->nic);
8199         if (ret)
8200                 return ret;
8201
8202         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8203         hnae3_set_client_init_flag(client, ae_dev, 1);
8204
8205         /* Enable nic hw error interrupts */
8206         ret = hclge_config_nic_hw_error(hdev, true);
8207         if (ret)
8208                 dev_err(&ae_dev->pdev->dev,
8209                         "fail(%d) to enable hw error interrupts\n", ret);
8210
8211         if (netif_msg_drv(&hdev->vport->nic))
8212                 hclge_info_show(hdev);
8213
8214         return ret;
8215 }
8216
8217 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8218                                            struct hclge_vport *vport)
8219 {
8220         struct hnae3_client *client = vport->roce.client;
8221         struct hclge_dev *hdev = ae_dev->priv;
8222         int ret;
8223
8224         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8225             !hdev->nic_client)
8226                 return 0;
8227
8228         client = hdev->roce_client;
8229         ret = hclge_init_roce_base_info(vport);
8230         if (ret)
8231                 return ret;
8232
8233         ret = client->ops->init_instance(&vport->roce);
8234         if (ret)
8235                 return ret;
8236
8237         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8238         hnae3_set_client_init_flag(client, ae_dev, 1);
8239
8240         return 0;
8241 }
8242
8243 static int hclge_init_client_instance(struct hnae3_client *client,
8244                                       struct hnae3_ae_dev *ae_dev)
8245 {
8246         struct hclge_dev *hdev = ae_dev->priv;
8247         struct hclge_vport *vport;
8248         int i, ret;
8249
8250         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8251                 vport = &hdev->vport[i];
8252
8253                 switch (client->type) {
8254                 case HNAE3_CLIENT_KNIC:
8255
8256                         hdev->nic_client = client;
8257                         vport->nic.client = client;
8258                         ret = hclge_init_nic_client_instance(ae_dev, vport);
8259                         if (ret)
8260                                 goto clear_nic;
8261
8262                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8263                         if (ret)
8264                                 goto clear_roce;
8265
8266                         break;
8267                 case HNAE3_CLIENT_UNIC:
8268                         hdev->nic_client = client;
8269                         vport->nic.client = client;
8270
8271                         ret = client->ops->init_instance(&vport->nic);
8272                         if (ret)
8273                                 goto clear_nic;
8274
8275                         hnae3_set_client_init_flag(client, ae_dev, 1);
8276
8277                         break;
8278                 case HNAE3_CLIENT_ROCE:
8279                         if (hnae3_dev_roce_supported(hdev)) {
8280                                 hdev->roce_client = client;
8281                                 vport->roce.client = client;
8282                         }
8283
8284                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8285                         if (ret)
8286                                 goto clear_roce;
8287
8288                         break;
8289                 default:
8290                         return -EINVAL;
8291                 }
8292         }
8293
8294         /* Enable roce ras interrupts */
8295         ret = hclge_config_rocee_ras_interrupt(hdev, true);
8296         if (ret)
8297                 dev_err(&ae_dev->pdev->dev,
8298                         "fail(%d) to enable roce ras interrupts\n", ret);
8299
8300         return ret;
8301
8302 clear_nic:
8303         hdev->nic_client = NULL;
8304         vport->nic.client = NULL;
8305         return ret;
8306 clear_roce:
8307         hdev->roce_client = NULL;
8308         vport->roce.client = NULL;
8309         return ret;
8310 }
8311
8312 static void hclge_uninit_client_instance(struct hnae3_client *client,
8313                                          struct hnae3_ae_dev *ae_dev)
8314 {
8315         struct hclge_dev *hdev = ae_dev->priv;
8316         struct hclge_vport *vport;
8317         int i;
8318
8319         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8320                 vport = &hdev->vport[i];
8321                 if (hdev->roce_client) {
8322                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8323                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8324                                                                 0);
8325                         hdev->roce_client = NULL;
8326                         vport->roce.client = NULL;
8327                 }
8328                 if (client->type == HNAE3_CLIENT_ROCE)
8329                         return;
8330                 if (hdev->nic_client && client->ops->uninit_instance) {
8331                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8332                         client->ops->uninit_instance(&vport->nic, 0);
8333                         hdev->nic_client = NULL;
8334                         vport->nic.client = NULL;
8335                 }
8336         }
8337 }
8338
8339 static int hclge_pci_init(struct hclge_dev *hdev)
8340 {
8341         struct pci_dev *pdev = hdev->pdev;
8342         struct hclge_hw *hw;
8343         int ret;
8344
8345         ret = pci_enable_device(pdev);
8346         if (ret) {
8347                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8348                 return ret;
8349         }
8350
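             /* Prefer 64-bit DMA addressing; if the platform cannot provide
              * it, fall back to a 32-bit mask (with a warning), and fail the
              * probe if even that is unavailable.
              */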
8351         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8352         if (ret) {
8353                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8354                 if (ret) {
8355                         dev_err(&pdev->dev,
8356                                 "can't set consistent PCI DMA");
8357                         goto err_disable_device;
8358                 }
8359                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8360         }
8361
8362         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8363         if (ret) {
8364                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8365                 goto err_disable_device;
8366         }
8367
8368         pci_set_master(pdev);
8369         hw = &hdev->hw;
8370         hw->io_base = pcim_iomap(pdev, 2, 0);
8371         if (!hw->io_base) {
8372                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8373                 ret = -ENOMEM;
8374                 goto err_clr_master;
8375         }
8376
8377         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8378
8379         return 0;
8380 err_clr_master:
8381         pci_clear_master(pdev);
8382         pci_release_regions(pdev);
8383 err_disable_device:
8384         pci_disable_device(pdev);
8385
8386         return ret;
8387 }
8388
8389 static void hclge_pci_uninit(struct hclge_dev *hdev)
8390 {
8391         struct pci_dev *pdev = hdev->pdev;
8392
8393         pcim_iounmap(pdev, hdev->hw.io_base);
8394         pci_free_irq_vectors(pdev);
8395         pci_clear_master(pdev);
8396         pci_release_mem_regions(pdev);
8397         pci_disable_device(pdev);
8398 }
8399
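/* Mark the service as initialized and the device as down, and clear any
 * stale reset/mailbox scheduling flags.
 */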
8400 static void hclge_state_init(struct hclge_dev *hdev)
8401 {
8402         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8403         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8404         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8405         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8406         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8407         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8408 }
8409
8410 static void hclge_state_uninit(struct hclge_dev *hdev)
8411 {
8412         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8413         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8414
8415         if (hdev->service_timer.function)
8416                 del_timer_sync(&hdev->service_timer);
8417         if (hdev->reset_timer.function)
8418                 del_timer_sync(&hdev->reset_timer);
8419         if (hdev->service_task.func)
8420                 cancel_work_sync(&hdev->service_task);
8421         if (hdev->rst_service_task.func)
8422                 cancel_work_sync(&hdev->rst_service_task);
8423         if (hdev->mbx_service_task.func)
8424                 cancel_work_sync(&hdev->mbx_service_task);
8425 }
8426
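/* Request a function reset ahead of FLR and poll (HCLGE_FLR_WAIT_CNT
 * times, HCLGE_FLR_WAIT_MS apart, i.e. about 5 seconds) for the stack
 * to report that it has been brought down.
 */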
8427 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8428 {
8429 #define HCLGE_FLR_WAIT_MS       100
8430 #define HCLGE_FLR_WAIT_CNT      50
8431         struct hclge_dev *hdev = ae_dev->priv;
8432         int cnt = 0;
8433
8434         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8435         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8436         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8437         hclge_reset_event(hdev->pdev, NULL);
8438
8439         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8440                cnt++ < HCLGE_FLR_WAIT_CNT)
8441                 msleep(HCLGE_FLR_WAIT_MS);
8442
8443         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8444                 dev_err(&hdev->pdev->dev,
8445                         "flr wait down timeout: %d\n", cnt);
8446 }
8447
8448 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8449 {
8450         struct hclge_dev *hdev = ae_dev->priv;
8451
8452         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8453 }
8454
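/* One-time PF initialization: allocate the hclge_dev, bring up PCI, the
 * command queue, MSI/MSI-X, TQPs/vports and the MAC, VLAN, TM, RSS and
 * FD blocks, then arm the service/reset timers and the misc vector.
 */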
8455 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8456 {
8457         struct pci_dev *pdev = ae_dev->pdev;
8458         struct hclge_dev *hdev;
8459         int ret;
8460
8461         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8462         if (!hdev) {
8463                 ret = -ENOMEM;
8464                 goto out;
8465         }
8466
8467         hdev->pdev = pdev;
8468         hdev->ae_dev = ae_dev;
8469         hdev->reset_type = HNAE3_NONE_RESET;
8470         hdev->reset_level = HNAE3_FUNC_RESET;
8471         ae_dev->priv = hdev;
8472         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8473
8474         mutex_init(&hdev->vport_lock);
8475         mutex_init(&hdev->vport_cfg_mutex);
8476         spin_lock_init(&hdev->fd_rule_lock);
8477
8478         ret = hclge_pci_init(hdev);
8479         if (ret) {
8480                 dev_err(&pdev->dev, "PCI init failed\n");
8481                 goto out;
8482         }
8483
8484         /* Initialize the firmware command queue */
8485         ret = hclge_cmd_queue_init(hdev);
8486         if (ret) {
8487                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8488                 goto err_pci_uninit;
8489         }
8490
8491         /* Initialize the firmware command interface */
8492         ret = hclge_cmd_init(hdev);
8493         if (ret)
8494                 goto err_cmd_uninit;
8495
8496         ret = hclge_get_cap(hdev);
8497         if (ret) {
8498                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8499                         ret);
8500                 goto err_cmd_uninit;
8501         }
8502
8503         ret = hclge_configure(hdev);
8504         if (ret) {
8505                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8506                 goto err_cmd_uninit;
8507         }
8508
8509         ret = hclge_init_msi(hdev);
8510         if (ret) {
8511                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8512                 goto err_cmd_uninit;
8513         }
8514
8515         ret = hclge_misc_irq_init(hdev);
8516         if (ret) {
8517                 dev_err(&pdev->dev,
8518                         "Misc IRQ(vector0) init error, ret = %d.\n",
8519                         ret);
8520                 goto err_msi_uninit;
8521         }
8522
8523         ret = hclge_alloc_tqps(hdev);
8524         if (ret) {
8525                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8526                 goto err_msi_irq_uninit;
8527         }
8528
8529         ret = hclge_alloc_vport(hdev);
8530         if (ret) {
8531                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8532                 goto err_msi_irq_uninit;
8533         }
8534
8535         ret = hclge_map_tqp(hdev);
8536         if (ret) {
8537                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8538                 goto err_msi_irq_uninit;
8539         }
8540
8541         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8542                 ret = hclge_mac_mdio_config(hdev);
8543                 if (ret) {
8544                         dev_err(&hdev->pdev->dev,
8545                                 "mdio config fail ret=%d\n", ret);
8546                         goto err_msi_irq_uninit;
8547                 }
8548         }
8549
8550         ret = hclge_init_umv_space(hdev);
8551         if (ret) {
8552                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8553                 goto err_mdiobus_unreg;
8554         }
8555
8556         ret = hclge_mac_init(hdev);
8557         if (ret) {
8558                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8559                 goto err_mdiobus_unreg;
8560         }
8561
8562         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8563         if (ret) {
8564                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8565                 goto err_mdiobus_unreg;
8566         }
8567
8568         ret = hclge_config_gro(hdev, true);
8569         if (ret)
8570                 goto err_mdiobus_unreg;
8571
8572         ret = hclge_init_vlan_config(hdev);
8573         if (ret) {
8574                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8575                 goto err_mdiobus_unreg;
8576         }
8577
8578         ret = hclge_tm_schd_init(hdev);
8579         if (ret) {
8580                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8581                 goto err_mdiobus_unreg;
8582         }
8583
8584         hclge_rss_init_cfg(hdev);
8585         ret = hclge_rss_init_hw(hdev);
8586         if (ret) {
8587                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8588                 goto err_mdiobus_unreg;
8589         }
8590
8591         ret = init_mgr_tbl(hdev);
8592         if (ret) {
8593                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8594                 goto err_mdiobus_unreg;
8595         }
8596
8597         ret = hclge_init_fd_config(hdev);
8598         if (ret) {
8599                 dev_err(&pdev->dev,
8600                         "fd table init fail, ret=%d\n", ret);
8601                 goto err_mdiobus_unreg;
8602         }
8603
8604         INIT_KFIFO(hdev->mac_tnl_log);
8605
8606         hclge_dcb_ops_set(hdev);
8607
8608         timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8609         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8610         INIT_WORK(&hdev->service_task, hclge_service_task);
8611         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8612         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8613
8614         hclge_clear_all_event_cause(hdev);
8615
8616         /* Enable the MISC vector (vector0) */
8617         hclge_enable_vector(&hdev->misc_vector, true);
8618
8619         hclge_state_init(hdev);
8620         hdev->last_reset_time = jiffies;
8621
8622         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8623         return 0;
8624
8625 err_mdiobus_unreg:
8626         if (hdev->hw.mac.phydev)
8627                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8628 err_msi_irq_uninit:
8629         hclge_misc_irq_uninit(hdev);
8630 err_msi_uninit:
8631         pci_free_irq_vectors(pdev);
8632 err_cmd_uninit:
8633         hclge_cmd_uninit(hdev);
8634 err_pci_uninit:
8635         pcim_iounmap(pdev, hdev->hw.io_base);
8636         pci_clear_master(pdev);
8637         pci_release_regions(pdev);
8638         pci_disable_device(pdev);
8639 out:
8640         return ret;
8641 }
8642
8643 static void hclge_stats_clear(struct hclge_dev *hdev)
8644 {
8645         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8646 }
8647
8648 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8649 {
8650         struct hclge_vport *vport = hdev->vport;
8651         int i;
8652
8653         for (i = 0; i < hdev->num_alloc_vport; i++) {
8654                 hclge_vport_stop(vport);
8655                 vport++;
8656         }
8657 }
8658
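/* Re-initialize hardware state after a reset. Resources allocated at
 * probe time (PCI, IRQ vectors, vport memory) are reused; only the
 * hardware configuration is replayed.
 */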
8659 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8660 {
8661         struct hclge_dev *hdev = ae_dev->priv;
8662         struct pci_dev *pdev = ae_dev->pdev;
8663         int ret;
8664
8665         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8666
8667         hclge_stats_clear(hdev);
8668         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8669         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8670
8671         ret = hclge_cmd_init(hdev);
8672         if (ret) {
8673                 dev_err(&pdev->dev, "Cmd queue init failed\n");
8674                 return ret;
8675         }
8676
8677         ret = hclge_map_tqp(hdev);
8678         if (ret) {
8679                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8680                 return ret;
8681         }
8682
8683         hclge_reset_umv_space(hdev);
8684
8685         ret = hclge_mac_init(hdev);
8686         if (ret) {
8687                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8688                 return ret;
8689         }
8690
8691         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8692         if (ret) {
8693                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8694                 return ret;
8695         }
8696
8697         ret = hclge_config_gro(hdev, true);
8698         if (ret)
8699                 return ret;
8700
8701         ret = hclge_init_vlan_config(hdev);
8702         if (ret) {
8703                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8704                 return ret;
8705         }
8706
8707         ret = hclge_tm_init_hw(hdev, true);
8708         if (ret) {
8709                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8710                 return ret;
8711         }
8712
8713         ret = hclge_rss_init_hw(hdev);
8714         if (ret) {
8715                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8716                 return ret;
8717         }
8718
8719         ret = hclge_init_fd_config(hdev);
8720         if (ret) {
8721                 dev_err(&pdev->dev,
8722                         "fd table init fail, ret=%d\n", ret);
8723                 return ret;
8724         }
8725
8726         /* Re-enable the hw error interrupts because
8727          * the interrupts get disabled on global reset.
8728          */
8729         ret = hclge_config_nic_hw_error(hdev, true);
8730         if (ret) {
8731                 dev_err(&pdev->dev,
8732                         "fail(%d) to re-enable NIC hw error interrupts\n",
8733                         ret);
8734                 return ret;
8735         }
8736
8737         if (hdev->roce_client) {
8738                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8739                 if (ret) {
8740                         dev_err(&pdev->dev,
8741                                 "fail(%d) to re-enable roce ras interrupts\n",
8742                                 ret);
8743                         return ret;
8744                 }
8745         }
8746
8747         hclge_reset_vport_state(hdev);
8748
8749         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8750                  HCLGE_DRIVER_NAME);
8751
8752         return 0;
8753 }
8754
8755 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8756 {
8757         struct hclge_dev *hdev = ae_dev->priv;
8758         struct hclge_mac *mac = &hdev->hw.mac;
8759
8760         hclge_state_uninit(hdev);
8761
8762         if (mac->phydev)
8763                 mdiobus_unregister(mac->mdio_bus);
8764
8765         hclge_uninit_umv_space(hdev);
8766
8767         /* Disable the MISC vector (vector0) */
8768         hclge_enable_vector(&hdev->misc_vector, false);
8769         synchronize_irq(hdev->misc_vector.vector_irq);
8770
8771         /* Disable all hw interrupts */
8772         hclge_config_mac_tnl_int(hdev, false);
8773         hclge_config_nic_hw_error(hdev, false);
8774         hclge_config_rocee_ras_interrupt(hdev, false);
8775
8776         hclge_cmd_uninit(hdev);
8777         hclge_misc_irq_uninit(hdev);
8778         hclge_pci_uninit(hdev);
8779         mutex_destroy(&hdev->vport_lock);
8780         hclge_uninit_vport_mac_table(hdev);
8781         hclge_uninit_vport_vlan_table(hdev);
8782         mutex_destroy(&hdev->vport_cfg_mutex);
8783         ae_dev->priv = NULL;
8784 }
8785
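/* The maximum number of combined channels is bounded by both the RSS
 * size limit and the TQPs available to the vport per TC.
 */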
8786 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8787 {
8788         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8789         struct hclge_vport *vport = hclge_get_vport(handle);
8790         struct hclge_dev *hdev = vport->back;
8791
8792         return min_t(u32, hdev->rss_size_max,
8793                      vport->alloc_tqps / kinfo->num_tc);
8794 }
8795
8796 static void hclge_get_channels(struct hnae3_handle *handle,
8797                                struct ethtool_channels *ch)
8798 {
8799         ch->max_combined = hclge_get_max_channels(handle);
8800         ch->other_count = 1;
8801         ch->max_other = 1;
8802         ch->combined_count = handle->kinfo.rss_size;
8803 }
8804
8805 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8806                                         u16 *alloc_tqps, u16 *max_rss_size)
8807 {
8808         struct hclge_vport *vport = hclge_get_vport(handle);
8809         struct hclge_dev *hdev = vport->back;
8810
8811         *alloc_tqps = vport->alloc_tqps;
8812         *max_rss_size = hdev->rss_size_max;
8813 }
8814
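/* Apply a new queue count: record the requested rss_size, let TM remap
 * the vport's TQPs, rewrite the RSS TC mode and, unless the user has
 * already configured one, rebuild the RSS indirection table to match.
 */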
8815 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8816                               bool rxfh_configured)
8817 {
8818         struct hclge_vport *vport = hclge_get_vport(handle);
8819         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8820         struct hclge_dev *hdev = vport->back;
8821         int cur_rss_size = kinfo->rss_size;
8822         int cur_tqps = kinfo->num_tqps;
8823         u16 tc_offset[HCLGE_MAX_TC_NUM];
8824         u16 tc_valid[HCLGE_MAX_TC_NUM];
8825         u16 tc_size[HCLGE_MAX_TC_NUM];
8826         u16 roundup_size;
8827         u32 *rss_indir;
8828         int ret, i;
8829
8830         kinfo->req_rss_size = new_tqps_num;
8831
8832         ret = hclge_tm_vport_map_update(hdev);
8833         if (ret) {
8834                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8835                 return ret;
8836         }
8837
8838         roundup_size = roundup_pow_of_two(kinfo->rss_size);
8839         roundup_size = ilog2(roundup_size);
8840         /* Set the RSS TC mode according to the new RSS size */
8841         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8842                 tc_valid[i] = 0;
8843
8844                 if (!(hdev->hw_tc_map & BIT(i)))
8845                         continue;
8846
8847                 tc_valid[i] = 1;
8848                 tc_size[i] = roundup_size;
8849                 tc_offset[i] = kinfo->rss_size * i;
8850         }
8851         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8852         if (ret)
8853                 return ret;
8854
8855         /* RSS indirection table has been configured by user */
8856         if (rxfh_configured)
8857                 goto out;
8858
8859         /* Reinitialize the RSS indirection table according to the new RSS size */
8860         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8861         if (!rss_indir)
8862                 return -ENOMEM;
8863
8864         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8865                 rss_indir[i] = i % kinfo->rss_size;
8866
8867         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8868         if (ret)
8869                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8870                         ret);
8871
8872         kfree(rss_indir);
8873
8874 out:
8875         if (!ret)
8876                 dev_info(&hdev->pdev->dev,
8877                          "Channels changed, rss_size from %d to %d, tqps from %d to %d",
8878                          cur_rss_size, kinfo->rss_size,
8879                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
8880
8881         return ret;
8882 }
8883
8884 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8885                               u32 *regs_num_64_bit)
8886 {
8887         struct hclge_desc desc;
8888         u32 total_num;
8889         int ret;
8890
8891         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8892         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8893         if (ret) {
8894                 dev_err(&hdev->pdev->dev,
8895                         "Query register number cmd failed, ret = %d.\n", ret);
8896                 return ret;
8897         }
8898
8899         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8900         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8901
8902         total_num = *regs_num_32_bit + *regs_num_64_bit;
8903         if (!total_num)
8904                 return -EINVAL;
8905
8906         return 0;
8907 }
8908
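/* Fetch regs_num 32-bit register values from firmware. The first
 * descriptor only carries data after its two-word command header, while
 * every following descriptor is consumed as data in full.
 */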
8909 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8910                                  void *data)
8911 {
8912 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8913
8914         struct hclge_desc *desc;
8915         u32 *reg_val = data;
8916         __le32 *desc_data;
8917         int cmd_num;
8918         int i, k, n;
8919         int ret;
8920
8921         if (regs_num == 0)
8922                 return 0;
8923
8924         cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8925         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8926         if (!desc)
8927                 return -ENOMEM;
8928
8929         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8930         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8931         if (ret) {
8932                 dev_err(&hdev->pdev->dev,
8933                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
8934                 kfree(desc);
8935                 return ret;
8936         }
8937
8938         for (i = 0; i < cmd_num; i++) {
8939                 if (i == 0) {
8940                         desc_data = (__le32 *)(&desc[i].data[0]);
8941                         n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8942                 } else {
8943                         desc_data = (__le32 *)(&desc[i]);
8944                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
8945                 }
8946                 for (k = 0; k < n; k++) {
8947                         *reg_val++ = le32_to_cpu(*desc_data++);
8948
8949                         regs_num--;
8950                         if (!regs_num)
8951                                 break;
8952                 }
8953         }
8954
8955         kfree(desc);
8956         return 0;
8957 }
8958
8959 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8960                                  void *data)
8961 {
8962 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8963
8964         struct hclge_desc *desc;
8965         u64 *reg_val = data;
8966         __le64 *desc_data;
8967         int cmd_num;
8968         int i, k, n;
8969         int ret;
8970
8971         if (regs_num == 0)
8972                 return 0;
8973
8974         cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8975         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8976         if (!desc)
8977                 return -ENOMEM;
8978
8979         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8980         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8981         if (ret) {
8982                 dev_err(&hdev->pdev->dev,
8983                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
8984                 kfree(desc);
8985                 return ret;
8986         }
8987
8988         for (i = 0; i < cmd_num; i++) {
8989                 if (i == 0) {
8990                         desc_data = (__le64 *)(&desc[i].data[0]);
8991                         n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8992                 } else {
8993                         desc_data = (__le64 *)(&desc[i]);
8994                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
8995                 }
8996                 for (k = 0; k < n; k++) {
8997                         *reg_val++ = le64_to_cpu(*desc_data++);
8998
8999                         regs_num--;
9000                         if (!regs_num)
9001                                 break;
9002                 }
9003         }
9004
9005         kfree(desc);
9006         return 0;
9007 }
9008
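/* Register dump layout: every group of registers is padded with
 * SEPARATOR_VALUE words so that each block occupies a whole number of
 * REG_LEN_PER_LINE-byte lines.
 */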
9009 #define MAX_SEPARATE_NUM        4
9010 #define SEPARATOR_VALUE         0xFFFFFFFF
9011 #define REG_NUM_PER_LINE        4
9012 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
9013
9014 static int hclge_get_regs_len(struct hnae3_handle *handle)
9015 {
9016         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9017         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9018         struct hclge_vport *vport = hclge_get_vport(handle);
9019         struct hclge_dev *hdev = vport->back;
9020         u32 regs_num_32_bit, regs_num_64_bit;
9021         int ret;
9022
9023         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9024         if (ret) {
9025                 dev_err(&hdev->pdev->dev,
9026                         "Get register number failed, ret = %d.\n", ret);
9027                 return -EOPNOTSUPP;
9028         }
9029
9030         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9031         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9032         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9033         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9034
9035         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9036                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9037                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9038 }
9039
9040 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9041                            void *data)
9042 {
9043         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9044         struct hclge_vport *vport = hclge_get_vport(handle);
9045         struct hclge_dev *hdev = vport->back;
9046         u32 regs_num_32_bit, regs_num_64_bit;
9047         int i, j, reg_um, separator_num;
9048         u32 *reg = data;
9049         int ret;
9050
9051         *version = hdev->fw_version;
9052
9053         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9054         if (ret) {
9055                 dev_err(&hdev->pdev->dev,
9056                         "Get register number failed, ret = %d.\n", ret);
9057                 return;
9058         }
9059
9060         /* fetch per-PF register values from the PF PCIe register space */
9061         reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9062         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9063         for (i = 0; i < reg_um; i++)
9064                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9065         for (i = 0; i < separator_num; i++)
9066                 *reg++ = SEPARATOR_VALUE;
9067
9068         reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
9069         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9070         for (i = 0; i < reg_um; i++)
9071                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9072         for (i = 0; i < separator_num; i++)
9073                 *reg++ = SEPARATOR_VALUE;
9074
9075         reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
9076         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9077         for (j = 0; j < kinfo->num_tqps; j++) {
9078                 for (i = 0; i < reg_um; i++)
9079                         *reg++ = hclge_read_dev(&hdev->hw,
9080                                                 ring_reg_addr_list[i] +
9081                                                 0x200 * j);
9082                 for (i = 0; i < separator_num; i++)
9083                         *reg++ = SEPARATOR_VALUE;
9084         }
9085
9086         reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9087         separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
9088         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9089                 for (i = 0; i < reg_um; i++)
9090                         *reg++ = hclge_read_dev(&hdev->hw,
9091                                                 tqp_intr_reg_addr_list[i] +
9092                                                 4 * j);
9093                 for (i = 0; i < separator_num; i++)
9094                         *reg++ = SEPARATOR_VALUE;
9095         }
9096
9097         /* fetch PF common register values from firmware */
9098         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9099         if (ret) {
9100                 dev_err(&hdev->pdev->dev,
9101                         "Get 32 bit register failed, ret = %d.\n", ret);
9102                 return;
9103         }
9104
9105         reg += regs_num_32_bit;
9106         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9107         if (ret)
9108                 dev_err(&hdev->pdev->dev,
9109                         "Get 64 bit register failed, ret = %d.\n", ret);
9110 }
9111
9112 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9113 {
9114         struct hclge_set_led_state_cmd *req;
9115         struct hclge_desc desc;
9116         int ret;
9117
9118         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9119
9120         req = (struct hclge_set_led_state_cmd *)desc.data;
9121         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9122                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9123
9124         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9125         if (ret)
9126                 dev_err(&hdev->pdev->dev,
9127                         "Send set led state cmd error, ret =%d\n", ret);
9128
9129         return ret;
9130 }
9131
9132 enum hclge_led_status {
9133         HCLGE_LED_OFF,
9134         HCLGE_LED_ON,
9135         HCLGE_LED_NO_CHANGE = 0xFF,
9136 };
9137
9138 static int hclge_set_led_id(struct hnae3_handle *handle,
9139                             enum ethtool_phys_id_state status)
9140 {
9141         struct hclge_vport *vport = hclge_get_vport(handle);
9142         struct hclge_dev *hdev = vport->back;
9143
9144         switch (status) {
9145         case ETHTOOL_ID_ACTIVE:
9146                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9147         case ETHTOOL_ID_INACTIVE:
9148                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9149         default:
9150                 return -EINVAL;
9151         }
9152 }
9153
9154 static void hclge_get_link_mode(struct hnae3_handle *handle,
9155                                 unsigned long *supported,
9156                                 unsigned long *advertising)
9157 {
9158         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9159         struct hclge_vport *vport = hclge_get_vport(handle);
9160         struct hclge_dev *hdev = vport->back;
9161         unsigned int idx = 0;
9162
9163         for (; idx < size; idx++) {
9164                 supported[idx] = hdev->hw.mac.supported[idx];
9165                 advertising[idx] = hdev->hw.mac.advertising[idx];
9166         }
9167 }
9168
9169 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9170 {
9171         struct hclge_vport *vport = hclge_get_vport(handle);
9172         struct hclge_dev *hdev = vport->back;
9173
9174         return hclge_config_gro(hdev, enable);
9175 }
9176
9177 static const struct hnae3_ae_ops hclge_ops = {
9178         .init_ae_dev = hclge_init_ae_dev,
9179         .uninit_ae_dev = hclge_uninit_ae_dev,
9180         .flr_prepare = hclge_flr_prepare,
9181         .flr_done = hclge_flr_done,
9182         .init_client_instance = hclge_init_client_instance,
9183         .uninit_client_instance = hclge_uninit_client_instance,
9184         .map_ring_to_vector = hclge_map_ring_to_vector,
9185         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9186         .get_vector = hclge_get_vector,
9187         .put_vector = hclge_put_vector,
9188         .set_promisc_mode = hclge_set_promisc_mode,
9189         .set_loopback = hclge_set_loopback,
9190         .start = hclge_ae_start,
9191         .stop = hclge_ae_stop,
9192         .client_start = hclge_client_start,
9193         .client_stop = hclge_client_stop,
9194         .get_status = hclge_get_status,
9195         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9196         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9197         .get_media_type = hclge_get_media_type,
9198         .check_port_speed = hclge_check_port_speed,
9199         .get_fec = hclge_get_fec,
9200         .set_fec = hclge_set_fec,
9201         .get_rss_key_size = hclge_get_rss_key_size,
9202         .get_rss_indir_size = hclge_get_rss_indir_size,
9203         .get_rss = hclge_get_rss,
9204         .set_rss = hclge_set_rss,
9205         .set_rss_tuple = hclge_set_rss_tuple,
9206         .get_rss_tuple = hclge_get_rss_tuple,
9207         .get_tc_size = hclge_get_tc_size,
9208         .get_mac_addr = hclge_get_mac_addr,
9209         .set_mac_addr = hclge_set_mac_addr,
9210         .do_ioctl = hclge_do_ioctl,
9211         .add_uc_addr = hclge_add_uc_addr,
9212         .rm_uc_addr = hclge_rm_uc_addr,
9213         .add_mc_addr = hclge_add_mc_addr,
9214         .rm_mc_addr = hclge_rm_mc_addr,
9215         .set_autoneg = hclge_set_autoneg,
9216         .get_autoneg = hclge_get_autoneg,
9217         .restart_autoneg = hclge_restart_autoneg,
9218         .get_pauseparam = hclge_get_pauseparam,
9219         .set_pauseparam = hclge_set_pauseparam,
9220         .set_mtu = hclge_set_mtu,
9221         .reset_queue = hclge_reset_tqp,
9222         .get_stats = hclge_get_stats,
9223         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9224         .update_stats = hclge_update_stats,
9225         .get_strings = hclge_get_strings,
9226         .get_sset_count = hclge_get_sset_count,
9227         .get_fw_version = hclge_get_fw_version,
9228         .get_mdix_mode = hclge_get_mdix_mode,
9229         .enable_vlan_filter = hclge_enable_vlan_filter,
9230         .set_vlan_filter = hclge_set_vlan_filter,
9231         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9232         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9233         .reset_event = hclge_reset_event,
9234         .set_default_reset_request = hclge_set_def_reset_request,
9235         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9236         .set_channels = hclge_set_channels,
9237         .get_channels = hclge_get_channels,
9238         .get_regs_len = hclge_get_regs_len,
9239         .get_regs = hclge_get_regs,
9240         .set_led_id = hclge_set_led_id,
9241         .get_link_mode = hclge_get_link_mode,
9242         .add_fd_entry = hclge_add_fd_entry,
9243         .del_fd_entry = hclge_del_fd_entry,
9244         .del_all_fd_entries = hclge_del_all_fd_entries,
9245         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9246         .get_fd_rule_info = hclge_get_fd_rule_info,
9247         .get_fd_all_rules = hclge_get_all_rules,
9248         .restore_fd_rules = hclge_restore_fd_entries,
9249         .enable_fd = hclge_enable_fd,
9250         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9251         .dbg_run_cmd = hclge_dbg_run_cmd,
9252         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9253         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9254         .ae_dev_resetting = hclge_ae_dev_resetting,
9255         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9256         .set_gro_en = hclge_gro_en,
9257         .get_global_queue_id = hclge_covert_handle_qid_global,
9258         .set_timer_task = hclge_set_timer_task,
9259         .mac_connect_phy = hclge_mac_connect_phy,
9260         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9261         .restore_vlan_table = hclge_restore_vlan_table,
9262 };
9263
9264 static struct hnae3_ae_algo ae_algo = {
9265         .ops = &hclge_ops,
9266         .pdev_id_table = ae_algo_pci_tbl,
9267 };
9268
9269 static int hclge_init(void)
9270 {
9271         pr_info("%s is initializing\n", HCLGE_NAME);
9272
9273         hnae3_register_ae_algo(&ae_algo);
9274
9275         return 0;
9276 }
9277
9278 static void hclge_exit(void)
9279 {
9280         hnae3_unregister_ae_algo(&ae_algo);
9281 }
9282 module_init(hclge_init);
9283 module_exit(hclge_exit);
9284
9285 MODULE_LICENSE("GPL");
9286 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9287 MODULE_DESCRIPTION("HCLGE Driver");
9288 MODULE_VERSION(HCLGE_MOD_VERSION);